/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist               ({ qemu_build_not_reached(); NULL; })
# define FSR_LDXFSR_MASK                        0
# define FSR_LDXFSR_OLDMASK                     0
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3
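
/*
 * These sentinel values can share storage with real pc/npc values
 * because SPARC instructions are 4-byte aligned: a genuine address
 * always has its low two bits clear, which is what checks such as
 * "dc->npc & 3" below rely on.
 */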

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b)     sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b)  sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
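
/*
 * Worked example: for X = 0xABCD1234, GET_FIELD(X, 0, 7) extracts the
 * most significant byte (0xAB), since bit 0 is the MSB in this
 * ordering, while GET_FIELD_SP(X, 0, 7) extracts the least significant
 * byte (0x34) per the manuals' bit numbering.
 */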

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
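
/*
 * Worked example: on sparc64 the 5-bit register field encodes bit 5 of
 * the double/quad register number in its LSB, so a field value of 3
 * (0b00011) names double register 34: DFPREG(3) == (1 << 5) | 2 == 34.
 */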

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
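
/*
 * Each cpu_fpr[] element packs an even/odd pair of 32-bit registers
 * into one i64 with the even register in the high half: %f0 is the
 * upper half of cpu_fpr[0] and %f1 the lower half, which is why odd
 * register numbers take the extrl (low part) path above.
 */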

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = DFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
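
/*
 * AM_CHECK models the v9 PSTATE.AM (address mask) bit: when set, a
 * 64-bit address such as 0x100001000 is truncated to its low 32 bits
 * (0x00001000) before use; pre-v9 targets never mask.
 */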

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
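
/*
 * With a 64-bit target, the 32-bit condition carry lives at bit 32 of
 * cpu_icc_C: the stored value is result ^ src1 ^ src2, whose bit 32 is
 * the carry (or borrow) out of bit 31.  e.g. for 0xffffffff + 1 that
 * xor is 0x1fffffffe, so the extract above yields icc.C = 1.
 */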

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
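
/*
 * The V computation above uses the identity
 *   overflow = (result ^ src2) & ~(src1 ^ src2)
 * i.e. overflow occurs iff the operands agree in sign but the result
 * does not: 0x7fffffff + 1 sets icc.V, while 0x7fffffff + (-1) leaves
 * it clear.  gen_compare() below tests only the sign bit of cpu_cc_V.
 */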

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_andi_tl(t0, cpu_y, 0x1);
    tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
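
/*
 * MULScc is one step of the v8 iterative multiply: %y holds the
 * multiplier and is shifted right one bit per step, the partial sum
 * shifts in N ^ V from the previous step's condition codes, and a
 * run of 32 successive mulscc instructions computes a full 32x32
 * product.
 */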

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
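
/*
 * Both arms leave the high 32 bits of the product in %y, matching the
 * architectural UMUL/SMUL definition: e.g. umul of 0x80000000 by 2
 * yields a low word of 0 and %y == 1.
 */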

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
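
/*
 * The xori works because the byte-aligned shift amount 8*n (n in
 * [0, 7]) has its low three bits clear, so (8*n) ^ 63 == 63 - 8*n;
 * together with the following constant shift by 1 this computes
 * s2 >> (64 - 8*n) without ever issuing an out-of-range 64-bit shift,
 * and for n == 0 correctly produces t2 == 0.
 */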

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

/*
  FSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
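
/*
 * The cond field decodes as: bits 0-2 select one of the eight base
 * tests above and bit 3 inverts the result, so e.g. cond 0x9 ("bne")
 * is the inversion of cond 0x1 ("be"), and 0x8 ("ba") inverts
 * 0x0 ("bn").
 */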

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = 0;

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
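
/*
 * Example resolution: a v9 "lda [%o0] ASI_PL, %o1" yields
 * GET_ASI_DIRECT with the memop byte-swapped (bit 3 of the asi marks
 * the little-endian variants), while an unprivileged access to any
 * asi below 0x80 yields GET_ASI_EXCP with a privileged_action trap
 * already generated.
 */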
1663
1664 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1665 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1666 TCGv_i32 asi, TCGv_i32 mop)
1667 {
1668 g_assert_not_reached();
1669 }
1670
1671 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1672 TCGv_i32 asi, TCGv_i32 mop)
1673 {
1674 g_assert_not_reached();
1675 }
1676 #endif
1677
1678 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1679 {
1680 switch (da->type) {
1681 case GET_ASI_EXCP:
1682 break;
1683 case GET_ASI_DTWINX: /* Reserved for ldda. */
1684 gen_exception(dc, TT_ILL_INSN);
1685 break;
1686 case GET_ASI_DIRECT:
1687 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1688 break;
1689 default:
1690 {
1691 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1692 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1693
1694 save_state(dc);
1695 #ifdef TARGET_SPARC64
1696 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1697 #else
1698 {
1699 TCGv_i64 t64 = tcg_temp_new_i64();
1700 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1701 tcg_gen_trunc_i64_tl(dst, t64);
1702 }
1703 #endif
1704 }
1705 break;
1706 }
1707 }
1708
1709 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1710 {
1711 switch (da->type) {
1712 case GET_ASI_EXCP:
1713 break;
1714
1715 case GET_ASI_DTWINX: /* Reserved for stda. */
1716 if (TARGET_LONG_BITS == 32) {
1717 gen_exception(dc, TT_ILL_INSN);
1718 break;
1719 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1720 /* Pre OpenSPARC CPUs don't have these */
1721 gen_exception(dc, TT_ILL_INSN);
1722 break;
1723 }
1724 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1725 /* fall through */
1726
1727 case GET_ASI_DIRECT:
1728 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1729 break;
1730
1731 case GET_ASI_BCOPY:
1732 assert(TARGET_LONG_BITS == 32);
1733 /*
1734 * Copy 32 bytes from the address in SRC to ADDR.
1735 *
1736 * From Ross RT625 hyperSPARC manual, section 4.6:
1737 * "Block Copy and Block Fill will work only on cache line boundaries."
1738 *
1739 * It does not specify if an unaliged address is truncated or trapped.
1740 * Previous qemu behaviour was to truncate to 4 byte alignment, which
1741 * is obviously wrong. The only place I can see this used is in the
1742 * Linux kernel which begins with page alignment, advancing by 32,
1743 * so is always aligned. Assume truncation as the simpler option.
1744 *
1745 * Since the loads and stores are paired, allow the copy to happen
1746 * in the host endianness. The copy need not be atomic.
1747 */
1748 {
1749 MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1750 TCGv saddr = tcg_temp_new();
1751 TCGv daddr = tcg_temp_new();
1752 TCGv_i128 tmp = tcg_temp_new_i128();
1753
1754 tcg_gen_andi_tl(saddr, src, -32);
1755 tcg_gen_andi_tl(daddr, addr, -32);
1756 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1757 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1758 tcg_gen_addi_tl(saddr, saddr, 16);
1759 tcg_gen_addi_tl(daddr, daddr, 16);
1760 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1761 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1762 }
1763 break;
1764
1765 default:
1766 {
1767 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1768 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1769
1770 save_state(dc);
1771 #ifdef TARGET_SPARC64
1772 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1773 #else
1774 {
1775 TCGv_i64 t64 = tcg_temp_new_i64();
1776 tcg_gen_extu_tl_i64(t64, src);
1777 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1778 }
1779 #endif
1780
1781 /* A write to a TLB register may alter page maps. End the TB. */
1782 dc->npc = DYNAMIC_PC;
1783 }
1784 break;
1785 }
1786 }
1787
1788 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1789 TCGv dst, TCGv src, TCGv addr)
1790 {
1791 switch (da->type) {
1792 case GET_ASI_EXCP:
1793 break;
1794 case GET_ASI_DIRECT:
1795 tcg_gen_atomic_xchg_tl(dst, addr, src,
1796 da->mem_idx, da->memop | MO_ALIGN);
1797 break;
1798 default:
1799 /* ??? Should be DAE_invalid_asi. */
1800 gen_exception(dc, TT_DATA_ACCESS);
1801 break;
1802 }
1803 }
1804
1805 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1806 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1807 {
1808 switch (da->type) {
1809 case GET_ASI_EXCP:
1810 return;
1811 case GET_ASI_DIRECT:
1812 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1813 da->mem_idx, da->memop | MO_ALIGN);
1814 break;
1815 default:
1816 /* ??? Should be DAE_invalid_asi. */
1817 gen_exception(dc, TT_DATA_ACCESS);
1818 break;
1819 }
1820 }
1821
1822 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1823 {
1824 switch (da->type) {
1825 case GET_ASI_EXCP:
1826 break;
1827 case GET_ASI_DIRECT:
1828 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1829 da->mem_idx, MO_UB);
1830 break;
1831 default:
1832 /* ??? In theory, this should be raise DAE_invalid_asi.
1833 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1834 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1835 gen_helper_exit_atomic(tcg_env);
1836 } else {
1837 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1838 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1839 TCGv_i64 s64, t64;
1840
1841 save_state(dc);
1842 t64 = tcg_temp_new_i64();
1843 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1844
1845 s64 = tcg_constant_i64(0xff);
1846 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1847
1848 tcg_gen_trunc_i64_tl(dst, t64);
1849
1850 /* End the TB. */
1851 dc->npc = DYNAMIC_PC;
1852 }
1853 break;
1854 }
1855 }
1856
1857 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1858 TCGv addr, int rd)
1859 {
1860 MemOp memop = da->memop;
1861 MemOp size = memop & MO_SIZE;
1862 TCGv_i32 d32;
1863 TCGv_i64 d64;
1864 TCGv addr_tmp;
1865
1866 /* TODO: Use 128-bit load/store below. */
1867 if (size == MO_128) {
1868 memop = (memop & ~MO_SIZE) | MO_64;
1869 }
1870
1871 switch (da->type) {
1872 case GET_ASI_EXCP:
1873 break;
1874
1875 case GET_ASI_DIRECT:
1876 memop |= MO_ALIGN_4;
1877 switch (size) {
1878 case MO_32:
1879 d32 = tcg_temp_new_i32();
1880 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1881 gen_store_fpr_F(dc, rd, d32);
1882 break;
1883
1884 case MO_64:
1885 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1886 break;
1887
1888 case MO_128:
1889 d64 = tcg_temp_new_i64();
1890 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1891 addr_tmp = tcg_temp_new();
1892 tcg_gen_addi_tl(addr_tmp, addr, 8);
1893 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1894 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1895 break;
1896 default:
1897 g_assert_not_reached();
1898 }
1899 break;
1900
1901 case GET_ASI_BLOCK:
1902 /* Valid for lddfa on aligned registers only. */
1903 if (orig_size == MO_64 && (rd & 7) == 0) {
1904 /* The first operation checks required alignment. */
1905 addr_tmp = tcg_temp_new();
1906 for (int i = 0; ; ++i) {
1907 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1908 memop | (i == 0 ? MO_ALIGN_64 : 0));
1909 if (i == 7) {
1910 break;
1911 }
1912 tcg_gen_addi_tl(addr_tmp, addr, 8);
1913 addr = addr_tmp;
1914 }
1915 } else {
1916 gen_exception(dc, TT_ILL_INSN);
1917 }
1918 break;
1919
1920 case GET_ASI_SHORT:
1921 /* Valid for lddfa only. */
1922 if (orig_size == MO_64) {
1923 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1924 memop | MO_ALIGN);
1925 } else {
1926 gen_exception(dc, TT_ILL_INSN);
1927 }
1928 break;
1929
1930 default:
1931 {
1932 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1933 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1934
1935 save_state(dc);
1936 /* According to the table in the UA2011 manual, the only
1937 other asis that are valid for ldfa/lddfa/ldqfa are
1938 the NO_FAULT asis. We still need a helper for these,
1939 but we can just use the integer asi helper for them. */
1940 switch (size) {
1941 case MO_32:
1942 d64 = tcg_temp_new_i64();
1943 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1944 d32 = tcg_temp_new_i32();
1945 tcg_gen_extrl_i64_i32(d32, d64);
1946 gen_store_fpr_F(dc, rd, d32);
1947 break;
1948 case MO_64:
1949 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1950 r_asi, r_mop);
1951 break;
1952 case MO_128:
1953 d64 = tcg_temp_new_i64();
1954 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1955 addr_tmp = tcg_temp_new();
1956 tcg_gen_addi_tl(addr_tmp, addr, 8);
1957 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1958 r_asi, r_mop);
1959 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1960 break;
1961 default:
1962 g_assert_not_reached();
1963 }
1964 }
1965 break;
1966 }
1967 }
1968
1969 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1970 TCGv addr, int rd)
1971 {
1972 MemOp memop = da->memop;
1973 MemOp size = memop & MO_SIZE;
1974 TCGv_i32 d32;
1975 TCGv addr_tmp;
1976
1977 /* TODO: Use 128-bit load/store below. */
1978 if (size == MO_128) {
1979 memop = (memop & ~MO_SIZE) | MO_64;
1980 }
1981
1982 switch (da->type) {
1983 case GET_ASI_EXCP:
1984 break;
1985
1986 case GET_ASI_DIRECT:
1987 memop |= MO_ALIGN_4;
1988 switch (size) {
1989 case MO_32:
1990 d32 = gen_load_fpr_F(dc, rd);
1991 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
1992 break;
1993 case MO_64:
1994 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1995 memop | MO_ALIGN_4);
1996 break;
1997 case MO_128:
1998 /* Only 4-byte alignment required. However, it is legal for the
1999 cpu to signal the alignment fault, and the OS trap handler is
2000 required to fix it up. Requiring 16-byte alignment here avoids
2001 having to probe the second page before performing the first
2002 write. */
2003 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2004 memop | MO_ALIGN_16);
2005 addr_tmp = tcg_temp_new();
2006 tcg_gen_addi_tl(addr_tmp, addr, 8);
2007 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2008 break;
2009 default:
2010 g_assert_not_reached();
2011 }
2012 break;
2013
2014 case GET_ASI_BLOCK:
2015 /* Valid for stdfa on aligned registers only. */
2016 if (orig_size == MO_64 && (rd & 7) == 0) {
2017 /* The first operation checks required alignment. */
2018 addr_tmp = tcg_temp_new();
2019 for (int i = 0; ; ++i) {
2020 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2021 memop | (i == 0 ? MO_ALIGN_64 : 0));
2022 if (i == 7) {
2023 break;
2024 }
2025 tcg_gen_addi_tl(addr_tmp, addr, 8);
2026 addr = addr_tmp;
2027 }
2028 } else {
2029 gen_exception(dc, TT_ILL_INSN);
2030 }
2031 break;
2032
2033 case GET_ASI_SHORT:
2034 /* Valid for stdfa only. */
2035 if (orig_size == MO_64) {
2036 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2037 memop | MO_ALIGN);
2038 } else {
2039 gen_exception(dc, TT_ILL_INSN);
2040 }
2041 break;
2042
2043 default:
2044 /* According to the table in the UA2011 manual, the only
2045 other asis that are valid for ldfa/lddfa/ldqfa are
2046 the PST* asis, which aren't currently handled. */
2047 gen_exception(dc, TT_ILL_INSN);
2048 break;
2049 }
2050 }
2051
2052 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2053 {
2054 TCGv hi = gen_dest_gpr(dc, rd);
2055 TCGv lo = gen_dest_gpr(dc, rd + 1);
2056
2057 switch (da->type) {
2058 case GET_ASI_EXCP:
2059 return;
2060
2061 case GET_ASI_DTWINX:
2062 #ifdef TARGET_SPARC64
2063 {
2064 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2065 TCGv_i128 t = tcg_temp_new_i128();
2066
2067 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2068 /*
2069 * Note that LE twinx acts as if each 64-bit register result is
2070 * byte swapped. We perform one 128-bit LE load, so must swap
2071 * the order of the writebacks.
2072 */
2073 if ((mop & MO_BSWAP) == MO_TE) {
2074 tcg_gen_extr_i128_i64(lo, hi, t);
2075 } else {
2076 tcg_gen_extr_i128_i64(hi, lo, t);
2077 }
2078 }
2079 break;
2080 #else
2081 g_assert_not_reached();
2082 #endif
2083
2084 case GET_ASI_DIRECT:
2085 {
2086 TCGv_i64 tmp = tcg_temp_new_i64();
2087
2088 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2089
2090             /* Note that LE ldda acts as if each 32-bit register
2091                result is byte swapped.  Having just performed one
2092                64-bit bswap, we now need to swap the writebacks. */
2093 if ((da->memop & MO_BSWAP) == MO_TE) {
2094 tcg_gen_extr_i64_tl(lo, hi, tmp);
2095 } else {
2096 tcg_gen_extr_i64_tl(hi, lo, tmp);
2097 }
2098 }
2099 break;
2100
2101 default:
2102 /* ??? In theory we've handled all of the ASIs that are valid
2103 for ldda, and this should raise DAE_invalid_asi. However,
2104 real hardware allows others. This can be seen with e.g.
2105 FreeBSD 10.3 wrt ASI_IC_TAG. */
2106 {
2107 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2108 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2109 TCGv_i64 tmp = tcg_temp_new_i64();
2110
2111 save_state(dc);
2112 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2113
2114 /* See above. */
2115 if ((da->memop & MO_BSWAP) == MO_TE) {
2116 tcg_gen_extr_i64_tl(lo, hi, tmp);
2117 } else {
2118 tcg_gen_extr_i64_tl(hi, lo, tmp);
2119 }
2120 }
2121 break;
2122 }
2123
2124 gen_store_gpr(dc, rd, hi);
2125 gen_store_gpr(dc, rd + 1, lo);
2126 }
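
/*
 * Worked example (illustrative): a little-endian ldda of the bytes
 * 01 02 03 04 05 06 07 08 loads tmp = 0x0807060504030201; the swapped
 * extraction above then yields rd = 0x04030201 and rd + 1 = 0x08070605,
 * i.e. each 32-bit register individually byte swapped, as architected.
 */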
2127
2128 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2129 {
2130 TCGv hi = gen_load_gpr(dc, rd);
2131 TCGv lo = gen_load_gpr(dc, rd + 1);
2132
2133 switch (da->type) {
2134 case GET_ASI_EXCP:
2135 break;
2136
2137 case GET_ASI_DTWINX:
2138 #ifdef TARGET_SPARC64
2139 {
2140 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2141 TCGv_i128 t = tcg_temp_new_i128();
2142
2143 /*
2144 * Note that LE twinx acts as if each 64-bit register result is
2145 * byte swapped. We perform one 128-bit LE store, so must swap
2146 * the order of the construction.
2147 */
2148 if ((mop & MO_BSWAP) == MO_TE) {
2149 tcg_gen_concat_i64_i128(t, lo, hi);
2150 } else {
2151 tcg_gen_concat_i64_i128(t, hi, lo);
2152 }
2153 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2154 }
2155 break;
2156 #else
2157 g_assert_not_reached();
2158 #endif
2159
2160 case GET_ASI_DIRECT:
2161 {
2162 TCGv_i64 t64 = tcg_temp_new_i64();
2163
2164 /* Note that LE stda acts as if each 32-bit register result is
2165 byte swapped. We will perform one 64-bit LE store, so now
2166 we must swap the order of the construction. */
2167 if ((da->memop & MO_BSWAP) == MO_TE) {
2168 tcg_gen_concat_tl_i64(t64, lo, hi);
2169 } else {
2170 tcg_gen_concat_tl_i64(t64, hi, lo);
2171 }
2172 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2173 }
2174 break;
2175
2176 case GET_ASI_BFILL:
2177 assert(TARGET_LONG_BITS == 32);
2178 /*
2179 * Store 32 bytes of [rd:rd+1] to ADDR.
2180 * See comments for GET_ASI_COPY above.
2181 */
2182 {
2183 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2184 TCGv_i64 t8 = tcg_temp_new_i64();
2185 TCGv_i128 t16 = tcg_temp_new_i128();
2186 TCGv daddr = tcg_temp_new();
2187
2188 tcg_gen_concat_tl_i64(t8, lo, hi);
2189 tcg_gen_concat_i64_i128(t16, t8, t8);
2190 tcg_gen_andi_tl(daddr, addr, -32);
2191 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2192 tcg_gen_addi_tl(daddr, daddr, 16);
2193 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2194 }
2195 break;
2196
2197 default:
2198 /* ??? In theory we've handled all of the ASIs that are valid
2199 for stda, and this should raise DAE_invalid_asi. */
2200 {
2201 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2202 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2203 TCGv_i64 t64 = tcg_temp_new_i64();
2204
2205 /* See above. */
2206 if ((da->memop & MO_BSWAP) == MO_TE) {
2207 tcg_gen_concat_tl_i64(t64, lo, hi);
2208 } else {
2209 tcg_gen_concat_tl_i64(t64, hi, lo);
2210 }
2211
2212 save_state(dc);
2213 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2214 }
2215 break;
2216 }
2217 }
2218
2219 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2220 {
2221 #ifdef TARGET_SPARC64
2222 TCGv_i32 c32, zero, dst, s1, s2;
2223 TCGv_i64 c64 = tcg_temp_new_i64();
2224
2225     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2226        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2227        the latter. */
2228 c32 = tcg_temp_new_i32();
2229 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2230 tcg_gen_extrl_i64_i32(c32, c64);
2231
2232 s1 = gen_load_fpr_F(dc, rs);
2233 s2 = gen_load_fpr_F(dc, rd);
2234 dst = tcg_temp_new_i32();
2235 zero = tcg_constant_i32(0);
2236
2237 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2238
2239 gen_store_fpr_F(dc, rd, dst);
2240 #else
2241 qemu_build_not_reached();
2242 #endif
2243 }
2244
2245 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2246 {
2247 #ifdef TARGET_SPARC64
2248 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2249 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2250 gen_load_fpr_D(dc, rs),
2251 gen_load_fpr_D(dc, rd));
2252 gen_store_fpr_D(dc, rd, dst);
2253 #else
2254 qemu_build_not_reached();
2255 #endif
2256 }
2257
2258 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2259 {
2260 #ifdef TARGET_SPARC64
2261 int qd = QFPREG(rd);
2262 int qs = QFPREG(rs);
2263 TCGv c2 = tcg_constant_tl(cmp->c2);
2264
2265 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2266 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2267 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2268 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2269
2270 gen_update_fprs_dirty(dc, qd);
2271 #else
2272 qemu_build_not_reached();
2273 #endif
2274 }
2275
2276 #ifdef TARGET_SPARC64
2277 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2278 {
2279 TCGv_i32 r_tl = tcg_temp_new_i32();
2280
2281 /* load env->tl into r_tl */
2282 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2283
2284     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2285     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2286
2287     /* calculate offset to current trap state from env->ts, reuse r_tl */
2288     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2289 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2290
2291 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2292 {
2293 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2294 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2295 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2296 }
2297 }
2298 #endif
2299
2300 static int extract_dfpreg(DisasContext *dc, int x)
2301 {
2302 return DFPREG(x);
2303 }
2304
2305 static int extract_qfpreg(DisasContext *dc, int x)
2306 {
2307 return QFPREG(x);
2308 }
2309
2310 /* Include the auto-generated decoder. */
2311 #include "decode-insns.c.inc"
2312
2313 #define TRANS(NAME, AVAIL, FUNC, ...) \
2314 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2315 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2316
2317 #define avail_ALL(C) true
2318 #ifdef TARGET_SPARC64
2319 # define avail_32(C) false
2320 # define avail_ASR17(C) false
2321 # define avail_CASA(C) true
2322 # define avail_DIV(C) true
2323 # define avail_MUL(C) true
2324 # define avail_POWERDOWN(C) false
2325 # define avail_64(C) true
2326 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2327 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2328 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2329 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2330 #else
2331 # define avail_32(C) true
2332 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2333 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2334 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2335 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2336 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2337 # define avail_64(C) false
2338 # define avail_GL(C) false
2339 # define avail_HYPV(C) false
2340 # define avail_VIS1(C) false
2341 # define avail_VIS2(C) false
2342 #endif
2343
2344 /* Default case for non-jump instructions. */
2345 static bool advance_pc(DisasContext *dc)
2346 {
2347 TCGLabel *l1;
2348
2349 finishing_insn(dc);
2350
2351 if (dc->npc & 3) {
2352 switch (dc->npc) {
2353 case DYNAMIC_PC:
2354 case DYNAMIC_PC_LOOKUP:
2355 dc->pc = dc->npc;
2356 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2357 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2358 break;
2359
2360 case JUMP_PC:
2361 /* we can do a static jump */
2362 l1 = gen_new_label();
2363 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2364
2365 /* jump not taken */
2366 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2367
2368 /* jump taken */
2369 gen_set_label(l1);
2370 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2371
2372 dc->base.is_jmp = DISAS_NORETURN;
2373 break;
2374
2375 default:
2376 g_assert_not_reached();
2377 }
2378 } else {
2379 dc->pc = dc->npc;
2380 dc->npc = dc->npc + 4;
2381 }
2382 return true;
2383 }
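
/*
 * Worked example (illustrative): with pc = 0x1000 and npc = 0x1004, a
 * non-branching insn advances to pc = 0x1004, npc = 0x1008.  When a
 * preceding conditional branch has left npc = JUMP_PC, the two static
 * targets sit in jump_pc[0] (taken) and jump_pc[1] (not taken), and
 * the brcond above selects between them after the delay slot.
 */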
2384
2385 /*
2386 * Major opcodes 00 and 01 -- branches, call, and sethi
2387 */
2388
2389 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2390 bool annul, int disp)
2391 {
2392 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2393 target_ulong npc;
2394
2395 finishing_insn(dc);
2396
2397 if (cmp->cond == TCG_COND_ALWAYS) {
2398 if (annul) {
2399 dc->pc = dest;
2400 dc->npc = dest + 4;
2401 } else {
2402 gen_mov_pc_npc(dc);
2403 dc->npc = dest;
2404 }
2405 return true;
2406 }
2407
2408 if (cmp->cond == TCG_COND_NEVER) {
2409 npc = dc->npc;
2410 if (npc & 3) {
2411 gen_mov_pc_npc(dc);
2412 if (annul) {
2413 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2414 }
2415 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2416 } else {
2417 dc->pc = npc + (annul ? 4 : 0);
2418 dc->npc = dc->pc + 4;
2419 }
2420 return true;
2421 }
2422
2423 flush_cond(dc);
2424 npc = dc->npc;
2425
2426 if (annul) {
2427 TCGLabel *l1 = gen_new_label();
2428
2429 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2430 gen_goto_tb(dc, 0, npc, dest);
2431 gen_set_label(l1);
2432 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2433
2434 dc->base.is_jmp = DISAS_NORETURN;
2435 } else {
2436 if (npc & 3) {
2437 switch (npc) {
2438 case DYNAMIC_PC:
2439 case DYNAMIC_PC_LOOKUP:
2440 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2441 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2442 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2443 cmp->c1, tcg_constant_tl(cmp->c2),
2444 tcg_constant_tl(dest), cpu_npc);
2445 dc->pc = npc;
2446 break;
2447 default:
2448 g_assert_not_reached();
2449 }
2450 } else {
2451 dc->pc = npc;
2452 dc->npc = JUMP_PC;
2453 dc->jump = *cmp;
2454 dc->jump_pc[0] = dest;
2455 dc->jump_pc[1] = npc + 4;
2456
2457 /* The condition for cpu_cond is always NE -- normalize. */
2458 if (cmp->cond == TCG_COND_NE) {
2459 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2460 } else {
2461 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2462 }
2463 dc->cpu_cond_live = true;
2464 }
2465 }
2466 return true;
2467 }
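
/*
 * Worked example (illustrative): for an annulling branch such as
 * "be,a dest" with the condition false, the code above branches to
 * npc + 4 and the delay-slot insn at npc is skipped; with the
 * condition true, the delay slot at npc executes and control then
 * transfers to dest.
 */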
2468
2469 static bool raise_priv(DisasContext *dc)
2470 {
2471 gen_exception(dc, TT_PRIV_INSN);
2472 return true;
2473 }
2474
2475 static bool raise_unimpfpop(DisasContext *dc)
2476 {
2477 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2478 return true;
2479 }
2480
2481 static bool gen_trap_float128(DisasContext *dc)
2482 {
2483 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2484 return false;
2485 }
2486 return raise_unimpfpop(dc);
2487 }
2488
2489 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2490 {
2491 DisasCompare cmp;
2492
2493 gen_compare(&cmp, a->cc, a->cond, dc);
2494 return advance_jump_cond(dc, &cmp, a->a, a->i);
2495 }
2496
2497 TRANS(Bicc, ALL, do_bpcc, a)
2498 TRANS(BPcc, 64, do_bpcc, a)
2499
2500 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2501 {
2502 DisasCompare cmp;
2503
2504 if (gen_trap_ifnofpu(dc)) {
2505 return true;
2506 }
2507 gen_fcompare(&cmp, a->cc, a->cond);
2508 return advance_jump_cond(dc, &cmp, a->a, a->i);
2509 }
2510
2511 TRANS(FBPfcc, 64, do_fbpfcc, a)
2512 TRANS(FBfcc, ALL, do_fbpfcc, a)
2513
2514 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2515 {
2516 DisasCompare cmp;
2517
2518 if (!avail_64(dc)) {
2519 return false;
2520 }
2521 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2522 return false;
2523 }
2524 return advance_jump_cond(dc, &cmp, a->a, a->i);
2525 }
2526
2527 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2528 {
2529 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2530
2531 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2532 gen_mov_pc_npc(dc);
2533 dc->npc = target;
2534 return true;
2535 }
2536
2537 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2538 {
2539 /*
2540 * For sparc32, always generate the no-coprocessor exception.
2541      * For sparc64, always generate an illegal instruction trap.
2542 */
2543 #ifdef TARGET_SPARC64
2544 return false;
2545 #else
2546 gen_exception(dc, TT_NCP_INSN);
2547 return true;
2548 #endif
2549 }
2550
2551 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2552 {
2553 /* Special-case %g0 because that's the canonical nop. */
2554 if (a->rd) {
2555 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2556 }
2557 return advance_pc(dc);
2558 }
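
/*
 * Example (illustrative): "sethi %hi(0x12345678), %o0" carries the
 * imm22 value 0x48d15, so %o0 receives 0x48d15 << 10 == 0x12345400;
 * the low ten bits are typically supplied by a following
 * "or %o0, 0x278, %o0".
 */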
2559
2560 /*
2561 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2562 */
2563
2564 static bool do_tcc(DisasContext *dc, int cond, int cc,
2565 int rs1, bool imm, int rs2_or_imm)
2566 {
2567 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2568 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2569 DisasCompare cmp;
2570 TCGLabel *lab;
2571 TCGv_i32 trap;
2572
2573 /* Trap never. */
2574 if (cond == 0) {
2575 return advance_pc(dc);
2576 }
2577
2578 /*
2579 * Immediate traps are the most common case. Since this value is
2580 * live across the branch, it really pays to evaluate the constant.
2581 */
2582 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2583 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2584 } else {
2585 trap = tcg_temp_new_i32();
2586 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2587 if (imm) {
2588 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2589 } else {
2590 TCGv_i32 t2 = tcg_temp_new_i32();
2591 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2592 tcg_gen_add_i32(trap, trap, t2);
2593 }
2594 tcg_gen_andi_i32(trap, trap, mask);
2595 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2596 }
2597
2598 finishing_insn(dc);
2599
2600 /* Trap always. */
2601 if (cond == 8) {
2602 save_state(dc);
2603 gen_helper_raise_exception(tcg_env, trap);
2604 dc->base.is_jmp = DISAS_NORETURN;
2605 return true;
2606 }
2607
2608 /* Conditional trap. */
2609 flush_cond(dc);
2610 lab = delay_exceptionv(dc, trap);
2611 gen_compare(&cmp, cc, cond, dc);
2612 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2613
2614 return advance_pc(dc);
2615 }
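
/*
 * Example (illustrative): "ta 0x10" uses cond == 8, so the constant
 * trap number (0x10 & mask) + TT_TRAP is raised unconditionally via
 * gen_helper_raise_exception, ending the TB.
 */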
2616
2617 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2618 {
2619 if (avail_32(dc) && a->cc) {
2620 return false;
2621 }
2622 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2623 }
2624
2625 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2626 {
2627 if (avail_64(dc)) {
2628 return false;
2629 }
2630 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2631 }
2632
2633 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2634 {
2635 if (avail_32(dc)) {
2636 return false;
2637 }
2638 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2639 }
2640
2641 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2642 {
2643 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2644 return advance_pc(dc);
2645 }
2646
2647 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2648 {
2649 if (avail_32(dc)) {
2650 return false;
2651 }
2652 if (a->mmask) {
2653 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2654 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2655 }
2656 if (a->cmask) {
2657 /* For #Sync, etc, end the TB to recognize interrupts. */
2658 dc->base.is_jmp = DISAS_EXIT;
2659 }
2660 return advance_pc(dc);
2661 }
2662
2663 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2664 TCGv (*func)(DisasContext *, TCGv))
2665 {
2666 if (!priv) {
2667 return raise_priv(dc);
2668 }
2669 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2670 return advance_pc(dc);
2671 }
2672
2673 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2674 {
2675 return cpu_y;
2676 }
2677
2678 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2679 {
2680 /*
2681 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2682 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2683      * This pattern matches after all other ASR reads, so Leon3 %asr17 is handled first.
2684 */
2685 if (avail_64(dc) && a->rs1 != 0) {
2686 return false;
2687 }
2688 return do_rd_special(dc, true, a->rd, do_rdy);
2689 }
2690
2691 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2692 {
2693 uint32_t val;
2694
2695 /*
2696 * TODO: There are many more fields to be filled,
2697 * some of which are writable.
2698 */
2699 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2700 val |= 1 << 8; /* [8] V8 */
2701
2702 return tcg_constant_tl(val);
2703 }
2704
2705 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2706
2707 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2708 {
2709 gen_helper_rdccr(dst, tcg_env);
2710 return dst;
2711 }
2712
2713 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2714
2715 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2716 {
2717 #ifdef TARGET_SPARC64
2718 return tcg_constant_tl(dc->asi);
2719 #else
2720 qemu_build_not_reached();
2721 #endif
2722 }
2723
2724 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2725
2726 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2727 {
2728 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2729
2730 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2731 if (translator_io_start(&dc->base)) {
2732 dc->base.is_jmp = DISAS_EXIT;
2733 }
2734 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2735 tcg_constant_i32(dc->mem_idx));
2736 return dst;
2737 }
2738
2739 /* TODO: non-priv access only allowed when enabled. */
2740 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2741
2742 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2743 {
2744 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2745 }
2746
2747 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2748
2749 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2750 {
2751 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2752 return dst;
2753 }
2754
2755 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2756
2757 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2758 {
2759 gen_trap_ifnofpu(dc);
2760 return cpu_gsr;
2761 }
2762
2763 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2764
2765 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2766 {
2767 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2768 return dst;
2769 }
2770
2771 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2772
2773 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2774 {
2775 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2776 return dst;
2777 }
2778
2779 /* TODO: non-priv access only allowed when enabled. */
2780 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2781
2782 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2783 {
2784 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2785
2786 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2787 if (translator_io_start(&dc->base)) {
2788 dc->base.is_jmp = DISAS_EXIT;
2789 }
2790 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2791 tcg_constant_i32(dc->mem_idx));
2792 return dst;
2793 }
2794
2795 /* TODO: non-priv access only allowed when enabled. */
2796 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2797
2798 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2799 {
2800 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2801 return dst;
2802 }
2803
2804 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2805 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2806
2807 /*
2808  * UltraSPARC-T1 Strand status.
2809  * The HYPV check may not be sufficient: UA2005 and UA2007 describe
2810  * this ASR as implementation dependent.
2811  */
2812 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2813 {
2814 return tcg_constant_tl(1);
2815 }
2816
2817 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2818
2819 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2820 {
2821 gen_helper_rdpsr(dst, tcg_env);
2822 return dst;
2823 }
2824
2825 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2826
2827 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2828 {
2829 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2830 return dst;
2831 }
2832
2833 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2834
2835 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2836 {
2837 TCGv_i32 tl = tcg_temp_new_i32();
2838 TCGv_ptr tp = tcg_temp_new_ptr();
2839
2840 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2841 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2842 tcg_gen_shli_i32(tl, tl, 3);
2843 tcg_gen_ext_i32_ptr(tp, tl);
2844 tcg_gen_add_ptr(tp, tp, tcg_env);
2845
2846 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2847 return dst;
2848 }
2849
2850 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2851
2852 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2853 {
2854 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2855 return dst;
2856 }
2857
2858 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2859
2860 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2861 {
2862 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2863 return dst;
2864 }
2865
2866 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2867
2868 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2869 {
2870 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2871 return dst;
2872 }
2873
2874 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2875
2876 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2877 {
2878 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2879 return dst;
2880 }
2881
2882 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2883 do_rdhstick_cmpr)
2884
2885 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2886 {
2887 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2888 return dst;
2889 }
2890
2891 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2892
2893 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2894 {
2895 #ifdef TARGET_SPARC64
2896 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2897
2898 gen_load_trap_state_at_tl(r_tsptr);
2899 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2900 return dst;
2901 #else
2902 qemu_build_not_reached();
2903 #endif
2904 }
2905
2906 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2907
2908 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2909 {
2910 #ifdef TARGET_SPARC64
2911 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2912
2913 gen_load_trap_state_at_tl(r_tsptr);
2914 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2915 return dst;
2916 #else
2917 qemu_build_not_reached();
2918 #endif
2919 }
2920
2921 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2922
2923 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2924 {
2925 #ifdef TARGET_SPARC64
2926 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2927
2928 gen_load_trap_state_at_tl(r_tsptr);
2929 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2930 return dst;
2931 #else
2932 qemu_build_not_reached();
2933 #endif
2934 }
2935
2936 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2937
2938 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2939 {
2940 #ifdef TARGET_SPARC64
2941 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2942
2943 gen_load_trap_state_at_tl(r_tsptr);
2944 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2945 return dst;
2946 #else
2947 qemu_build_not_reached();
2948 #endif
2949 }
2950
2951 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2952 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2953
2954 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2955 {
2956 return cpu_tbr;
2957 }
2958
2959 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2960 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2961
2962 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2963 {
2964 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2965 return dst;
2966 }
2967
2968 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2969
2970 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2971 {
2972 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2973 return dst;
2974 }
2975
2976 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2977
2978 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2979 {
2980 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2981 return dst;
2982 }
2983
2984 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2985
2986 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2987 {
2988 gen_helper_rdcwp(dst, tcg_env);
2989 return dst;
2990 }
2991
2992 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
2993
2994 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
2995 {
2996 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
2997 return dst;
2998 }
2999
3000 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3001
3002 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3003 {
3004 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3005 return dst;
3006 }
3007
3008 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3009 do_rdcanrestore)
3010
3011 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3012 {
3013 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3014 return dst;
3015 }
3016
3017 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3018
3019 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3020 {
3021 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3022 return dst;
3023 }
3024
3025 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3026
3027 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3028 {
3029 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3030 return dst;
3031 }
3032
3033 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3034
3035 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3036 {
3037 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3038 return dst;
3039 }
3040
3041 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3042
3043 /* UA2005 strand status */
3044 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3045 {
3046 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3047 return dst;
3048 }
3049
3050 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3051
3052 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3053 {
3054 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3055 return dst;
3056 }
3057
3058 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3059
3060 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3061 {
3062 if (avail_64(dc)) {
3063 gen_helper_flushw(tcg_env);
3064 return advance_pc(dc);
3065 }
3066 return false;
3067 }
3068
3069 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3070 void (*func)(DisasContext *, TCGv))
3071 {
3072 TCGv src;
3073
3074 /* For simplicity, we under-decoded the rs2 form. */
3075 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3076 return false;
3077 }
3078 if (!priv) {
3079 return raise_priv(dc);
3080 }
3081
3082 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3083 src = tcg_constant_tl(a->rs2_or_imm);
3084 } else {
3085 TCGv src1 = gen_load_gpr(dc, a->rs1);
3086 if (a->rs2_or_imm == 0) {
3087 src = src1;
3088 } else {
3089 src = tcg_temp_new();
3090 if (a->imm) {
3091 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3092 } else {
3093 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3094 }
3095 }
3096 }
3097 func(dc, src);
3098 return advance_pc(dc);
3099 }
3100
3101 static void do_wry(DisasContext *dc, TCGv src)
3102 {
3103 tcg_gen_ext32u_tl(cpu_y, src);
3104 }
3105
3106 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3107
3108 static void do_wrccr(DisasContext *dc, TCGv src)
3109 {
3110 gen_helper_wrccr(tcg_env, src);
3111 }
3112
3113 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3114
3115 static void do_wrasi(DisasContext *dc, TCGv src)
3116 {
3117 TCGv tmp = tcg_temp_new();
3118
3119 tcg_gen_ext8u_tl(tmp, src);
3120 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3121 /* End TB to notice changed ASI. */
3122 dc->base.is_jmp = DISAS_EXIT;
3123 }
3124
3125 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3126
3127 static void do_wrfprs(DisasContext *dc, TCGv src)
3128 {
3129 #ifdef TARGET_SPARC64
3130 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3131 dc->fprs_dirty = 0;
3132 dc->base.is_jmp = DISAS_EXIT;
3133 #else
3134 qemu_build_not_reached();
3135 #endif
3136 }
3137
3138 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3139
3140 static void do_wrgsr(DisasContext *dc, TCGv src)
3141 {
3142 gen_trap_ifnofpu(dc);
3143 tcg_gen_mov_tl(cpu_gsr, src);
3144 }
3145
3146 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3147
3148 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3149 {
3150 gen_helper_set_softint(tcg_env, src);
3151 }
3152
3153 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3154
3155 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3156 {
3157 gen_helper_clear_softint(tcg_env, src);
3158 }
3159
3160 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3161
3162 static void do_wrsoftint(DisasContext *dc, TCGv src)
3163 {
3164 gen_helper_write_softint(tcg_env, src);
3165 }
3166
3167 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3168
3169 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3170 {
3171 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3172
3173 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3174 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3175 translator_io_start(&dc->base);
3176 gen_helper_tick_set_limit(r_tickptr, src);
3177 /* End TB to handle timer interrupt */
3178 dc->base.is_jmp = DISAS_EXIT;
3179 }
3180
3181 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3182
3183 static void do_wrstick(DisasContext *dc, TCGv src)
3184 {
3185 #ifdef TARGET_SPARC64
3186 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3187
3188 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3189 translator_io_start(&dc->base);
3190 gen_helper_tick_set_count(r_tickptr, src);
3191 /* End TB to handle timer interrupt */
3192 dc->base.is_jmp = DISAS_EXIT;
3193 #else
3194 qemu_build_not_reached();
3195 #endif
3196 }
3197
3198 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3199
3200 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3201 {
3202 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3203
3204 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3205 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3206 translator_io_start(&dc->base);
3207 gen_helper_tick_set_limit(r_tickptr, src);
3208 /* End TB to handle timer interrupt */
3209 dc->base.is_jmp = DISAS_EXIT;
3210 }
3211
3212 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3213
3214 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3215 {
3216 finishing_insn(dc);
3217 save_state(dc);
3218 gen_helper_power_down(tcg_env);
3219 }
3220
3221 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3222
3223 static void do_wrpsr(DisasContext *dc, TCGv src)
3224 {
3225 gen_helper_wrpsr(tcg_env, src);
3226 dc->base.is_jmp = DISAS_EXIT;
3227 }
3228
3229 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3230
3231 static void do_wrwim(DisasContext *dc, TCGv src)
3232 {
3233 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3234 TCGv tmp = tcg_temp_new();
3235
3236 tcg_gen_andi_tl(tmp, src, mask);
3237 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3238 }
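
/*
 * Example (illustrative): with nwindows == 8 the mask is 0xff, so a
 * "wr" to %wim keeps only the low eight bits of the written value.
 */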
3239
3240 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3241
3242 static void do_wrtpc(DisasContext *dc, TCGv src)
3243 {
3244 #ifdef TARGET_SPARC64
3245 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3246
3247 gen_load_trap_state_at_tl(r_tsptr);
3248 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3249 #else
3250 qemu_build_not_reached();
3251 #endif
3252 }
3253
3254 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3255
3256 static void do_wrtnpc(DisasContext *dc, TCGv src)
3257 {
3258 #ifdef TARGET_SPARC64
3259 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3260
3261 gen_load_trap_state_at_tl(r_tsptr);
3262 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3263 #else
3264 qemu_build_not_reached();
3265 #endif
3266 }
3267
3268 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3269
3270 static void do_wrtstate(DisasContext *dc, TCGv src)
3271 {
3272 #ifdef TARGET_SPARC64
3273 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3274
3275 gen_load_trap_state_at_tl(r_tsptr);
3276 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3277 #else
3278 qemu_build_not_reached();
3279 #endif
3280 }
3281
3282 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3283
3284 static void do_wrtt(DisasContext *dc, TCGv src)
3285 {
3286 #ifdef TARGET_SPARC64
3287 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3288
3289 gen_load_trap_state_at_tl(r_tsptr);
3290 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3291 #else
3292 qemu_build_not_reached();
3293 #endif
3294 }
3295
3296 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3297
3298 static void do_wrtick(DisasContext *dc, TCGv src)
3299 {
3300 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3301
3302 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3303 translator_io_start(&dc->base);
3304 gen_helper_tick_set_count(r_tickptr, src);
3305 /* End TB to handle timer interrupt */
3306 dc->base.is_jmp = DISAS_EXIT;
3307 }
3308
3309 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3310
3311 static void do_wrtba(DisasContext *dc, TCGv src)
3312 {
3313 tcg_gen_mov_tl(cpu_tbr, src);
3314 }
3315
3316 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3317
3318 static void do_wrpstate(DisasContext *dc, TCGv src)
3319 {
3320 save_state(dc);
3321 if (translator_io_start(&dc->base)) {
3322 dc->base.is_jmp = DISAS_EXIT;
3323 }
3324 gen_helper_wrpstate(tcg_env, src);
3325 dc->npc = DYNAMIC_PC;
3326 }
3327
3328 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3329
3330 static void do_wrtl(DisasContext *dc, TCGv src)
3331 {
3332 save_state(dc);
3333 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3334 dc->npc = DYNAMIC_PC;
3335 }
3336
3337 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3338
3339 static void do_wrpil(DisasContext *dc, TCGv src)
3340 {
3341 if (translator_io_start(&dc->base)) {
3342 dc->base.is_jmp = DISAS_EXIT;
3343 }
3344 gen_helper_wrpil(tcg_env, src);
3345 }
3346
3347 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3348
3349 static void do_wrcwp(DisasContext *dc, TCGv src)
3350 {
3351 gen_helper_wrcwp(tcg_env, src);
3352 }
3353
3354 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3355
3356 static void do_wrcansave(DisasContext *dc, TCGv src)
3357 {
3358 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3359 }
3360
3361 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3362
3363 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3364 {
3365 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3366 }
3367
3368 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3369
3370 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3371 {
3372 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3373 }
3374
3375 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3376
3377 static void do_wrotherwin(DisasContext *dc, TCGv src)
3378 {
3379 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3380 }
3381
3382 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3383
3384 static void do_wrwstate(DisasContext *dc, TCGv src)
3385 {
3386 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3387 }
3388
3389 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3390
3391 static void do_wrgl(DisasContext *dc, TCGv src)
3392 {
3393 gen_helper_wrgl(tcg_env, src);
3394 }
3395
3396 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3397
3398 /* UA2005 strand status */
3399 static void do_wrssr(DisasContext *dc, TCGv src)
3400 {
3401 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3402 }
3403
3404 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3405
3406 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3407
3408 static void do_wrhpstate(DisasContext *dc, TCGv src)
3409 {
3410 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3411 dc->base.is_jmp = DISAS_EXIT;
3412 }
3413
3414 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3415
3416 static void do_wrhtstate(DisasContext *dc, TCGv src)
3417 {
3418 TCGv_i32 tl = tcg_temp_new_i32();
3419 TCGv_ptr tp = tcg_temp_new_ptr();
3420
3421 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3422 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3423 tcg_gen_shli_i32(tl, tl, 3);
3424 tcg_gen_ext_i32_ptr(tp, tl);
3425 tcg_gen_add_ptr(tp, tp, tcg_env);
3426
3427 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3428 }
3429
3430 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3431
3432 static void do_wrhintp(DisasContext *dc, TCGv src)
3433 {
3434 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3435 }
3436
3437 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3438
3439 static void do_wrhtba(DisasContext *dc, TCGv src)
3440 {
3441 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3442 }
3443
3444 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3445
3446 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3447 {
3448 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3449
3450 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3451 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3452 translator_io_start(&dc->base);
3453 gen_helper_tick_set_limit(r_tickptr, src);
3454 /* End TB to handle timer interrupt */
3455 dc->base.is_jmp = DISAS_EXIT;
3456 }
3457
3458 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3459 do_wrhstick_cmpr)
3460
3461 static bool do_saved_restored(DisasContext *dc, bool saved)
3462 {
3463 if (!supervisor(dc)) {
3464 return raise_priv(dc);
3465 }
3466 if (saved) {
3467 gen_helper_saved(tcg_env);
3468 } else {
3469 gen_helper_restored(tcg_env);
3470 }
3471 return advance_pc(dc);
3472 }
3473
3474 TRANS(SAVED, 64, do_saved_restored, true)
3475 TRANS(RESTORED, 64, do_saved_restored, false)
3476
3477 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3478 {
3479 return advance_pc(dc);
3480 }
3481
3482 /*
3483 * TODO: Need a feature bit for sparcv8.
3484 * In the meantime, treat all 32-bit cpus like sparcv7.
3485 */
3486 TRANS(NOP_v7, 32, trans_NOP, a)
3487 TRANS(NOP_v9, 64, trans_NOP, a)
3488
3489 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3490 void (*func)(TCGv, TCGv, TCGv),
3491 void (*funci)(TCGv, TCGv, target_long),
3492 bool logic_cc)
3493 {
3494 TCGv dst, src1;
3495
3496 /* For simplicity, we under-decoded the rs2 form. */
3497 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3498 return false;
3499 }
3500
3501 if (logic_cc) {
3502 dst = cpu_cc_N;
3503 } else {
3504 dst = gen_dest_gpr(dc, a->rd);
3505 }
3506 src1 = gen_load_gpr(dc, a->rs1);
3507
3508 if (a->imm || a->rs2_or_imm == 0) {
3509 if (funci) {
3510 funci(dst, src1, a->rs2_or_imm);
3511 } else {
3512 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3513 }
3514 } else {
3515 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3516 }
3517
3518 if (logic_cc) {
3519 if (TARGET_LONG_BITS == 64) {
3520 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3521 tcg_gen_movi_tl(cpu_icc_C, 0);
3522 }
3523 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3524 tcg_gen_movi_tl(cpu_cc_C, 0);
3525 tcg_gen_movi_tl(cpu_cc_V, 0);
3526 }
3527
3528 gen_store_gpr(dc, a->rd, dst);
3529 return advance_pc(dc);
3530 }
3531
3532 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3533 void (*func)(TCGv, TCGv, TCGv),
3534 void (*funci)(TCGv, TCGv, target_long),
3535 void (*func_cc)(TCGv, TCGv, TCGv))
3536 {
3537 if (a->cc) {
3538 return do_arith_int(dc, a, func_cc, NULL, false);
3539 }
3540 return do_arith_int(dc, a, func, funci, false);
3541 }
3542
3543 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3544 void (*func)(TCGv, TCGv, TCGv),
3545 void (*funci)(TCGv, TCGv, target_long))
3546 {
3547 return do_arith_int(dc, a, func, funci, a->cc);
3548 }
3549
3550 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3551 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3552 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3553 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3554
3555 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3556 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3557 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3558 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3559
3560 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3561 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3562 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3563 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3564 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3565
3566 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3567 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3568 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3569 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3570
3571 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3572 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3573
3574 /* TODO: Should have a feature bit -- comes in with UltraSPARC T2. */
3575 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3576
3577 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3578 {
3579 /* OR with %g0 is the canonical alias for MOV. */
3580 if (!a->cc && a->rs1 == 0) {
3581 if (a->imm || a->rs2_or_imm == 0) {
3582 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3583 } else if (a->rs2_or_imm & ~0x1f) {
3584 /* For simplicity, we under-decoded the rs2 form. */
3585 return false;
3586 } else {
3587 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3588 }
3589 return advance_pc(dc);
3590 }
3591 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3592 }
3593
3594 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3595 {
3596 TCGv_i64 t1, t2;
3597 TCGv dst;
3598
3599 if (!avail_DIV(dc)) {
3600 return false;
3601 }
3602 /* For simplicity, we under-decoded the rs2 form. */
3603 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3604 return false;
3605 }
3606
3607 if (unlikely(a->rs2_or_imm == 0)) {
3608 gen_exception(dc, TT_DIV_ZERO);
3609 return true;
3610 }
3611
3612 if (a->imm) {
3613 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3614 } else {
3615 TCGLabel *lab;
3616 TCGv_i32 n2;
3617
3618 finishing_insn(dc);
3619 flush_cond(dc);
3620
3621 n2 = tcg_temp_new_i32();
3622 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3623
3624 lab = delay_exception(dc, TT_DIV_ZERO);
3625 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3626
3627 t2 = tcg_temp_new_i64();
3628 #ifdef TARGET_SPARC64
3629 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3630 #else
3631 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3632 #endif
3633 }
3634
3635 t1 = tcg_temp_new_i64();
3636 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3637
3638 tcg_gen_divu_i64(t1, t1, t2);
3639 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3640
3641 dst = gen_dest_gpr(dc, a->rd);
3642 tcg_gen_trunc_i64_tl(dst, t1);
3643 gen_store_gpr(dc, a->rd, dst);
3644 return advance_pc(dc);
3645 }
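
/*
 * Worked example (illustrative): with %y = 1 and rs1 = 0 the 64-bit
 * dividend is 0x1_0000_0000.  Dividing by 2 gives 0x8000_0000, which
 * fits in 32 bits; dividing by 1 would give 0x1_0000_0000, so the
 * umin above saturates the result to UINT32_MAX, as UDIV requires
 * on overflow.
 */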
3646
3647 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3648 {
3649 TCGv dst, src1, src2;
3650
3651 if (!avail_64(dc)) {
3652 return false;
3653 }
3654 /* For simplicity, we under-decoded the rs2 form. */
3655 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3656 return false;
3657 }
3658
3659 if (unlikely(a->rs2_or_imm == 0)) {
3660 gen_exception(dc, TT_DIV_ZERO);
3661 return true;
3662 }
3663
3664 if (a->imm) {
3665 src2 = tcg_constant_tl(a->rs2_or_imm);
3666 } else {
3667 TCGLabel *lab;
3668
3669 finishing_insn(dc);
3670 flush_cond(dc);
3671
3672 lab = delay_exception(dc, TT_DIV_ZERO);
3673 src2 = cpu_regs[a->rs2_or_imm];
3674 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3675 }
3676
3677 dst = gen_dest_gpr(dc, a->rd);
3678 src1 = gen_load_gpr(dc, a->rs1);
3679
3680 tcg_gen_divu_tl(dst, src1, src2);
3681 gen_store_gpr(dc, a->rd, dst);
3682 return advance_pc(dc);
3683 }
3684
3685 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3686 {
3687 TCGv dst, src1, src2;
3688
3689 if (!avail_64(dc)) {
3690 return false;
3691 }
3692 /* For simplicity, we under-decoded the rs2 form. */
3693 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3694 return false;
3695 }
3696
3697 if (unlikely(a->rs2_or_imm == 0)) {
3698 gen_exception(dc, TT_DIV_ZERO);
3699 return true;
3700 }
3701
3702 dst = gen_dest_gpr(dc, a->rd);
3703 src1 = gen_load_gpr(dc, a->rs1);
3704
3705 if (a->imm) {
3706 if (unlikely(a->rs2_or_imm == -1)) {
3707 tcg_gen_neg_tl(dst, src1);
3708 gen_store_gpr(dc, a->rd, dst);
3709 return advance_pc(dc);
3710 }
3711 src2 = tcg_constant_tl(a->rs2_or_imm);
3712 } else {
3713 TCGLabel *lab;
3714 TCGv t1, t2;
3715
3716 finishing_insn(dc);
3717 flush_cond(dc);
3718
3719 lab = delay_exception(dc, TT_DIV_ZERO);
3720 src2 = cpu_regs[a->rs2_or_imm];
3721 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3722
3723 /*
3724          * Need to avoid INT64_MIN / -1, which would trap on an x86 host.
3725 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3726 */
3727 t1 = tcg_temp_new();
3728 t2 = tcg_temp_new();
3729 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3730 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3731 tcg_gen_and_tl(t1, t1, t2);
3732 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3733 tcg_constant_tl(1), src2);
3734 src2 = t1;
3735 }
3736
3737 tcg_gen_div_tl(dst, src1, src2);
3738 gen_store_gpr(dc, a->rd, dst);
3739 return advance_pc(dc);
3740 }
3741
3742 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3743 int width, bool cc, bool left)
3744 {
3745 TCGv dst, s1, s2, lo1, lo2;
3746 uint64_t amask, tabl, tabr;
3747 int shift, imask, omask;
3748
3749 dst = gen_dest_gpr(dc, a->rd);
3750 s1 = gen_load_gpr(dc, a->rs1);
3751 s2 = gen_load_gpr(dc, a->rs2);
3752
3753 if (cc) {
3754 gen_op_subcc(cpu_cc_N, s1, s2);
3755 }
3756
3757 /*
3758 * Theory of operation: there are two tables, left and right (not to
3759 * be confused with the left and right versions of the opcode). These
3760 * are indexed by the low 3 bits of the inputs. To make things "easy",
3761 * these tables are loaded into two constants, TABL and TABR below.
3762 * The operation index = (input & imask) << shift calculates the index
3763 * into the constant, while val = (table >> index) & omask calculates
3764 * the value we're looking for.
3765 */
3766 switch (width) {
3767 case 8:
3768 imask = 0x7;
3769 shift = 3;
3770 omask = 0xff;
3771 if (left) {
3772 tabl = 0x80c0e0f0f8fcfeffULL;
3773 tabr = 0xff7f3f1f0f070301ULL;
3774 } else {
3775 tabl = 0x0103070f1f3f7fffULL;
3776 tabr = 0xfffefcf8f0e0c080ULL;
3777 }
3778 break;
3779 case 16:
3780 imask = 0x6;
3781 shift = 1;
3782 omask = 0xf;
3783 if (left) {
3784 tabl = 0x8cef;
3785 tabr = 0xf731;
3786 } else {
3787 tabl = 0x137f;
3788 tabr = 0xfec8;
3789 }
3790 break;
3791 case 32:
3792 imask = 0x4;
3793 shift = 0;
3794 omask = 0x3;
3795 if (left) {
3796 tabl = (2 << 2) | 3;
3797 tabr = (3 << 2) | 1;
3798 } else {
3799 tabl = (1 << 2) | 3;
3800 tabr = (3 << 2) | 2;
3801 }
3802 break;
3803 default:
3804         g_assert_not_reached();
3805 }
3806
3807 lo1 = tcg_temp_new();
3808 lo2 = tcg_temp_new();
3809 tcg_gen_andi_tl(lo1, s1, imask);
3810 tcg_gen_andi_tl(lo2, s2, imask);
3811 tcg_gen_shli_tl(lo1, lo1, shift);
3812 tcg_gen_shli_tl(lo2, lo2, shift);
3813
3814 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3815 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3816 tcg_gen_andi_tl(lo1, lo1, omask);
3817 tcg_gen_andi_tl(lo2, lo2, omask);
3818
3819 amask = address_mask_i(dc, -8);
3820 tcg_gen_andi_tl(s1, s1, amask);
3821 tcg_gen_andi_tl(s2, s2, amask);
3822
3823 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3824 tcg_gen_and_tl(lo2, lo2, lo1);
3825 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3826
3827 gen_store_gpr(dc, a->rd, dst);
3828 return advance_pc(dc);
3829 }
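
/*
 * Worked example (illustrative): EDGE8cc with s1 = 0x1005 and
 * s2 = 0x2007 indexes the tables with (5 << 3) and (7 << 3):
 * lo1 = (0x0103070f1f3f7fffULL >> 40) & 0xff = 0x07 and
 * lo2 = (0xfffefcf8f0e0c080ULL >> 56) & 0xff = 0xff.  The aligned
 * addresses differ, so dst = lo1 & lo2 = 0x07, the mask for byte
 * offsets 5..7 of the aligned doubleword.
 */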
3830
3831 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3832 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3833 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3834 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3835 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3836 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3837
3838 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3839 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3840 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3841 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3842 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3843 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3844
3845 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3846 void (*func)(TCGv, TCGv, TCGv))
3847 {
3848 TCGv dst = gen_dest_gpr(dc, a->rd);
3849 TCGv src1 = gen_load_gpr(dc, a->rs1);
3850 TCGv src2 = gen_load_gpr(dc, a->rs2);
3851
3852 func(dst, src1, src2);
3853 gen_store_gpr(dc, a->rd, dst);
3854 return advance_pc(dc);
3855 }
3856
3857 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3858 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3859 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3860
3861 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3862 {
3863 #ifdef TARGET_SPARC64
3864 TCGv tmp = tcg_temp_new();
3865
3866 tcg_gen_add_tl(tmp, s1, s2);
3867 tcg_gen_andi_tl(dst, tmp, -8);
3868 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3869 #else
3870 g_assert_not_reached();
3871 #endif
3872 }
3873
3874 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3875 {
3876 #ifdef TARGET_SPARC64
3877 TCGv tmp = tcg_temp_new();
3878
3879 tcg_gen_add_tl(tmp, s1, s2);
3880 tcg_gen_andi_tl(dst, tmp, -8);
3881 tcg_gen_neg_tl(tmp, tmp);
3882 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3883 #else
3884 g_assert_not_reached();
3885 #endif
3886 }
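
/*
 * Example (illustrative): alignaddr with rs1 + rs2 = 0x1003 writes
 * 0x1000 to rd and 3 to GSR.align; a subsequent faligndata can then
 * extract eight bytes starting at that offset from a register pair.
 */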
3887
3888 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3889 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3890
3891 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3892 {
3893 #ifdef TARGET_SPARC64
3894 tcg_gen_add_tl(dst, s1, s2);
3895 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3896 #else
3897 g_assert_not_reached();
3898 #endif
3899 }
3900
3901 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3902
3903 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3904 {
3905 TCGv dst, src1, src2;
3906
3907 /* Reject 64-bit shifts for sparc32. */
3908 if (avail_32(dc) && a->x) {
3909 return false;
3910 }
3911
3912 src2 = tcg_temp_new();
3913 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3914 src1 = gen_load_gpr(dc, a->rs1);
3915 dst = gen_dest_gpr(dc, a->rd);
3916
3917 if (l) {
3918 tcg_gen_shl_tl(dst, src1, src2);
3919 if (!a->x) {
3920 tcg_gen_ext32u_tl(dst, dst);
3921 }
3922 } else if (u) {
3923 if (!a->x) {
3924 tcg_gen_ext32u_tl(dst, src1);
3925 src1 = dst;
3926 }
3927 tcg_gen_shr_tl(dst, src1, src2);
3928 } else {
3929 if (!a->x) {
3930 tcg_gen_ext32s_tl(dst, src1);
3931 src1 = dst;
3932 }
3933 tcg_gen_sar_tl(dst, src1, src2);
3934 }
3935 gen_store_gpr(dc, a->rd, dst);
3936 return advance_pc(dc);
3937 }
3938
3939 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3940 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3941 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3942
3943 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3944 {
3945 TCGv dst, src1;
3946
3947 /* Reject 64-bit shifts for sparc32. */
3948 if (avail_32(dc) && (a->x || a->i >= 32)) {
3949 return false;
3950 }
3951
3952 src1 = gen_load_gpr(dc, a->rs1);
3953 dst = gen_dest_gpr(dc, a->rd);
3954
3955 if (avail_32(dc) || a->x) {
3956 if (l) {
3957 tcg_gen_shli_tl(dst, src1, a->i);
3958 } else if (u) {
3959 tcg_gen_shri_tl(dst, src1, a->i);
3960 } else {
3961 tcg_gen_sari_tl(dst, src1, a->i);
3962 }
3963 } else {
3964 if (l) {
3965 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3966 } else if (u) {
3967 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3968 } else {
3969 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3970 }
3971 }
3972 gen_store_gpr(dc, a->rd, dst);
3973 return advance_pc(dc);
3974 }
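
/*
 * Example (illustrative): on sparc64 a 32-bit "sra" by 3 (a->x clear)
 * becomes a single sextract(src, 3, 29), extracting bits 31..3 of the
 * 32-bit view and sign-extending the result to 64 bits.
 */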
3975
3976 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3977 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3978 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3979
3980 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3981 {
3982 /* For simplicity, we under-decoded the rs2 form. */
3983 if (!imm && rs2_or_imm & ~0x1f) {
3984 return NULL;
3985 }
3986 if (imm || rs2_or_imm == 0) {
3987 return tcg_constant_tl(rs2_or_imm);
3988 } else {
3989 return cpu_regs[rs2_or_imm];
3990 }
3991 }
3992
3993 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
3994 {
3995 TCGv dst = gen_load_gpr(dc, rd);
3996 TCGv c2 = tcg_constant_tl(cmp->c2);
3997
3998 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
3999 gen_store_gpr(dc, rd, dst);
4000 return advance_pc(dc);
4001 }
4002
4003 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4004 {
4005 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4006 DisasCompare cmp;
4007
4008 if (src2 == NULL) {
4009 return false;
4010 }
4011 gen_compare(&cmp, a->cc, a->cond, dc);
4012 return do_mov_cond(dc, &cmp, a->rd, src2);
4013 }
4014
4015 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4016 {
4017 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4018 DisasCompare cmp;
4019
4020 if (src2 == NULL) {
4021 return false;
4022 }
4023 gen_fcompare(&cmp, a->cc, a->cond);
4024 return do_mov_cond(dc, &cmp, a->rd, src2);
4025 }
4026
4027 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4028 {
4029 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4030 DisasCompare cmp;
4031
4032 if (src2 == NULL) {
4033 return false;
4034 }
4035 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4036 return false;
4037 }
4038 return do_mov_cond(dc, &cmp, a->rd, src2);
4039 }
4040
4041 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4042 bool (*func)(DisasContext *dc, int rd, TCGv src))
4043 {
4044 TCGv src1, sum;
4045
4046 /* For simplicity, we under-decoded the rs2 form. */
4047 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4048 return false;
4049 }
4050
4051 /*
4052 * Always load the sum into a new temporary.
4053 * This is required to capture the value across a window change,
4054      * e.g. SAVE and RESTORE; the addition could otherwise be optimized away.
4055 */
4056 sum = tcg_temp_new();
4057 src1 = gen_load_gpr(dc, a->rs1);
4058 if (a->imm || a->rs2_or_imm == 0) {
4059 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4060 } else {
4061 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4062 }
4063 return func(dc, a->rd, sum);
4064 }
4065
4066 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4067 {
4068 /*
4069 * Preserve pc across advance, so that we can delay
4070 * the writeback to rd until after src is consumed.
4071 */
4072 target_ulong cur_pc = dc->pc;
4073
4074 gen_check_align(dc, src, 3);
4075
4076 gen_mov_pc_npc(dc);
4077 tcg_gen_mov_tl(cpu_npc, src);
4078 gen_address_mask(dc, cpu_npc);
4079 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4080
4081 dc->npc = DYNAMIC_PC_LOOKUP;
4082 return true;
4083 }
4084
4085 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4086
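/* RETT (sparc32 only): privileged return from trap to the computed sum. */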
4087 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4088 {
4089 if (!supervisor(dc)) {
4090 return raise_priv(dc);
4091 }
4092
4093 gen_check_align(dc, src, 3);
4094
4095 gen_mov_pc_npc(dc);
4096 tcg_gen_mov_tl(cpu_npc, src);
4097 gen_helper_rett(tcg_env);
4098
4099 dc->npc = DYNAMIC_PC;
4100 return true;
4101 }
4102
4103 TRANS(RETT, 32, do_add_special, a, do_rett)
4104
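/* RETURN (sparc64 only): restore the register window, then jump to the sum. */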
4105 static bool do_return(DisasContext *dc, int rd, TCGv src)
4106 {
4107 gen_check_align(dc, src, 3);
4108 gen_helper_restore(tcg_env);
4109
4110 gen_mov_pc_npc(dc);
4111 tcg_gen_mov_tl(cpu_npc, src);
4112 gen_address_mask(dc, cpu_npc);
4113
4114 dc->npc = DYNAMIC_PC_LOOKUP;
4115 return true;
4116 }
4117
4118 TRANS(RETURN, 64, do_add_special, a, do_return)
4119
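/*
 * SAVE: the sum was computed in the old window (see do_add_special);
 * the window advances before rd is written in the new window.
 * RESTORE below has the same ordering in the opposite direction.
 */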
4120 static bool do_save(DisasContext *dc, int rd, TCGv src)
4121 {
4122 gen_helper_save(tcg_env);
4123 gen_store_gpr(dc, rd, src);
4124 return advance_pc(dc);
4125 }
4126
4127 TRANS(SAVE, ALL, do_add_special, a, do_save)
4128
4129 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4130 {
4131 gen_helper_restore(tcg_env);
4132 gen_store_gpr(dc, rd, src);
4133 return advance_pc(dc);
4134 }
4135
4136 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4137
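/*
 * DONE and RETRY (sparc64 only): privileged trap returns.  The helpers
 * reload pc/npc from the trap state, so both become dynamic here.
 */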
4138 static bool do_done_retry(DisasContext *dc, bool done)
4139 {
4140 if (!supervisor(dc)) {
4141 return raise_priv(dc);
4142 }
4143 dc->npc = DYNAMIC_PC;
4144 dc->pc = DYNAMIC_PC;
4145 translator_io_start(&dc->base);
4146 if (done) {
4147 gen_helper_done(tcg_env);
4148 } else {
4149 gen_helper_retry(tcg_env);
4150 }
4151 return true;
4152 }
4153
4154 TRANS(DONE, 64, do_done_retry, true)
4155 TRANS(RETRY, 64, do_done_retry, false)
4156
4157 /*
4158 * Major opcode 11 -- load and store instructions
4159 */
4160
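/*
 * Compute the effective address rs1 + (rs2 or simm), zero-extended
 * to 32 bits when the address mask applies.  Returns NULL when
 * reserved bits are set in the under-decoded rs2 field.
 */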
4161 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4162 {
4163 TCGv addr, tmp = NULL;
4164
4165 /* For simplicity, we under-decoded the rs2 form. */
4166 if (!imm && rs2_or_imm & ~0x1f) {
4167 return NULL;
4168 }
4169
4170 addr = gen_load_gpr(dc, rs1);
4171 if (rs2_or_imm) {
4172 tmp = tcg_temp_new();
4173 if (imm) {
4174 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4175 } else {
4176 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4177 }
4178 addr = tmp;
4179 }
4180 if (AM_CHECK(dc)) {
4181 if (!tmp) {
4182 tmp = tcg_temp_new();
4183 }
4184 tcg_gen_ext32u_tl(tmp, addr);
4185 addr = tmp;
4186 }
4187 return addr;
4188 }
4189
4190 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4191 {
4192 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4193 DisasASI da;
4194
4195 if (addr == NULL) {
4196 return false;
4197 }
4198 da = resolve_asi(dc, a->asi, mop);
4199
4200 reg = gen_dest_gpr(dc, a->rd);
4201 gen_ld_asi(dc, &da, reg, addr);
4202 gen_store_gpr(dc, a->rd, reg);
4203 return advance_pc(dc);
4204 }
4205
4206 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4207 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4208 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4209 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4210 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4211 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4212 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4213
4214 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4215 {
4216 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4217 DisasASI da;
4218
4219 if (addr == NULL) {
4220 return false;
4221 }
4222 da = resolve_asi(dc, a->asi, mop);
4223
4224 reg = gen_load_gpr(dc, a->rd);
4225 gen_st_asi(dc, &da, reg, addr);
4226 return advance_pc(dc);
4227 }
4228
4229 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4230 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4231 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4232 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4233
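/* LDD/STD (below): 64-bit access via an even/odd register pair; odd rd is illegal. */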
4234 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4235 {
4236 TCGv addr;
4237 DisasASI da;
4238
4239 if (a->rd & 1) {
4240 return false;
4241 }
4242 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4243 if (addr == NULL) {
4244 return false;
4245 }
4246 da = resolve_asi(dc, a->asi, MO_TEUQ);
4247 gen_ldda_asi(dc, &da, addr, a->rd);
4248 return advance_pc(dc);
4249 }
4250
4251 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4252 {
4253 TCGv addr;
4254 DisasASI da;
4255
4256 if (a->rd & 1) {
4257 return false;
4258 }
4259 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4260 if (addr == NULL) {
4261 return false;
4262 }
4263 da = resolve_asi(dc, a->asi, MO_TEUQ);
4264 gen_stda_asi(dc, &da, addr, a->rd);
4265 return advance_pc(dc);
4266 }
4267
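/* LDSTUB: atomically load a byte into rd and store all-ones to memory. */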
4268 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4269 {
4270 TCGv addr, reg;
4271 DisasASI da;
4272
4273 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4274 if (addr == NULL) {
4275 return false;
4276 }
4277 da = resolve_asi(dc, a->asi, MO_UB);
4278
4279 reg = gen_dest_gpr(dc, a->rd);
4280 gen_ldstub_asi(dc, &da, reg, addr);
4281 gen_store_gpr(dc, a->rd, reg);
4282 return advance_pc(dc);
4283 }
4284
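/* SWAP: atomically exchange a 32-bit word in memory with rd. */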
4285 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4286 {
4287 TCGv addr, dst, src;
4288 DisasASI da;
4289
4290 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4291 if (addr == NULL) {
4292 return false;
4293 }
4294 da = resolve_asi(dc, a->asi, MO_TEUL);
4295
4296 dst = gen_dest_gpr(dc, a->rd);
4297 src = gen_load_gpr(dc, a->rd);
4298 gen_swap_asi(dc, &da, dst, src, addr);
4299 gen_store_gpr(dc, a->rd, dst);
4300 return advance_pc(dc);
4301 }
4302
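/*
 * CASA/CASXA: compare rs2 with the word at [rs1]; if equal, store rd
 * there.  Either way, rd receives the old memory value.
 */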
4303 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4304 {
4305 TCGv addr, o, n, c;
4306 DisasASI da;
4307
4308 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4309 if (addr == NULL) {
4310 return false;
4311 }
4312 da = resolve_asi(dc, a->asi, mop);
4313
4314 o = gen_dest_gpr(dc, a->rd);
4315 n = gen_load_gpr(dc, a->rd);
4316 c = gen_load_gpr(dc, a->rs2_or_imm);
4317 gen_cas_asi(dc, &da, o, n, c, addr);
4318 gen_store_gpr(dc, a->rd, o);
4319 return advance_pc(dc);
4320 }
4321
4322 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4323 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4324
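/* FP register loads of 32, 64 or 128 bits through the resolved ASI. */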
4325 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4326 {
4327 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4328 DisasASI da;
4329
4330 if (addr == NULL) {
4331 return false;
4332 }
4333 if (gen_trap_ifnofpu(dc)) {
4334 return true;
4335 }
4336 if (sz == MO_128 && gen_trap_float128(dc)) {
4337 return true;
4338 }
4339 da = resolve_asi(dc, a->asi, MO_TE | sz);
4340 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4341 gen_update_fprs_dirty(dc, a->rd);
4342 return advance_pc(dc);
4343 }
4344
4345 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4346 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4347 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4348
4349 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4350 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4351 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4352
4353 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4354 {
4355 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4356 DisasASI da;
4357
4358 if (addr == NULL) {
4359 return false;
4360 }
4361 if (gen_trap_ifnofpu(dc)) {
4362 return true;
4363 }
4364 if (sz == MO_128 && gen_trap_float128(dc)) {
4365 return true;
4366 }
4367 da = resolve_asi(dc, a->asi, MO_TE | sz);
4368 gen_stf_asi(dc, &da, sz, addr, a->rd);
4369 return advance_pc(dc);
4370 }
4371
4372 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4373 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4374 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4375
4376 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4377 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4378 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4379
4380 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4381 {
4382 if (!avail_32(dc)) {
4383 return false;
4384 }
4385 if (!supervisor(dc)) {
4386 return raise_priv(dc);
4387 }
4388 if (gen_trap_ifnofpu(dc)) {
4389 return true;
4390 }
4391 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4392 return true;
4393 }
4394
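/*
 * LDFSR/LDXFSR: preserve the old_mask fields of FSR, merge in the
 * new_mask fields of the loaded word, then let the set_fsr helper
 * act on the updated value.
 */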
4395 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4396 target_ulong new_mask, target_ulong old_mask)
4397 {
4398 TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4399 if (addr == NULL) {
4400 return false;
4401 }
4402 if (gen_trap_ifnofpu(dc)) {
4403 return true;
4404 }
4405 tmp = tcg_temp_new();
4406 tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
4407 tcg_gen_andi_tl(tmp, tmp, new_mask);
4408 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
4409 tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
4410 gen_helper_set_fsr(tcg_env, cpu_fsr);
4411 return advance_pc(dc);
4412 }
4413
4414 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4415 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4416
4417 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4418 {
4419 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4420 TCGv fsr;
4421
4422 if (addr == NULL) {
4423 return false;
4424 }
4425 if (gen_trap_ifnofpu(dc)) {
4426 return true;
4427 }
4428
4429 fsr = tcg_temp_new();
4430 gen_helper_get_fsr(fsr, tcg_env);
4431 tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4432 return advance_pc(dc);
4433 }
4434
4435 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4436 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4437
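/*
 * FZEROs/FONEs: set one 32-bit half of the backing i64 to all zeros
 * or all ones; even rd selects the high half, odd rd the low half.
 */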
4438 static bool do_fc(DisasContext *dc, int rd, bool c)
4439 {
4440 uint64_t mask;
4441
4442 if (gen_trap_ifnofpu(dc)) {
4443 return true;
4444 }
4445
4446 if (rd & 1) {
4447 mask = MAKE_64BIT_MASK(0, 32);
4448 } else {
4449 mask = MAKE_64BIT_MASK(32, 32);
4450 }
4451 if (c) {
4452 tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4453 } else {
4454 tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4455 }
4456 gen_update_fprs_dirty(dc, rd);
4457 return advance_pc(dc);
4458 }
4459
4460 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4461 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4462
4463 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4464 {
4465 if (gen_trap_ifnofpu(dc)) {
4466 return true;
4467 }
4468
4469 tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4470 gen_update_fprs_dirty(dc, rd);
4471 return advance_pc(dc);
4472 }
4473
4474 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4475 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4476
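/*
 * The do_* helpers below are named for their operand shapes, result
 * first: f = 32-bit, d = 64-bit, q = 128-bit FP register, r = gpr.
 * An "env" infix marks operations that may raise IEEE exceptions and
 * therefore bracket the helper call with FTT clearing and an
 * exception check.
 */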
4477 static bool do_ff(DisasContext *dc, arg_r_r *a,
4478 void (*func)(TCGv_i32, TCGv_i32))
4479 {
4480 TCGv_i32 tmp;
4481
4482 if (gen_trap_ifnofpu(dc)) {
4483 return true;
4484 }
4485
4486 tmp = gen_load_fpr_F(dc, a->rs);
4487 func(tmp, tmp);
4488 gen_store_fpr_F(dc, a->rd, tmp);
4489 return advance_pc(dc);
4490 }
4491
4492 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4493 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4494 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4495 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4496 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4497
4498 static bool do_fd(DisasContext *dc, arg_r_r *a,
4499 void (*func)(TCGv_i32, TCGv_i64))
4500 {
4501 TCGv_i32 dst;
4502 TCGv_i64 src;
4503
4504 if (gen_trap_ifnofpu(dc)) {
4505 return true;
4506 }
4507
4508 dst = tcg_temp_new_i32();
4509 src = gen_load_fpr_D(dc, a->rs);
4510 func(dst, src);
4511 gen_store_fpr_F(dc, a->rd, dst);
4512 return advance_pc(dc);
4513 }
4514
4515 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4516 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4517
4518 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4519 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4520 {
4521 TCGv_i32 tmp;
4522
4523 if (gen_trap_ifnofpu(dc)) {
4524 return true;
4525 }
4526
4527 gen_op_clear_ieee_excp_and_FTT();
4528 tmp = gen_load_fpr_F(dc, a->rs);
4529 func(tmp, tcg_env, tmp);
4530 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4531 gen_store_fpr_F(dc, a->rd, tmp);
4532 return advance_pc(dc);
4533 }
4534
4535 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4536 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4537 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4538
4539 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4540 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4541 {
4542 TCGv_i32 dst;
4543 TCGv_i64 src;
4544
4545 if (gen_trap_ifnofpu(dc)) {
4546 return true;
4547 }
4548
4549 gen_op_clear_ieee_excp_and_FTT();
4550 dst = tcg_temp_new_i32();
4551 src = gen_load_fpr_D(dc, a->rs);
4552 func(dst, tcg_env, src);
4553 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4554 gen_store_fpr_F(dc, a->rd, dst);
4555 return advance_pc(dc);
4556 }
4557
4558 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4559 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4560 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4561
4562 static bool do_dd(DisasContext *dc, arg_r_r *a,
4563 void (*func)(TCGv_i64, TCGv_i64))
4564 {
4565 TCGv_i64 dst, src;
4566
4567 if (gen_trap_ifnofpu(dc)) {
4568 return true;
4569 }
4570
4571 dst = gen_dest_fpr_D(dc, a->rd);
4572 src = gen_load_fpr_D(dc, a->rs);
4573 func(dst, src);
4574 gen_store_fpr_D(dc, a->rd, dst);
4575 return advance_pc(dc);
4576 }
4577
4578 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4579 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4580 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4581 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4582 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4583
4584 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4585 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4586 {
4587 TCGv_i64 dst, src;
4588
4589 if (gen_trap_ifnofpu(dc)) {
4590 return true;
4591 }
4592
4593 gen_op_clear_ieee_excp_and_FTT();
4594 dst = gen_dest_fpr_D(dc, a->rd);
4595 src = gen_load_fpr_D(dc, a->rs);
4596 func(dst, tcg_env, src);
4597 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4598 gen_store_fpr_D(dc, a->rd, dst);
4599 return advance_pc(dc);
4600 }
4601
4602 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4603 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4604 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4605
4606 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4607 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4608 {
4609 TCGv_i64 dst;
4610 TCGv_i32 src;
4611
4612 if (gen_trap_ifnofpu(dc)) {
4613 return true;
4614 }
4615
4616 gen_op_clear_ieee_excp_and_FTT();
4617 dst = gen_dest_fpr_D(dc, a->rd);
4618 src = gen_load_fpr_F(dc, a->rs);
4619 func(dst, tcg_env, src);
4620 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4621 gen_store_fpr_D(dc, a->rd, dst);
4622 return advance_pc(dc);
4623 }
4624
4625 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4626 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4627 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4628
4629 static bool do_qq(DisasContext *dc, arg_r_r *a,
4630 void (*func)(TCGv_i128, TCGv_i128))
4631 {
4632 TCGv_i128 t;
4633
4634 if (gen_trap_ifnofpu(dc)) {
4635 return true;
4636 }
4637 if (gen_trap_float128(dc)) {
4638 return true;
4639 }
4640
4641 gen_op_clear_ieee_excp_and_FTT();
4642 t = gen_load_fpr_Q(dc, a->rs);
4643 func(t, t);
4644 gen_store_fpr_Q(dc, a->rd, t);
4645 return advance_pc(dc);
4646 }
4647
4648 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4649 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4650 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4651
4652 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4653 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4654 {
4655 TCGv_i128 t;
4656
4657 if (gen_trap_ifnofpu(dc)) {
4658 return true;
4659 }
4660 if (gen_trap_float128(dc)) {
4661 return true;
4662 }
4663
4664 gen_op_clear_ieee_excp_and_FTT();
4665
4666 t = gen_load_fpr_Q(dc, a->rs);
4667 func(t, tcg_env, t);
4668 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4669 gen_store_fpr_Q(dc, a->rd, t);
4670 return advance_pc(dc);
4671 }
4672
4673 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4674
4675 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4676 void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4677 {
4678 TCGv_i128 src;
4679 TCGv_i32 dst;
4680
4681 if (gen_trap_ifnofpu(dc)) {
4682 return true;
4683 }
4684 if (gen_trap_float128(dc)) {
4685 return true;
4686 }
4687
4688 gen_op_clear_ieee_excp_and_FTT();
4689 src = gen_load_fpr_Q(dc, a->rs);
4690 dst = tcg_temp_new_i32();
4691 func(dst, tcg_env, src);
4692 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4693 gen_store_fpr_F(dc, a->rd, dst);
4694 return advance_pc(dc);
4695 }
4696
4697 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4698 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4699
4700 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4701 void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4702 {
4703 TCGv_i128 src;
4704 TCGv_i64 dst;
4705
4706 if (gen_trap_ifnofpu(dc)) {
4707 return true;
4708 }
4709 if (gen_trap_float128(dc)) {
4710 return true;
4711 }
4712
4713 gen_op_clear_ieee_excp_and_FTT();
4714 src = gen_load_fpr_Q(dc, a->rs);
4715 dst = gen_dest_fpr_D(dc, a->rd);
4716 func(dst, tcg_env, src);
4717 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4718 gen_store_fpr_D(dc, a->rd, dst);
4719 return advance_pc(dc);
4720 }
4721
4722 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4723 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4724
4725 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4726 void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4727 {
4728 TCGv_i32 src;
4729 TCGv_i128 dst;
4730
4731 if (gen_trap_ifnofpu(dc)) {
4732 return true;
4733 }
4734 if (gen_trap_float128(dc)) {
4735 return true;
4736 }
4737
4738 gen_op_clear_ieee_excp_and_FTT();
4739 src = gen_load_fpr_F(dc, a->rs);
4740 dst = tcg_temp_new_i128();
4741 func(dst, tcg_env, src);
4742 gen_store_fpr_Q(dc, a->rd, dst);
4743 return advance_pc(dc);
4744 }
4745
4746 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4747 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4748
4749 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4750 void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4751 {
4752 TCGv_i64 src;
4753 TCGv_i128 dst;
4754
4755 if (gen_trap_ifnofpu(dc)) {
4756 return true;
4757 }
4758 if (gen_trap_float128(dc)) {
4759 return true;
4760 }
4761
4762 gen_op_clear_ieee_excp_and_FTT();
4763 src = gen_load_fpr_D(dc, a->rs);
4764 dst = tcg_temp_new_i128();
4765 func(dst, tcg_env, src);
4766 gen_store_fpr_Q(dc, a->rd, dst);
4767 return advance_pc(dc);
4768 }
4769
4770 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4771 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4772
4773 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4774 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4775 {
4776 TCGv_i32 src1, src2;
4777
4778 if (gen_trap_ifnofpu(dc)) {
4779 return true;
4780 }
4781
4782 src1 = gen_load_fpr_F(dc, a->rs1);
4783 src2 = gen_load_fpr_F(dc, a->rs2);
4784 func(src1, src1, src2);
4785 gen_store_fpr_F(dc, a->rd, src1);
4786 return advance_pc(dc);
4787 }
4788
4789 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4790 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4791 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4792 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4793 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4794 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4795 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4796 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4797 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4798 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4799 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4800 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4801
4802 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4803 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4804 {
4805 TCGv_i32 src1, src2;
4806
4807 if (gen_trap_ifnofpu(dc)) {
4808 return true;
4809 }
4810
4811 gen_op_clear_ieee_excp_and_FTT();
4812 src1 = gen_load_fpr_F(dc, a->rs1);
4813 src2 = gen_load_fpr_F(dc, a->rs2);
4814 func(src1, tcg_env, src1, src2);
4815 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4816 gen_store_fpr_F(dc, a->rd, src1);
4817 return advance_pc(dc);
4818 }
4819
4820 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4821 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4822 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4823 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4824
4825 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4826 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4827 {
4828 TCGv_i64 dst, src1, src2;
4829
4830 if (gen_trap_ifnofpu(dc)) {
4831 return true;
4832 }
4833
4834 dst = gen_dest_fpr_D(dc, a->rd);
4835 src1 = gen_load_fpr_D(dc, a->rs1);
4836 src2 = gen_load_fpr_D(dc, a->rs2);
4837 func(dst, src1, src2);
4838 gen_store_fpr_D(dc, a->rd, dst);
4839 return advance_pc(dc);
4840 }
4841
4842 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4843 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4844 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4845 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4846 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4847 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4848 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4849 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4850 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4851
4852 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4853 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4854 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4855 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4856 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4857 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4858 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4859 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4860 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4861 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4862 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4863 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4864
4865 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4866 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4867 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4868
4869 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4870 void (*func)(TCGv, TCGv_i64, TCGv_i64))
4871 {
4872 TCGv_i64 src1, src2;
4873 TCGv dst;
4874
4875 if (gen_trap_ifnofpu(dc)) {
4876 return true;
4877 }
4878
4879 dst = gen_dest_gpr(dc, a->rd);
4880 src1 = gen_load_fpr_D(dc, a->rs1);
4881 src2 = gen_load_fpr_D(dc, a->rs2);
4882 func(dst, src1, src2);
4883 gen_store_gpr(dc, a->rd, dst);
4884 return advance_pc(dc);
4885 }
4886
4887 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4888 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4889 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4890 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4891
4892 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4893 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4894 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4895 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4896
4897 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4898 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4899 {
4900 TCGv_i64 dst, src1, src2;
4901
4902 if (gen_trap_ifnofpu(dc)) {
4903 return true;
4904 }
4905
4906 gen_op_clear_ieee_excp_and_FTT();
4907 dst = gen_dest_fpr_D(dc, a->rd);
4908 src1 = gen_load_fpr_D(dc, a->rs1);
4909 src2 = gen_load_fpr_D(dc, a->rs2);
4910 func(dst, tcg_env, src1, src2);
4911 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4912 gen_store_fpr_D(dc, a->rd, dst);
4913 return advance_pc(dc);
4914 }
4915
4916 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4917 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4918 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4919 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4920
4921 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4922 {
4923 TCGv_i64 dst;
4924 TCGv_i32 src1, src2;
4925
4926 if (gen_trap_ifnofpu(dc)) {
4927 return true;
4928 }
4929 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4930 return raise_unimpfpop(dc);
4931 }
4932
4933 gen_op_clear_ieee_excp_and_FTT();
4934 dst = gen_dest_fpr_D(dc, a->rd);
4935 src1 = gen_load_fpr_F(dc, a->rs1);
4936 src2 = gen_load_fpr_F(dc, a->rs2);
4937 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4938 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4939 gen_store_fpr_D(dc, a->rd, dst);
4940 return advance_pc(dc);
4941 }
4942
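/* PDIST accumulates into rd, so the old rd value is an extra source. */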
4943 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4944 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4945 {
4946 TCGv_i64 dst, src0, src1, src2;
4947
4948 if (gen_trap_ifnofpu(dc)) {
4949 return true;
4950 }
4951
4952 dst = gen_dest_fpr_D(dc, a->rd);
4953 src0 = gen_load_fpr_D(dc, a->rd);
4954 src1 = gen_load_fpr_D(dc, a->rs1);
4955 src2 = gen_load_fpr_D(dc, a->rs2);
4956 func(dst, src0, src1, src2);
4957 gen_store_fpr_D(dc, a->rd, dst);
4958 return advance_pc(dc);
4959 }
4960
4961 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4962
4963 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4964 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4965 {
4966 TCGv_i128 src1, src2;
4967
4968 if (gen_trap_ifnofpu(dc)) {
4969 return true;
4970 }
4971 if (gen_trap_float128(dc)) {
4972 return true;
4973 }
4974
4975 gen_op_clear_ieee_excp_and_FTT();
4976 src1 = gen_load_fpr_Q(dc, a->rs1);
4977 src2 = gen_load_fpr_Q(dc, a->rs2);
4978 func(src1, tcg_env, src1, src2);
4979 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4980 gen_store_fpr_Q(dc, a->rd, src1);
4981 return advance_pc(dc);
4982 }
4983
4984 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4985 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
4986 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
4987 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4988
4989 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
4990 {
4991 TCGv_i64 src1, src2;
4992 TCGv_i128 dst;
4993
4994 if (gen_trap_ifnofpu(dc)) {
4995 return true;
4996 }
4997 if (gen_trap_float128(dc)) {
4998 return true;
4999 }
5000
5001 gen_op_clear_ieee_excp_and_FTT();
5002 src1 = gen_load_fpr_D(dc, a->rs1);
5003 src2 = gen_load_fpr_D(dc, a->rs2);
5004 dst = tcg_temp_new_i128();
5005 gen_helper_fdmulq(dst, tcg_env, src1, src2);
5006 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5007 gen_store_fpr_Q(dc, a->rd, dst);
5008 return advance_pc(dc);
5009 }
5010
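/* FMOVR*: move an FP register when integer register rs1 matches cond. */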
5011 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5012 void (*func)(DisasContext *, DisasCompare *, int, int))
5013 {
5014 DisasCompare cmp;
5015
5016 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5017 return false;
5018 }
5019 if (gen_trap_ifnofpu(dc)) {
5020 return true;
5021 }
5022 if (is_128 && gen_trap_float128(dc)) {
5023 return true;
5024 }
5025
5026 gen_op_clear_ieee_excp_and_FTT();
5027 func(dc, &cmp, a->rd, a->rs2);
5028 return advance_pc(dc);
5029 }
5030
5031 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5032 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5033 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5034
5035 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5036 void (*func)(DisasContext *, DisasCompare *, int, int))
5037 {
5038 DisasCompare cmp;
5039
5040 if (gen_trap_ifnofpu(dc)) {
5041 return true;
5042 }
5043 if (is_128 && gen_trap_float128(dc)) {
5044 return true;
5045 }
5046
5047 gen_op_clear_ieee_excp_and_FTT();
5048 gen_compare(&cmp, a->cc, a->cond, dc);
5049 func(dc, &cmp, a->rd, a->rs2);
5050 return advance_pc(dc);
5051 }
5052
5053 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5054 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5055 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5056
5057 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5058 void (*func)(DisasContext *, DisasCompare *, int, int))
5059 {
5060 DisasCompare cmp;
5061
5062 if (gen_trap_ifnofpu(dc)) {
5063 return true;
5064 }
5065 if (is_128 && gen_trap_float128(dc)) {
5066 return true;
5067 }
5068
5069 gen_op_clear_ieee_excp_and_FTT();
5070 gen_fcompare(&cmp, a->cc, a->cond);
5071 func(dc, &cmp, a->rd, a->rs2);
5072 return advance_pc(dc);
5073 }
5074
5075 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5076 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5077 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5078
5079 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5080 {
5081 TCGv_i32 src1, src2;
5082
5083 if (avail_32(dc) && a->cc != 0) {
5084 return false;
5085 }
5086 if (gen_trap_ifnofpu(dc)) {
5087 return true;
5088 }
5089
5090 gen_op_clear_ieee_excp_and_FTT();
5091 src1 = gen_load_fpr_F(dc, a->rs1);
5092 src2 = gen_load_fpr_F(dc, a->rs2);
5093 if (e) {
5094 gen_op_fcmpes(a->cc, src1, src2);
5095 } else {
5096 gen_op_fcmps(a->cc, src1, src2);
5097 }
5098 return advance_pc(dc);
5099 }
5100
5101 TRANS(FCMPs, ALL, do_fcmps, a, false)
5102 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5103
5104 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5105 {
5106 TCGv_i64 src1, src2;
5107
5108 if (avail_32(dc) && a->cc != 0) {
5109 return false;
5110 }
5111 if (gen_trap_ifnofpu(dc)) {
5112 return true;
5113 }
5114
5115 gen_op_clear_ieee_excp_and_FTT();
5116 src1 = gen_load_fpr_D(dc, a->rs1);
5117 src2 = gen_load_fpr_D(dc, a->rs2);
5118 if (e) {
5119 gen_op_fcmped(a->cc, src1, src2);
5120 } else {
5121 gen_op_fcmpd(a->cc, src1, src2);
5122 }
5123 return advance_pc(dc);
5124 }
5125
5126 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5127 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5128
5129 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5130 {
5131 TCGv_i128 src1, src2;
5132
5133 if (avail_32(dc) && a->cc != 0) {
5134 return false;
5135 }
5136 if (gen_trap_ifnofpu(dc)) {
5137 return true;
5138 }
5139 if (gen_trap_float128(dc)) {
5140 return true;
5141 }
5142
5143 gen_op_clear_ieee_excp_and_FTT();
5144 src1 = gen_load_fpr_Q(dc, a->rs1);
5145 src2 = gen_load_fpr_Q(dc, a->rs2);
5146 if (e) {
5147 gen_op_fcmpeq(a->cc, src1, src2);
5148 } else {
5149 gen_op_fcmpq(a->cc, src1, src2);
5150 }
5151 return advance_pc(dc);
5152 }
5153
5154 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5155 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5156
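/* Hooks for the generic translator loop; see sparc_tr_ops below. */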
5157 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5158 {
5159 DisasContext *dc = container_of(dcbase, DisasContext, base);
5160 CPUSPARCState *env = cpu_env(cs);
5161 int bound;
5162
5163 dc->pc = dc->base.pc_first;
5164 dc->npc = (target_ulong)dc->base.tb->cs_base;
5165 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5166 dc->def = &env->def;
5167 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5168 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5169 #ifndef CONFIG_USER_ONLY
5170 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5171 #endif
5172 #ifdef TARGET_SPARC64
5173 dc->fprs_dirty = 0;
5174 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5175 #ifndef CONFIG_USER_ONLY
5176 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5177 #endif
5178 #endif
5179 /*
5180 * If we reach a page boundary, we stop generation so that the
5181 * PC of a TT_TFAULT exception is always in the right page.
5182 */
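/* -(pc_first | TARGET_PAGE_MASK) == bytes left in the page; /4 gives insns. */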
5183 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5184 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5185 }
5186
5187 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5188 {
5189 }
5190
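/*
 * Record pc/npc for this insn.  A misaligned npc encodes symbolic
 * state: JUMP_PC folds in the taken branch target, and both dynamic
 * forms are canonicalized to DYNAMIC_PC for restore_state_to_opc.
 */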
5191 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5192 {
5193 DisasContext *dc = container_of(dcbase, DisasContext, base);
5194 target_ulong npc = dc->npc;
5195
5196 if (npc & 3) {
5197 switch (npc) {
5198 case JUMP_PC:
5199 assert(dc->jump_pc[1] == dc->pc + 4);
5200 npc = dc->jump_pc[0] | JUMP_PC;
5201 break;
5202 case DYNAMIC_PC:
5203 case DYNAMIC_PC_LOOKUP:
5204 npc = DYNAMIC_PC;
5205 break;
5206 default:
5207 g_assert_not_reached();
5208 }
5209 }
5210 tcg_gen_insn_start(dc->pc, npc);
5211 }
5212
5213 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5214 {
5215 DisasContext *dc = container_of(dcbase, DisasContext, base);
5216 CPUSPARCState *env = cpu_env(cs);
5217 unsigned int insn;
5218
5219 insn = translator_ldl(env, &dc->base, dc->pc);
5220 dc->base.pc_next += 4;
5221
5222 if (!decode(dc, insn)) {
5223 gen_exception(dc, TT_ILL_INSN);
5224 }
5225
5226 if (dc->base.is_jmp == DISAS_NORETURN) {
5227 return;
5228 }
5229 if (dc->pc != dc->base.pc_next) {
5230 dc->base.is_jmp = DISAS_TOO_MANY;
5231 }
5232 }
5233
5234 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5235 {
5236 DisasContext *dc = container_of(dcbase, DisasContext, base);
5237 DisasDelayException *e, *e_next;
5238 bool may_lookup;
5239
5240 finishing_insn(dc);
5241
5242 switch (dc->base.is_jmp) {
5243 case DISAS_NEXT:
5244 case DISAS_TOO_MANY:
5245 if (((dc->pc | dc->npc) & 3) == 0) {
5246 /* static PC and NPC: we can use direct chaining */
5247 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5248 break;
5249 }
5250
5251 may_lookup = true;
5252 if (dc->pc & 3) {
5253 switch (dc->pc) {
5254 case DYNAMIC_PC_LOOKUP:
5255 break;
5256 case DYNAMIC_PC:
5257 may_lookup = false;
5258 break;
5259 default:
5260 g_assert_not_reached();
5261 }
5262 } else {
5263 tcg_gen_movi_tl(cpu_pc, dc->pc);
5264 }
5265
5266 if (dc->npc & 3) {
5267 switch (dc->npc) {
5268 case JUMP_PC:
5269 gen_generic_branch(dc);
5270 break;
5271 case DYNAMIC_PC:
5272 may_lookup = false;
5273 break;
5274 case DYNAMIC_PC_LOOKUP:
5275 break;
5276 default:
5277 g_assert_not_reached();
5278 }
5279 } else {
5280 tcg_gen_movi_tl(cpu_npc, dc->npc);
5281 }
5282 if (may_lookup) {
5283 tcg_gen_lookup_and_goto_ptr();
5284 } else {
5285 tcg_gen_exit_tb(NULL, 0);
5286 }
5287 break;
5288
5289 case DISAS_NORETURN:
5290 break;
5291
5292 case DISAS_EXIT:
5293 /* Exit TB */
5294 save_state(dc);
5295 tcg_gen_exit_tb(NULL, 0);
5296 break;
5297
5298 default:
5299 g_assert_not_reached();
5300 }
5301
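/* Emit the out-of-line exception paths queued during translation. */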
5302 for (e = dc->delay_excp_list; e ; e = e_next) {
5303 gen_set_label(e->lab);
5304
5305 tcg_gen_movi_tl(cpu_pc, e->pc);
5306 if (e->npc % 4 == 0) {
5307 tcg_gen_movi_tl(cpu_npc, e->npc);
5308 }
5309 gen_helper_raise_exception(tcg_env, e->excp);
5310
5311 e_next = e->next;
5312 g_free(e);
5313 }
5314 }
5315
5316 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5317 CPUState *cpu, FILE *logfile)
5318 {
5319 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5320 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5321 }
5322
5323 static const TranslatorOps sparc_tr_ops = {
5324 .init_disas_context = sparc_tr_init_disas_context,
5325 .tb_start = sparc_tr_tb_start,
5326 .insn_start = sparc_tr_insn_start,
5327 .translate_insn = sparc_tr_translate_insn,
5328 .tb_stop = sparc_tr_tb_stop,
5329 .disas_log = sparc_tr_disas_log,
5330 };
5331
5332 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5333 vaddr pc, void *host_pc)
5334 {
5335 DisasContext dc = {};
5336
5337 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5338 }
5339
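/* Create TCG globals for the CPU state; fpr[] holds even/odd register pairs. */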
5340 void sparc_tcg_init(void)
5341 {
5342 static const char gregnames[32][4] = {
5343 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5344 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5345 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5346 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5347 };
5348 static const char fregnames[32][4] = {
5349 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5350 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5351 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5352 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5353 };
5354
5355 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5356 #ifdef TARGET_SPARC64
5357 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5358 { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5359 { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5360 #endif
5361 { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5362 { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5363 { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5364 { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5365 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5366 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5367 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5368 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5369 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5370 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5371 };
5372
5373 unsigned int i;
5374
5375 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5376 offsetof(CPUSPARCState, regwptr),
5377 "regwptr");
5378
5379 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5380 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5381 }
5382
5383 cpu_regs[0] = NULL;
5384 for (i = 1; i < 8; ++i) {
5385 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5386 offsetof(CPUSPARCState, gregs[i]),
5387 gregnames[i]);
5388 }
5389
5390 for (i = 8; i < 32; ++i) {
5391 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5392 (i - 8) * sizeof(target_ulong),
5393 gregnames[i]);
5394 }
5395
5396 for (i = 0; i < TARGET_DPREGS; i++) {
5397 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5398 offsetof(CPUSPARCState, fpr[i]),
5399 fregnames[i]);
5400 }
5401
5402 #ifdef TARGET_SPARC64
5403 cpu_fprs = tcg_global_mem_new_i32(tcg_env,
5404 offsetof(CPUSPARCState, fprs), "fprs");
5405 #endif
5406 }
5407
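/*
 * Invert sparc_tr_insn_start: data[0] is the pc and data[1] the
 * possibly-symbolic npc recorded for the faulting insn.
 */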
5408 void sparc_restore_state_to_opc(CPUState *cs,
5409 const TranslationBlock *tb,
5410 const uint64_t *data)
5411 {
5412 SPARCCPU *cpu = SPARC_CPU(cs);
5413 CPUSPARCState *env = &cpu->env;
5414 target_ulong pc = data[0];
5415 target_ulong npc = data[1];
5416
5417 env->pc = pc;
5418 if (npc == DYNAMIC_PC) {
5419 /* dynamic NPC: already stored */
5420 } else if (npc & JUMP_PC) {
5421 /* jump PC: use 'cond' and the jump targets of the translation */
5422 if (env->cond) {
5423 env->npc = npc & ~3;
5424 } else {
5425 env->npc = pc + 4;
5426 }
5427 } else {
5428 env->npc = npc;
5429 }
5430 }