1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef HELPER_H
37
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_flushw(E) qemu_build_not_reached()
47 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
48 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
49 # define gen_helper_restored(E) qemu_build_not_reached()
50 # define gen_helper_retry(E) qemu_build_not_reached()
51 # define gen_helper_saved(E) qemu_build_not_reached()
52 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
53 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
54 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
55 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
56 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
57 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
58 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
59 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
60 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
61 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
62 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
63 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
64 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
86 # define FSR_LDXFSR_MASK 0
87 # define FSR_LDXFSR_OLDMASK 0
88 # define MAXTL_MASK 0
89 #endif
90
91 /* Dynamic PC, must exit to main loop. */
92 #define DYNAMIC_PC 1
93 /* Dynamic PC, one of two values according to jump_pc[T2]. */
94 #define JUMP_PC 2
95 /* Dynamic PC, may lookup next TB. */
96 #define DYNAMIC_PC_LOOKUP 3
97
98 #define DISAS_EXIT DISAS_TARGET_0
99
100 /* global register indexes */
101 static TCGv_ptr cpu_regwptr;
102 static TCGv cpu_pc, cpu_npc;
103 static TCGv cpu_regs[32];
104 static TCGv cpu_y;
105 static TCGv cpu_tbr;
106 static TCGv cpu_cond;
107 static TCGv cpu_cc_N;
108 static TCGv cpu_cc_V;
109 static TCGv cpu_icc_Z;
110 static TCGv cpu_icc_C;
111 #ifdef TARGET_SPARC64
112 static TCGv cpu_xcc_Z;
113 static TCGv cpu_xcc_C;
114 static TCGv_i32 cpu_fprs;
115 static TCGv cpu_gsr;
116 #else
117 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
118 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
119 #endif
120
121 #ifdef TARGET_SPARC64
122 #define cpu_cc_Z cpu_xcc_Z
123 #define cpu_cc_C cpu_xcc_C
124 #else
125 #define cpu_cc_Z cpu_icc_Z
126 #define cpu_cc_C cpu_icc_C
127 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
128 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
129 #endif
130
131 /* Floating point registers */
132 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
133
134 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
135 #ifdef TARGET_SPARC64
136 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
137 # define env64_field_offsetof(X) env_field_offsetof(X)
138 #else
139 # define env32_field_offsetof(X) env_field_offsetof(X)
140 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
141 #endif
142
143 typedef struct DisasCompare {
144 TCGCond cond;
145 TCGv c1;
146 int c2;
147 } DisasCompare;
148
149 typedef struct DisasDelayException {
150 struct DisasDelayException *next;
151 TCGLabel *lab;
152 TCGv_i32 excp;
153 /* Saved state at parent insn. */
154 target_ulong pc;
155 target_ulong npc;
156 } DisasDelayException;
157
158 typedef struct DisasContext {
159 DisasContextBase base;
160 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
161 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
162
163 /* Used when JUMP_PC value is used. */
164 DisasCompare jump;
165 target_ulong jump_pc[2];
166
167 int mem_idx;
168 bool cpu_cond_live;
169 bool fpu_enabled;
170 bool address_mask_32bit;
171 #ifndef CONFIG_USER_ONLY
172 bool supervisor;
173 #ifdef TARGET_SPARC64
174 bool hypervisor;
175 #endif
176 #endif
177
178 sparc_def_t *def;
179 #ifdef TARGET_SPARC64
180 int fprs_dirty;
181 int asi;
182 #endif
183 DisasDelayException *delay_excp_list;
184 } DisasContext;
185
186 // This macro uses non-native bit order (bit 0 is the MSB)
187 #define GET_FIELD(X, FROM, TO) \
188 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
189
190 // This function uses the order in the manuals, i.e. bit 0 is 2^0
191 #define GET_FIELD_SP(X, FROM, TO) \
192 GET_FIELD(X, 31 - (TO), 31 - (FROM))
193
194 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
195 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
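/*
 * For example, the rd field of a format-3 instruction occupies bits
 * 29..25 in the manual's numbering, so GET_FIELD_SP(insn, 25, 29)
 * expands to GET_FIELD(insn, 2, 6) == (insn >> 25) & 0x1f.
 */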
196
197 #ifdef TARGET_SPARC64
198 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
199 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
200 #else
201 #define DFPREG(r) (r & 0x1e)
202 #define QFPREG(r) (r & 0x1c)
203 #endif
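/*
 * For example, on sparc64 a double-register field value of 1 names %d32:
 * the low bit of the 5-bit field supplies bit 5 of the register number,
 * so DFPREG(1) == 32, which lives in cpu_fpr[16].
 */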
204
205 #define UA2005_HTRAP_MASK 0xff
206 #define V8_TRAP_MASK 0x7f
207
208 #define IS_IMM (insn & (1<<13))
209
210 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
211 {
212 #if defined(TARGET_SPARC64)
213 int bit = (rd < 32) ? 1 : 2;
214 /* If we know we've already set this bit within the TB,
215 we can avoid setting it again. */
216 if (!(dc->fprs_dirty & bit)) {
217 dc->fprs_dirty |= bit;
218 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
219 }
220 #endif
221 }
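/*
 * The bit values above correspond to the V9 FPRS.DL (dirty lower,
 * %f0-%f31) and FPRS.DU (dirty upper, %f32-%f62) fields.
 */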
222
223 /* floating point registers moves */
224 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
225 {
226 TCGv_i32 ret = tcg_temp_new_i32();
227 if (src & 1) {
228 tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
229 } else {
230 tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
231 }
232 return ret;
233 }
234
235 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
236 {
237 TCGv_i64 t = tcg_temp_new_i64();
238
239 tcg_gen_extu_i32_i64(t, v);
240 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
241 (dst & 1 ? 0 : 32), 32);
242 gen_update_fprs_dirty(dc, dst);
243 }
244
245 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
246 {
247 src = DFPREG(src);
248 return cpu_fpr[src / 2];
249 }
250
251 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
252 {
253 dst = DFPREG(dst);
254 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
255 gen_update_fprs_dirty(dc, dst);
256 }
257
258 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
259 {
260 return cpu_fpr[DFPREG(dst) / 2];
261 }
262
263 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
264 {
265 TCGv_i128 ret = tcg_temp_new_i128();
266
267 src = QFPREG(src);
268 tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
269 return ret;
270 }
271
272 static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
273 {
274 dst = DFPREG(dst);
275 tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
276 gen_update_fprs_dirty(dc, dst);
277 }
278
279 /* moves */
280 #ifdef CONFIG_USER_ONLY
281 #define supervisor(dc) 0
282 #define hypervisor(dc) 0
283 #else
284 #ifdef TARGET_SPARC64
285 #define hypervisor(dc) (dc->hypervisor)
286 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
287 #else
288 #define supervisor(dc) (dc->supervisor)
289 #define hypervisor(dc) 0
290 #endif
291 #endif
292
293 #if !defined(TARGET_SPARC64)
294 # define AM_CHECK(dc) false
295 #elif defined(TARGET_ABI32)
296 # define AM_CHECK(dc) true
297 #elif defined(CONFIG_USER_ONLY)
298 # define AM_CHECK(dc) false
299 #else
300 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
301 #endif
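/*
 * Rationale: pre-v9 CPUs have no address masking; a 32-bit ABI on a
 * 64-bit CPU always masks; 64-bit user emulation never does; otherwise
 * masking follows PSTATE.AM as recorded in dc->address_mask_32bit.
 */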
302
303 static void gen_address_mask(DisasContext *dc, TCGv addr)
304 {
305 if (AM_CHECK(dc)) {
306 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
307 }
308 }
309
310 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
311 {
312 return AM_CHECK(dc) ? (uint32_t)addr : addr;
313 }
314
315 static TCGv gen_load_gpr(DisasContext *dc, int reg)
316 {
317 if (reg > 0) {
318 assert(reg < 32);
319 return cpu_regs[reg];
320 } else {
321 TCGv t = tcg_temp_new();
322 tcg_gen_movi_tl(t, 0);
323 return t;
324 }
325 }
326
327 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
328 {
329 if (reg > 0) {
330 assert(reg < 32);
331 tcg_gen_mov_tl(cpu_regs[reg], v);
332 }
333 }
334
335 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
336 {
337 if (reg > 0) {
338 assert(reg < 32);
339 return cpu_regs[reg];
340 } else {
341 return tcg_temp_new();
342 }
343 }
344
345 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
346 {
347 return translator_use_goto_tb(&s->base, pc) &&
348 translator_use_goto_tb(&s->base, npc);
349 }
350
351 static void gen_goto_tb(DisasContext *s, int tb_num,
352 target_ulong pc, target_ulong npc)
353 {
354 if (use_goto_tb(s, pc, npc)) {
355 /* jump to same page: we can use a direct jump */
356 tcg_gen_goto_tb(tb_num);
357 tcg_gen_movi_tl(cpu_pc, pc);
358 tcg_gen_movi_tl(cpu_npc, npc);
359 tcg_gen_exit_tb(s->base.tb, tb_num);
360 } else {
361 /* jump to another page: we can use an indirect jump */
362 tcg_gen_movi_tl(cpu_pc, pc);
363 tcg_gen_movi_tl(cpu_npc, npc);
364 tcg_gen_lookup_and_goto_ptr();
365 }
366 }
367
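/*
 * Return icc.C as a 0/1 value usable as a carry-in.  On sparc64 the
 * 32-bit carry lives in bit 32 of cpu_icc_C (the carry out of bit 31
 * of the computation; see gen_op_addcc_int below), while on sparc32
 * cpu_icc_C holds the flag directly.
 */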
368 static TCGv gen_carry32(void)
369 {
370 if (TARGET_LONG_BITS == 64) {
371 TCGv t = tcg_temp_new();
372 tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
373 return t;
374 }
375 return cpu_icc_C;
376 }
377
378 static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
379 {
380 TCGv z = tcg_constant_tl(0);
381
382 if (cin) {
383 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
384 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
385 } else {
386 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
387 }
388 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
389 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
390 tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
391 if (TARGET_LONG_BITS == 64) {
392 /*
393 * Carry-in to bit 32 is result ^ src1 ^ src2.
394 * We already have the src xor term in Z, from computation of V.
395 */
396 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
397 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
398 }
399 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
400 tcg_gen_mov_tl(dst, cpu_cc_N);
401 }
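/*
 * Note the branch-free overflow computation above:
 * V = (result ^ src2) & ~(src1 ^ src2) has its sign bit set exactly
 * when both inputs have the same sign and the result's sign differs.
 */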
402
403 static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
404 {
405 gen_op_addcc_int(dst, src1, src2, NULL);
406 }
407
408 static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
409 {
410 TCGv t = tcg_temp_new();
411
412 /* Save the tag bits around modification of dst. */
413 tcg_gen_or_tl(t, src1, src2);
414
415 gen_op_addcc(dst, src1, src2);
416
417 /* Incorporate tag bits into icc.V */
418 tcg_gen_andi_tl(t, t, 3);
419 tcg_gen_neg_tl(t, t);
420 tcg_gen_ext32u_tl(t, t);
421 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
422 }
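/*
 * The neg/ext32u sequence above maps any nonzero tag value (1..3) to a
 * value with bit 31 set, so or'ing it into cpu_cc_V forces icc.V
 * without disturbing xcc.V.
 */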
423
424 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
425 {
426 tcg_gen_add_tl(dst, src1, src2);
427 tcg_gen_add_tl(dst, dst, gen_carry32());
428 }
429
430 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
431 {
432 gen_op_addcc_int(dst, src1, src2, gen_carry32());
433 }
434
435 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
436 {
437 TCGv z = tcg_constant_tl(0);
438
439 if (cin) {
440 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
441 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
442 } else {
443 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
444 }
445 tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
446 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
447 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
448 tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
449 #ifdef TARGET_SPARC64
450 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
451 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
452 #endif
453 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
454 tcg_gen_mov_tl(dst, cpu_cc_N);
455 }
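/*
 * tcg_gen_sub2_tl yields the borrow in the high word as 0 or -1; the
 * negation above normalizes cpu_cc_C to a 0/1 flag.
 */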
456
457 static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
458 {
459 gen_op_subcc_int(dst, src1, src2, NULL);
460 }
461
462 static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
463 {
464 TCGv t = tcg_temp_new();
465
466 /* Save the tag bits around modification of dst. */
467 tcg_gen_or_tl(t, src1, src2);
468
469 gen_op_subcc(dst, src1, src2);
470
471 /* Incorporate tag bits into icc.V */
472 tcg_gen_andi_tl(t, t, 3);
473 tcg_gen_neg_tl(t, t);
474 tcg_gen_ext32u_tl(t, t);
475 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
476 }
477
478 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
479 {
480 tcg_gen_sub_tl(dst, src1, src2);
481 tcg_gen_sub_tl(dst, dst, gen_carry32());
482 }
483
484 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
485 {
486 gen_op_subcc_int(dst, src1, src2, gen_carry32());
487 }
488
489 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
490 {
491 TCGv zero = tcg_constant_tl(0);
492 TCGv t_src1 = tcg_temp_new();
493 TCGv t_src2 = tcg_temp_new();
494 TCGv t0 = tcg_temp_new();
495
496 tcg_gen_ext32u_tl(t_src1, src1);
497 tcg_gen_ext32u_tl(t_src2, src2);
498
499 /*
500 * if (!(env->y & 1))
501 * src2 = 0;
502 */
503 tcg_gen_andi_tl(t0, cpu_y, 0x1);
504 tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
505
506 /*
507 * b2 = src1 & 1;
508 * y = (b2 << 31) | (y >> 1);
509 */
510 tcg_gen_extract_tl(t0, cpu_y, 1, 31);
511 tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
512
513 // b1 = N ^ V;
514 tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
515
516 /*
517 * src1 = (b1 << 31) | (src1 >> 1)
518 */
519 tcg_gen_andi_tl(t0, t0, 1u << 31);
520 tcg_gen_shri_tl(t_src1, t_src1, 1);
521 tcg_gen_or_tl(t_src1, t_src1, t0);
522
523 gen_op_addcc(dst, t_src1, t_src2);
524 }
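/*
 * This implements one V8 MULScc step: the Y register supplies the next
 * multiplier bit and collects low-order product bits, while N ^ V
 * restores the sign of the shifted partial product.  A 32x32 multiply
 * is performed in software as a sequence of such steps.
 */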
525
526 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
527 {
528 #if TARGET_LONG_BITS == 32
529 if (sign_ext) {
530 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
531 } else {
532 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
533 }
534 #else
535 TCGv t0 = tcg_temp_new_i64();
536 TCGv t1 = tcg_temp_new_i64();
537
538 if (sign_ext) {
539 tcg_gen_ext32s_i64(t0, src1);
540 tcg_gen_ext32s_i64(t1, src2);
541 } else {
542 tcg_gen_ext32u_i64(t0, src1);
543 tcg_gen_ext32u_i64(t1, src2);
544 }
545
546 tcg_gen_mul_i64(dst, t0, t1);
547 tcg_gen_shri_i64(cpu_y, dst, 32);
548 #endif
549 }
550
551 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
552 {
553 /* zero-extend truncated operands before multiplication */
554 gen_op_multiply(dst, src1, src2, 0);
555 }
556
557 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
558 {
559 /* sign-extend truncated operands before multiplication */
560 gen_op_multiply(dst, src1, src2, 1);
561 }
562
563 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
564 {
565 #ifdef TARGET_SPARC64
566 gen_helper_sdiv(dst, tcg_env, src1, src2);
567 tcg_gen_ext32s_tl(dst, dst);
568 #else
569 TCGv_i64 t64 = tcg_temp_new_i64();
570 gen_helper_sdiv(t64, tcg_env, src1, src2);
571 tcg_gen_trunc_i64_tl(dst, t64);
572 #endif
573 }
574
575 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
576 {
577 TCGv_i64 t64;
578
579 #ifdef TARGET_SPARC64
580 t64 = cpu_cc_V;
581 #else
582 t64 = tcg_temp_new_i64();
583 #endif
584
585 gen_helper_udiv(t64, tcg_env, src1, src2);
586
587 #ifdef TARGET_SPARC64
588 tcg_gen_ext32u_tl(cpu_cc_N, t64);
589 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
590 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
591 tcg_gen_movi_tl(cpu_icc_C, 0);
592 #else
593 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
594 #endif
595 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
596 tcg_gen_movi_tl(cpu_cc_C, 0);
597 tcg_gen_mov_tl(dst, cpu_cc_N);
598 }
599
600 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
601 {
602 TCGv_i64 t64;
603
604 #ifdef TARGET_SPARC64
605 t64 = cpu_cc_V;
606 #else
607 t64 = tcg_temp_new_i64();
608 #endif
609
610 gen_helper_sdiv(t64, tcg_env, src1, src2);
611
612 #ifdef TARGET_SPARC64
613 tcg_gen_ext32s_tl(cpu_cc_N, t64);
614 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
615 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
616 tcg_gen_movi_tl(cpu_icc_C, 0);
617 #else
618 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
619 #endif
620 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
621 tcg_gen_movi_tl(cpu_cc_C, 0);
622 tcg_gen_mov_tl(dst, cpu_cc_N);
623 }
624
625 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
626 {
627 gen_helper_taddcctv(dst, tcg_env, src1, src2);
628 }
629
630 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
631 {
632 gen_helper_tsubcctv(dst, tcg_env, src1, src2);
633 }
634
635 static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
636 {
637 tcg_gen_ctpop_tl(dst, src2);
638 }
639
640 #ifndef TARGET_SPARC64
641 static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
642 {
643 g_assert_not_reached();
644 }
645 #endif
646
647 static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
648 {
649 gen_helper_array8(dst, src1, src2);
650 tcg_gen_shli_tl(dst, dst, 1);
651 }
652
653 static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
654 {
655 gen_helper_array8(dst, src1, src2);
656 tcg_gen_shli_tl(dst, dst, 2);
657 }
658
659 static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
660 {
661 #ifdef TARGET_SPARC64
662 gen_helper_fpack16(dst, cpu_gsr, src);
663 #else
664 g_assert_not_reached();
665 #endif
666 }
667
668 static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
669 {
670 #ifdef TARGET_SPARC64
671 gen_helper_fpackfix(dst, cpu_gsr, src);
672 #else
673 g_assert_not_reached();
674 #endif
675 }
676
677 static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
678 {
679 #ifdef TARGET_SPARC64
680 gen_helper_fpack32(dst, cpu_gsr, src1, src2);
681 #else
682 g_assert_not_reached();
683 #endif
684 }
685
686 static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
687 {
688 #ifdef TARGET_SPARC64
689 TCGv t1, t2, shift;
690
691 t1 = tcg_temp_new();
692 t2 = tcg_temp_new();
693 shift = tcg_temp_new();
694
695 tcg_gen_andi_tl(shift, cpu_gsr, 7);
696 tcg_gen_shli_tl(shift, shift, 3);
697 tcg_gen_shl_tl(t1, s1, shift);
698
699 /*
700 * A shift of 64 does not produce 0 in TCG. Divide this into a
701 * shift of (up to 63) followed by a constant shift of 1.
702 */
703 tcg_gen_xori_tl(shift, shift, 63);
704 tcg_gen_shr_tl(t2, s2, shift);
705 tcg_gen_shri_tl(t2, t2, 1);
706
707 tcg_gen_or_tl(dst, t1, t2);
708 #else
709 g_assert_not_reached();
710 #endif
711 }
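/*
 * For example, with GSR.align == 3 the shift is 24 and
 * dst = (s1 << 24) | (s2 >> 40): bytes 3..7 of s1 followed by bytes
 * 0..2 of s2, i.e. eight bytes taken at offset 3 of the 16-byte
 * concatenation s1:s2.
 */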
712
713 static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
714 {
715 #ifdef TARGET_SPARC64
716 gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
717 #else
718 g_assert_not_reached();
719 #endif
720 }
721
722 // 1
723 static void gen_op_eval_ba(TCGv dst)
724 {
725 tcg_gen_movi_tl(dst, 1);
726 }
727
728 // 0
729 static void gen_op_eval_bn(TCGv dst)
730 {
731 tcg_gen_movi_tl(dst, 0);
732 }
733
734 /*
735 FSR bit field FCC1 | FCC0:
736 0 =
737 1 <
738 2 >
739 3 unordered
740 */
741 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
742 unsigned int fcc_offset)
743 {
744 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
745 tcg_gen_andi_tl(reg, reg, 0x1);
746 }
747
748 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
749 {
750 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
751 tcg_gen_andi_tl(reg, reg, 0x1);
752 }
753
754 // !0: FCC0 | FCC1
755 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
756 {
757 TCGv t0 = tcg_temp_new();
758 gen_mov_reg_FCC0(dst, src, fcc_offset);
759 gen_mov_reg_FCC1(t0, src, fcc_offset);
760 tcg_gen_or_tl(dst, dst, t0);
761 }
762
763 // 1 or 2: FCC0 ^ FCC1
764 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
765 {
766 TCGv t0 = tcg_temp_new();
767 gen_mov_reg_FCC0(dst, src, fcc_offset);
768 gen_mov_reg_FCC1(t0, src, fcc_offset);
769 tcg_gen_xor_tl(dst, dst, t0);
770 }
771
772 // 1 or 3: FCC0
773 static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
774 {
775 gen_mov_reg_FCC0(dst, src, fcc_offset);
776 }
777
778 // 1: FCC0 & !FCC1
779 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
780 {
781 TCGv t0 = tcg_temp_new();
782 gen_mov_reg_FCC0(dst, src, fcc_offset);
783 gen_mov_reg_FCC1(t0, src, fcc_offset);
784 tcg_gen_andc_tl(dst, dst, t0);
785 }
786
787 // 2 or 3: FCC1
788 static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
789 {
790 gen_mov_reg_FCC1(dst, src, fcc_offset);
791 }
792
793 // 2: !FCC0 & FCC1
794 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
795 {
796 TCGv t0 = tcg_temp_new();
797 gen_mov_reg_FCC0(dst, src, fcc_offset);
798 gen_mov_reg_FCC1(t0, src, fcc_offset);
799 tcg_gen_andc_tl(dst, t0, dst);
800 }
801
802 // 3: FCC0 & FCC1
803 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
804 {
805 TCGv t0 = tcg_temp_new();
806 gen_mov_reg_FCC0(dst, src, fcc_offset);
807 gen_mov_reg_FCC1(t0, src, fcc_offset);
808 tcg_gen_and_tl(dst, dst, t0);
809 }
810
811 // 0: !(FCC0 | FCC1)
812 static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
813 {
814 TCGv t0 = tcg_temp_new();
815 gen_mov_reg_FCC0(dst, src, fcc_offset);
816 gen_mov_reg_FCC1(t0, src, fcc_offset);
817 tcg_gen_or_tl(dst, dst, t0);
818 tcg_gen_xori_tl(dst, dst, 0x1);
819 }
820
821 // 0 or 3: !(FCC0 ^ FCC1)
822 static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
823 {
824 TCGv t0 = tcg_temp_new();
825 gen_mov_reg_FCC0(dst, src, fcc_offset);
826 gen_mov_reg_FCC1(t0, src, fcc_offset);
827 tcg_gen_xor_tl(dst, dst, t0);
828 tcg_gen_xori_tl(dst, dst, 0x1);
829 }
830
831 // 0 or 2: !FCC0
832 static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
833 {
834 gen_mov_reg_FCC0(dst, src, fcc_offset);
835 tcg_gen_xori_tl(dst, dst, 0x1);
836 }
837
838 // !1: !(FCC0 & !FCC1)
839 static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
840 {
841 TCGv t0 = tcg_temp_new();
842 gen_mov_reg_FCC0(dst, src, fcc_offset);
843 gen_mov_reg_FCC1(t0, src, fcc_offset);
844 tcg_gen_andc_tl(dst, dst, t0);
845 tcg_gen_xori_tl(dst, dst, 0x1);
846 }
847
848 // 0 or 1: !FCC1
849 static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
850 {
851 gen_mov_reg_FCC1(dst, src, fcc_offset);
852 tcg_gen_xori_tl(dst, dst, 0x1);
853 }
854
855 // !2: !(!FCC0 & FCC1)
856 static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
857 {
858 TCGv t0 = tcg_temp_new();
859 gen_mov_reg_FCC0(dst, src, fcc_offset);
860 gen_mov_reg_FCC1(t0, src, fcc_offset);
861 tcg_gen_andc_tl(dst, t0, dst);
862 tcg_gen_xori_tl(dst, dst, 0x1);
863 }
864
865 // !3: !(FCC0 & FCC1)
866 static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
867 {
868 TCGv t0 = tcg_temp_new();
869 gen_mov_reg_FCC0(dst, src, fcc_offset);
870 gen_mov_reg_FCC1(t0, src, fcc_offset);
871 tcg_gen_and_tl(dst, dst, t0);
872 tcg_gen_xori_tl(dst, dst, 0x1);
873 }
874
875 static void finishing_insn(DisasContext *dc)
876 {
877 /*
878 * From here, there is no future path through an unwinding exception.
879 * If the current insn cannot raise an exception, the computation of
880 * cpu_cond may be able to be elided.
881 */
882 if (dc->cpu_cond_live) {
883 tcg_gen_discard_tl(cpu_cond);
884 dc->cpu_cond_live = false;
885 }
886 }
887
888 static void gen_generic_branch(DisasContext *dc)
889 {
890 TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
891 TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
892 TCGv c2 = tcg_constant_tl(dc->jump.c2);
893
894 tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
895 }
896
897 /* Call this function before using the condition register, as it may
898 have been set for a jump. */
899 static void flush_cond(DisasContext *dc)
900 {
901 if (dc->npc == JUMP_PC) {
902 gen_generic_branch(dc);
903 dc->npc = DYNAMIC_PC_LOOKUP;
904 }
905 }
906
907 static void save_npc(DisasContext *dc)
908 {
909 if (dc->npc & 3) {
910 switch (dc->npc) {
911 case JUMP_PC:
912 gen_generic_branch(dc);
913 dc->npc = DYNAMIC_PC_LOOKUP;
914 break;
915 case DYNAMIC_PC:
916 case DYNAMIC_PC_LOOKUP:
917 break;
918 default:
919 g_assert_not_reached();
920 }
921 } else {
922 tcg_gen_movi_tl(cpu_npc, dc->npc);
923 }
924 }
925
926 static void save_state(DisasContext *dc)
927 {
928 tcg_gen_movi_tl(cpu_pc, dc->pc);
929 save_npc(dc);
930 }
931
932 static void gen_exception(DisasContext *dc, int which)
933 {
934 finishing_insn(dc);
935 save_state(dc);
936 gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
937 dc->base.is_jmp = DISAS_NORETURN;
938 }
939
940 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
941 {
942 DisasDelayException *e = g_new0(DisasDelayException, 1);
943
944 e->next = dc->delay_excp_list;
945 dc->delay_excp_list = e;
946
947 e->lab = gen_new_label();
948 e->excp = excp;
949 e->pc = dc->pc;
950 /* Caller must have used flush_cond before branch. */
951 assert(e->npc != JUMP_PC);
952 e->npc = dc->npc;
953
954 return e->lab;
955 }
956
957 static TCGLabel *delay_exception(DisasContext *dc, int excp)
958 {
959 return delay_exceptionv(dc, tcg_constant_i32(excp));
960 }
961
962 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
963 {
964 TCGv t = tcg_temp_new();
965 TCGLabel *lab;
966
967 tcg_gen_andi_tl(t, addr, mask);
968
969 flush_cond(dc);
970 lab = delay_exception(dc, TT_UNALIGNED);
971 tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
972 }
973
974 static void gen_mov_pc_npc(DisasContext *dc)
975 {
976 finishing_insn(dc);
977
978 if (dc->npc & 3) {
979 switch (dc->npc) {
980 case JUMP_PC:
981 gen_generic_branch(dc);
982 tcg_gen_mov_tl(cpu_pc, cpu_npc);
983 dc->pc = DYNAMIC_PC_LOOKUP;
984 break;
985 case DYNAMIC_PC:
986 case DYNAMIC_PC_LOOKUP:
987 tcg_gen_mov_tl(cpu_pc, cpu_npc);
988 dc->pc = dc->npc;
989 break;
990 default:
991 g_assert_not_reached();
992 }
993 } else {
994 dc->pc = dc->npc;
995 }
996 }
997
998 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
999 DisasContext *dc)
1000 {
1001 TCGv t1;
1002
1003 cmp->c1 = t1 = tcg_temp_new();
1004 cmp->c2 = 0;
1005
1006 switch (cond & 7) {
1007 case 0x0: /* never */
1008 cmp->cond = TCG_COND_NEVER;
1009 cmp->c1 = tcg_constant_tl(0);
1010 break;
1011
1012 case 0x1: /* eq: Z */
1013 cmp->cond = TCG_COND_EQ;
1014 if (TARGET_LONG_BITS == 32 || xcc) {
1015 tcg_gen_mov_tl(t1, cpu_cc_Z);
1016 } else {
1017 tcg_gen_ext32u_tl(t1, cpu_icc_Z);
1018 }
1019 break;
1020
1021 case 0x2: /* le: Z | (N ^ V) */
1022 /*
1023 * Simplify:
1024 * cc_Z || (N ^ V) < 0 NE
1025 * cc_Z && !((N ^ V) < 0) EQ
1026 * cc_Z & ~((N ^ V) >> (B - 1)) EQ (B = 32 for icc, 64 for xcc)
1027 */
1028 cmp->cond = TCG_COND_EQ;
1029 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1030 tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
1031 tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
1032 if (TARGET_LONG_BITS == 64 && !xcc) {
1033 tcg_gen_ext32u_tl(t1, t1);
1034 }
1035 break;
1036
1037 case 0x3: /* lt: N ^ V */
1038 cmp->cond = TCG_COND_LT;
1039 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1040 if (TARGET_LONG_BITS == 64 && !xcc) {
1041 tcg_gen_ext32s_tl(t1, t1);
1042 }
1043 break;
1044
1045 case 0x4: /* leu: Z | C */
1046 /*
1047 * Simplify:
1048 * cc_Z == 0 || cc_C != 0 NE
1049 * cc_Z != 0 && cc_C == 0 EQ
1050 * cc_Z & (cc_C ? 0 : -1) EQ
1051 * cc_Z & (cc_C - 1) EQ
1052 */
1053 cmp->cond = TCG_COND_EQ;
1054 if (TARGET_LONG_BITS == 32 || xcc) {
1055 tcg_gen_subi_tl(t1, cpu_cc_C, 1);
1056 tcg_gen_and_tl(t1, t1, cpu_cc_Z);
1057 } else {
1058 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1059 tcg_gen_subi_tl(t1, t1, 1);
1060 tcg_gen_and_tl(t1, t1, cpu_icc_Z);
1061 tcg_gen_ext32u_tl(t1, t1);
1062 }
1063 break;
1064
1065 case 0x5: /* ltu: C */
1066 cmp->cond = TCG_COND_NE;
1067 if (TARGET_LONG_BITS == 32 || xcc) {
1068 tcg_gen_mov_tl(t1, cpu_cc_C);
1069 } else {
1070 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1071 }
1072 break;
1073
1074 case 0x6: /* neg: N */
1075 cmp->cond = TCG_COND_LT;
1076 if (TARGET_LONG_BITS == 32 || xcc) {
1077 tcg_gen_mov_tl(t1, cpu_cc_N);
1078 } else {
1079 tcg_gen_ext32s_tl(t1, cpu_cc_N);
1080 }
1081 break;
1082
1083 case 0x7: /* vs: V */
1084 cmp->cond = TCG_COND_LT;
1085 if (TARGET_LONG_BITS == 32 || xcc) {
1086 tcg_gen_mov_tl(t1, cpu_cc_V);
1087 } else {
1088 tcg_gen_ext32s_tl(t1, cpu_cc_V);
1089 }
1090 break;
1091 }
1092 if (cond & 8) {
1093 cmp->cond = tcg_invert_cond(cmp->cond);
1094 }
1095 }
1096
1097 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1098 {
1099 unsigned int offset;
1100 TCGv r_dst, fsr;
1101
1102 /* For now we still generate a straight boolean result. */
1103 cmp->cond = TCG_COND_NE;
1104 cmp->c1 = r_dst = tcg_temp_new();
1105 cmp->c2 = 0;
1106
1107 switch (cc) {
1108 default:
1109 case 0x0:
1110 offset = 0;
1111 break;
1112 case 0x1:
1113 offset = 32 - 10;
1114 break;
1115 case 0x2:
1116 offset = 34 - 10;
1117 break;
1118 case 0x3:
1119 offset = 36 - 10;
1120 break;
1121 }
1122
1123 fsr = tcg_temp_new();
1124 tcg_gen_ld_tl(fsr, tcg_env, offsetof(CPUSPARCState, fsr));
1125 switch (cond) {
1126 case 0x0:
1127 gen_op_eval_bn(r_dst);
1128 break;
1129 case 0x1:
1130 gen_op_eval_fbne(r_dst, fsr, offset);
1131 break;
1132 case 0x2:
1133 gen_op_eval_fblg(r_dst, fsr, offset);
1134 break;
1135 case 0x3:
1136 gen_op_eval_fbul(r_dst, fsr, offset);
1137 break;
1138 case 0x4:
1139 gen_op_eval_fbl(r_dst, fsr, offset);
1140 break;
1141 case 0x5:
1142 gen_op_eval_fbug(r_dst, fsr, offset);
1143 break;
1144 case 0x6:
1145 gen_op_eval_fbg(r_dst, fsr, offset);
1146 break;
1147 case 0x7:
1148 gen_op_eval_fbu(r_dst, fsr, offset);
1149 break;
1150 case 0x8:
1151 gen_op_eval_ba(r_dst);
1152 break;
1153 case 0x9:
1154 gen_op_eval_fbe(r_dst, fsr, offset);
1155 break;
1156 case 0xa:
1157 gen_op_eval_fbue(r_dst, fsr, offset);
1158 break;
1159 case 0xb:
1160 gen_op_eval_fbge(r_dst, fsr, offset);
1161 break;
1162 case 0xc:
1163 gen_op_eval_fbuge(r_dst, fsr, offset);
1164 break;
1165 case 0xd:
1166 gen_op_eval_fble(r_dst, fsr, offset);
1167 break;
1168 case 0xe:
1169 gen_op_eval_fbule(r_dst, fsr, offset);
1170 break;
1171 case 0xf:
1172 gen_op_eval_fbo(r_dst, fsr, offset);
1173 break;
1174 }
1175 }
1176
1177 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1178 {
1179 static const TCGCond cond_reg[4] = {
1180 TCG_COND_NEVER, /* reserved */
1181 TCG_COND_EQ,
1182 TCG_COND_LE,
1183 TCG_COND_LT,
1184 };
1185 TCGCond tcond;
1186
1187 if ((cond & 3) == 0) {
1188 return false;
1189 }
1190 tcond = cond_reg[cond & 3];
1191 if (cond & 4) {
1192 tcond = tcg_invert_cond(tcond);
1193 }
1194
1195 cmp->cond = tcond;
1196 cmp->c1 = tcg_temp_new();
1197 cmp->c2 = 0;
1198 tcg_gen_mov_tl(cmp->c1, r_src);
1199 return true;
1200 }
1201
1202 static void gen_op_clear_ieee_excp_and_FTT(void)
1203 {
1204 tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
1205 offsetof(CPUSPARCState, fsr_cexc_ftt));
1206 }
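/*
 * env->fsr_cexc_ftt packs the FSR.cexc and FSR.ftt fields, kept apart
 * from the rest of FSR; storing zero clears both at once.
 */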
1207
1208 static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
1209 {
1210 gen_op_clear_ieee_excp_and_FTT();
1211 tcg_gen_mov_i32(dst, src);
1212 }
1213
1214 static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
1215 {
1216 gen_op_clear_ieee_excp_and_FTT();
1217 tcg_gen_xori_i32(dst, src, 1u << 31);
1218 }
1219
1220 static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
1221 {
1222 gen_op_clear_ieee_excp_and_FTT();
1223 tcg_gen_andi_i32(dst, src, ~(1u << 31));
1224 }
1225
1226 static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
1227 {
1228 gen_op_clear_ieee_excp_and_FTT();
1229 tcg_gen_mov_i64(dst, src);
1230 }
1231
1232 static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
1233 {
1234 gen_op_clear_ieee_excp_and_FTT();
1235 tcg_gen_xori_i64(dst, src, 1ull << 63);
1236 }
1237
1238 static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
1239 {
1240 gen_op_clear_ieee_excp_and_FTT();
1241 tcg_gen_andi_i64(dst, src, ~(1ull << 63));
1242 }
1243
1244 static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
1245 {
1246 TCGv_i64 l = tcg_temp_new_i64();
1247 TCGv_i64 h = tcg_temp_new_i64();
1248
1249 tcg_gen_extr_i128_i64(l, h, src);
1250 tcg_gen_xori_i64(h, h, 1ull << 63);
1251 tcg_gen_concat_i64_i128(dst, l, h);
1252 }
1253
1254 static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
1255 {
1256 TCGv_i64 l = tcg_temp_new_i64();
1257 TCGv_i64 h = tcg_temp_new_i64();
1258
1259 tcg_gen_extr_i128_i64(l, h, src);
1260 tcg_gen_andi_i64(h, h, ~(1ull << 63));
1261 tcg_gen_concat_i64_i128(dst, l, h);
1262 }
1263
1264 #ifdef TARGET_SPARC64
1265 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1266 {
1267 switch (fccno) {
1268 case 0:
1269 gen_helper_fcmps(tcg_env, r_rs1, r_rs2);
1270 break;
1271 case 1:
1272 gen_helper_fcmps_fcc1(tcg_env, r_rs1, r_rs2);
1273 break;
1274 case 2:
1275 gen_helper_fcmps_fcc2(tcg_env, r_rs1, r_rs2);
1276 break;
1277 case 3:
1278 gen_helper_fcmps_fcc3(tcg_env, r_rs1, r_rs2);
1279 break;
1280 }
1281 }
1282
1283 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1284 {
1285 switch (fccno) {
1286 case 0:
1287 gen_helper_fcmpd(tcg_env, r_rs1, r_rs2);
1288 break;
1289 case 1:
1290 gen_helper_fcmpd_fcc1(tcg_env, r_rs1, r_rs2);
1291 break;
1292 case 2:
1293 gen_helper_fcmpd_fcc2(tcg_env, r_rs1, r_rs2);
1294 break;
1295 case 3:
1296 gen_helper_fcmpd_fcc3(tcg_env, r_rs1, r_rs2);
1297 break;
1298 }
1299 }
1300
1301 static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1302 {
1303 switch (fccno) {
1304 case 0:
1305 gen_helper_fcmpq(tcg_env, r_rs1, r_rs2);
1306 break;
1307 case 1:
1308 gen_helper_fcmpq_fcc1(tcg_env, r_rs1, r_rs2);
1309 break;
1310 case 2:
1311 gen_helper_fcmpq_fcc2(tcg_env, r_rs1, r_rs2);
1312 break;
1313 case 3:
1314 gen_helper_fcmpq_fcc3(tcg_env, r_rs1, r_rs2);
1315 break;
1316 }
1317 }
1318
1319 static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1320 {
1321 switch (fccno) {
1322 case 0:
1323 gen_helper_fcmpes(tcg_env, r_rs1, r_rs2);
1324 break;
1325 case 1:
1326 gen_helper_fcmpes_fcc1(tcg_env, r_rs1, r_rs2);
1327 break;
1328 case 2:
1329 gen_helper_fcmpes_fcc2(tcg_env, r_rs1, r_rs2);
1330 break;
1331 case 3:
1332 gen_helper_fcmpes_fcc3(tcg_env, r_rs1, r_rs2);
1333 break;
1334 }
1335 }
1336
1337 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1338 {
1339 switch (fccno) {
1340 case 0:
1341 gen_helper_fcmped(tcg_env, r_rs1, r_rs2);
1342 break;
1343 case 1:
1344 gen_helper_fcmped_fcc1(tcg_env, r_rs1, r_rs2);
1345 break;
1346 case 2:
1347 gen_helper_fcmped_fcc2(tcg_env, r_rs1, r_rs2);
1348 break;
1349 case 3:
1350 gen_helper_fcmped_fcc3(tcg_env, r_rs1, r_rs2);
1351 break;
1352 }
1353 }
1354
1355 static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1356 {
1357 switch (fccno) {
1358 case 0:
1359 gen_helper_fcmpeq(tcg_env, r_rs1, r_rs2);
1360 break;
1361 case 1:
1362 gen_helper_fcmpeq_fcc1(tcg_env, r_rs1, r_rs2);
1363 break;
1364 case 2:
1365 gen_helper_fcmpeq_fcc2(tcg_env, r_rs1, r_rs2);
1366 break;
1367 case 3:
1368 gen_helper_fcmpeq_fcc3(tcg_env, r_rs1, r_rs2);
1369 break;
1370 }
1371 }
1372
1373 #else
1374
1375 static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1376 {
1377 gen_helper_fcmps(tcg_env, r_rs1, r_rs2);
1378 }
1379
1380 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1381 {
1382 gen_helper_fcmpd(tcg_env, r_rs1, r_rs2);
1383 }
1384
1385 static void gen_op_fcmpq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1386 {
1387 gen_helper_fcmpq(tcg_env, r_rs1, r_rs2);
1388 }
1389
1390 static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1391 {
1392 gen_helper_fcmpes(tcg_env, r_rs1, r_rs2);
1393 }
1394
1395 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1396 {
1397 gen_helper_fcmped(tcg_env, r_rs1, r_rs2);
1398 }
1399
1400 static void gen_op_fcmpeq(int fccno, TCGv_i128 r_rs1, TCGv_i128 r_rs2)
1401 {
1402 gen_helper_fcmpeq(tcg_env, r_rs1, r_rs2);
1403 }
1404 #endif
1405
1406 static void gen_op_fpexception_im(DisasContext *dc, int ftt)
1407 {
1408 /*
1409 * CEXC is only set when successfully completing an FPop,
1410 * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
1411 * Thus we can simply store FTT into this field.
1412 */
1413 tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
1414 offsetof(CPUSPARCState, fsr_cexc_ftt));
1415 gen_exception(dc, TT_FP_EXCP);
1416 }
1417
1418 static int gen_trap_ifnofpu(DisasContext *dc)
1419 {
1420 #if !defined(CONFIG_USER_ONLY)
1421 if (!dc->fpu_enabled) {
1422 gen_exception(dc, TT_NFPU_INSN);
1423 return 1;
1424 }
1425 #endif
1426 return 0;
1427 }
1428
1429 /* asi moves */
1430 typedef enum {
1431 GET_ASI_HELPER,
1432 GET_ASI_EXCP,
1433 GET_ASI_DIRECT,
1434 GET_ASI_DTWINX,
1435 GET_ASI_BLOCK,
1436 GET_ASI_SHORT,
1437 GET_ASI_BCOPY,
1438 GET_ASI_BFILL,
1439 } ASIType;
1440
1441 typedef struct {
1442 ASIType type;
1443 int asi;
1444 int mem_idx;
1445 MemOp memop;
1446 } DisasASI;
1447
1448 /*
1449 * Build DisasASI.
1450 * For asi == -1, treat as non-asi.
1451 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1452 */
1453 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1454 {
1455 ASIType type = GET_ASI_HELPER;
1456 int mem_idx = dc->mem_idx;
1457
1458 if (asi == -1) {
1459 /* Artificial "non-asi" case. */
1460 type = GET_ASI_DIRECT;
1461 goto done;
1462 }
1463
1464 #ifndef TARGET_SPARC64
1465 /* Before v9, all asis are immediate and privileged. */
1466 if (asi < 0) {
1467 gen_exception(dc, TT_ILL_INSN);
1468 type = GET_ASI_EXCP;
1469 } else if (supervisor(dc)
1470 /* Note that LEON accepts ASI_USERDATA in user mode, for
1471 use with CASA. Also note that previous versions of
1472 QEMU allowed (and old versions of gcc emitted) ASI_P
1473 for LEON, which is incorrect. */
1474 || (asi == ASI_USERDATA
1475 && (dc->def->features & CPU_FEATURE_CASA))) {
1476 switch (asi) {
1477 case ASI_USERDATA: /* User data access */
1478 mem_idx = MMU_USER_IDX;
1479 type = GET_ASI_DIRECT;
1480 break;
1481 case ASI_KERNELDATA: /* Supervisor data access */
1482 mem_idx = MMU_KERNEL_IDX;
1483 type = GET_ASI_DIRECT;
1484 break;
1485 case ASI_M_BYPASS: /* MMU passthrough */
1486 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1487 mem_idx = MMU_PHYS_IDX;
1488 type = GET_ASI_DIRECT;
1489 break;
1490 case ASI_M_BCOPY: /* Block copy, sta access */
1491 mem_idx = MMU_KERNEL_IDX;
1492 type = GET_ASI_BCOPY;
1493 break;
1494 case ASI_M_BFILL: /* Block fill, stda access */
1495 mem_idx = MMU_KERNEL_IDX;
1496 type = GET_ASI_BFILL;
1497 break;
1498 }
1499
1500 /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
1501 * permissions check in get_physical_address(..).
1502 */
1503 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1504 } else {
1505 gen_exception(dc, TT_PRIV_INSN);
1506 type = GET_ASI_EXCP;
1507 }
1508 #else
1509 if (asi < 0) {
1510 asi = dc->asi;
1511 }
1512 /* With v9, all asis below 0x80 are privileged. */
1513 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1514 down that bit into DisasContext. For the moment that's ok,
1515 since the direct implementations below don't have any ASIs
1516 in the restricted [0x30, 0x7f] range, and the check will be
1517 done properly in the helper. */
1518 if (!supervisor(dc) && asi < 0x80) {
1519 gen_exception(dc, TT_PRIV_ACT);
1520 type = GET_ASI_EXCP;
1521 } else {
1522 switch (asi) {
1523 case ASI_REAL: /* Bypass */
1524 case ASI_REAL_IO: /* Bypass, non-cacheable */
1525 case ASI_REAL_L: /* Bypass LE */
1526 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1527 case ASI_TWINX_REAL: /* Real address, twinx */
1528 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1529 case ASI_QUAD_LDD_PHYS:
1530 case ASI_QUAD_LDD_PHYS_L:
1531 mem_idx = MMU_PHYS_IDX;
1532 break;
1533 case ASI_N: /* Nucleus */
1534 case ASI_NL: /* Nucleus LE */
1535 case ASI_TWINX_N:
1536 case ASI_TWINX_NL:
1537 case ASI_NUCLEUS_QUAD_LDD:
1538 case ASI_NUCLEUS_QUAD_LDD_L:
1539 if (hypervisor(dc)) {
1540 mem_idx = MMU_PHYS_IDX;
1541 } else {
1542 mem_idx = MMU_NUCLEUS_IDX;
1543 }
1544 break;
1545 case ASI_AIUP: /* As if user primary */
1546 case ASI_AIUPL: /* As if user primary LE */
1547 case ASI_TWINX_AIUP:
1548 case ASI_TWINX_AIUP_L:
1549 case ASI_BLK_AIUP_4V:
1550 case ASI_BLK_AIUP_L_4V:
1551 case ASI_BLK_AIUP:
1552 case ASI_BLK_AIUPL:
1553 mem_idx = MMU_USER_IDX;
1554 break;
1555 case ASI_AIUS: /* As if user secondary */
1556 case ASI_AIUSL: /* As if user secondary LE */
1557 case ASI_TWINX_AIUS:
1558 case ASI_TWINX_AIUS_L:
1559 case ASI_BLK_AIUS_4V:
1560 case ASI_BLK_AIUS_L_4V:
1561 case ASI_BLK_AIUS:
1562 case ASI_BLK_AIUSL:
1563 mem_idx = MMU_USER_SECONDARY_IDX;
1564 break;
1565 case ASI_S: /* Secondary */
1566 case ASI_SL: /* Secondary LE */
1567 case ASI_TWINX_S:
1568 case ASI_TWINX_SL:
1569 case ASI_BLK_COMMIT_S:
1570 case ASI_BLK_S:
1571 case ASI_BLK_SL:
1572 case ASI_FL8_S:
1573 case ASI_FL8_SL:
1574 case ASI_FL16_S:
1575 case ASI_FL16_SL:
1576 if (mem_idx == MMU_USER_IDX) {
1577 mem_idx = MMU_USER_SECONDARY_IDX;
1578 } else if (mem_idx == MMU_KERNEL_IDX) {
1579 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1580 }
1581 break;
1582 case ASI_P: /* Primary */
1583 case ASI_PL: /* Primary LE */
1584 case ASI_TWINX_P:
1585 case ASI_TWINX_PL:
1586 case ASI_BLK_COMMIT_P:
1587 case ASI_BLK_P:
1588 case ASI_BLK_PL:
1589 case ASI_FL8_P:
1590 case ASI_FL8_PL:
1591 case ASI_FL16_P:
1592 case ASI_FL16_PL:
1593 break;
1594 }
1595 switch (asi) {
1596 case ASI_REAL:
1597 case ASI_REAL_IO:
1598 case ASI_REAL_L:
1599 case ASI_REAL_IO_L:
1600 case ASI_N:
1601 case ASI_NL:
1602 case ASI_AIUP:
1603 case ASI_AIUPL:
1604 case ASI_AIUS:
1605 case ASI_AIUSL:
1606 case ASI_S:
1607 case ASI_SL:
1608 case ASI_P:
1609 case ASI_PL:
1610 type = GET_ASI_DIRECT;
1611 break;
1612 case ASI_TWINX_REAL:
1613 case ASI_TWINX_REAL_L:
1614 case ASI_TWINX_N:
1615 case ASI_TWINX_NL:
1616 case ASI_TWINX_AIUP:
1617 case ASI_TWINX_AIUP_L:
1618 case ASI_TWINX_AIUS:
1619 case ASI_TWINX_AIUS_L:
1620 case ASI_TWINX_P:
1621 case ASI_TWINX_PL:
1622 case ASI_TWINX_S:
1623 case ASI_TWINX_SL:
1624 case ASI_QUAD_LDD_PHYS:
1625 case ASI_QUAD_LDD_PHYS_L:
1626 case ASI_NUCLEUS_QUAD_LDD:
1627 case ASI_NUCLEUS_QUAD_LDD_L:
1628 type = GET_ASI_DTWINX;
1629 break;
1630 case ASI_BLK_COMMIT_P:
1631 case ASI_BLK_COMMIT_S:
1632 case ASI_BLK_AIUP_4V:
1633 case ASI_BLK_AIUP_L_4V:
1634 case ASI_BLK_AIUP:
1635 case ASI_BLK_AIUPL:
1636 case ASI_BLK_AIUS_4V:
1637 case ASI_BLK_AIUS_L_4V:
1638 case ASI_BLK_AIUS:
1639 case ASI_BLK_AIUSL:
1640 case ASI_BLK_S:
1641 case ASI_BLK_SL:
1642 case ASI_BLK_P:
1643 case ASI_BLK_PL:
1644 type = GET_ASI_BLOCK;
1645 break;
1646 case ASI_FL8_S:
1647 case ASI_FL8_SL:
1648 case ASI_FL8_P:
1649 case ASI_FL8_PL:
1650 memop = MO_UB;
1651 type = GET_ASI_SHORT;
1652 break;
1653 case ASI_FL16_S:
1654 case ASI_FL16_SL:
1655 case ASI_FL16_P:
1656 case ASI_FL16_PL:
1657 memop = MO_TEUW;
1658 type = GET_ASI_SHORT;
1659 break;
1660 }
1661 /* The little-endian asis all have bit 3 set. */
1662 if (asi & 8) {
1663 memop ^= MO_BSWAP;
1664 }
1665 }
1666 #endif
1667
1668 done:
1669 return (DisasASI){ type, asi, mem_idx, memop };
1670 }
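/*
 * Typical use from a memory trans_* function (illustrative sketch only;
 * the decode field names are hypothetical):
 *
 *     DisasASI da = resolve_asi(dc, a->asi, MO_TEUL);
 *     gen_ld_asi(dc, &da, dst, addr);
 */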
1671
1672 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1673 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1674 TCGv_i32 asi, TCGv_i32 mop)
1675 {
1676 g_assert_not_reached();
1677 }
1678
1679 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1680 TCGv_i32 asi, TCGv_i32 mop)
1681 {
1682 g_assert_not_reached();
1683 }
1684 #endif
1685
1686 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1687 {
1688 switch (da->type) {
1689 case GET_ASI_EXCP:
1690 break;
1691 case GET_ASI_DTWINX: /* Reserved for ldda. */
1692 gen_exception(dc, TT_ILL_INSN);
1693 break;
1694 case GET_ASI_DIRECT:
1695 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1696 break;
1697 default:
1698 {
1699 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1700 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1701
1702 save_state(dc);
1703 #ifdef TARGET_SPARC64
1704 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1705 #else
1706 {
1707 TCGv_i64 t64 = tcg_temp_new_i64();
1708 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1709 tcg_gen_trunc_i64_tl(dst, t64);
1710 }
1711 #endif
1712 }
1713 break;
1714 }
1715 }
1716
1717 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1718 {
1719 switch (da->type) {
1720 case GET_ASI_EXCP:
1721 break;
1722
1723 case GET_ASI_DTWINX: /* Reserved for stda. */
1724 if (TARGET_LONG_BITS == 32) {
1725 gen_exception(dc, TT_ILL_INSN);
1726 break;
1727 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1728 /* Pre OpenSPARC CPUs don't have these */
1729 gen_exception(dc, TT_ILL_INSN);
1730 break;
1731 }
1732 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1733 /* fall through */
1734
1735 case GET_ASI_DIRECT:
1736 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1737 break;
1738
1739 case GET_ASI_BCOPY:
1740 assert(TARGET_LONG_BITS == 32);
1741 /*
1742 * Copy 32 bytes from the address in SRC to ADDR.
1743 *
1744 * From Ross RT625 hyperSPARC manual, section 4.6:
1745 * "Block Copy and Block Fill will work only on cache line boundaries."
1746 *
1747 * It does not specify whether an unaligned address is truncated or trapped.
1748 * Previous QEMU behaviour was to truncate to 4-byte alignment, which
1749 * is obviously wrong. The only place I can see this used is in the
1750 * Linux kernel which begins with page alignment, advancing by 32,
1751 * so is always aligned. Assume truncation as the simpler option.
1752 *
1753 * Since the loads and stores are paired, allow the copy to happen
1754 * in the host endianness. The copy need not be atomic.
1755 */
1756 {
1757 MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1758 TCGv saddr = tcg_temp_new();
1759 TCGv daddr = tcg_temp_new();
1760 TCGv_i128 tmp = tcg_temp_new_i128();
1761
1762 tcg_gen_andi_tl(saddr, src, -32);
1763 tcg_gen_andi_tl(daddr, addr, -32);
1764 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1765 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1766 tcg_gen_addi_tl(saddr, saddr, 16);
1767 tcg_gen_addi_tl(daddr, daddr, 16);
1768 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1769 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1770 }
1771 break;
1772
1773 default:
1774 {
1775 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1776 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1777
1778 save_state(dc);
1779 #ifdef TARGET_SPARC64
1780 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1781 #else
1782 {
1783 TCGv_i64 t64 = tcg_temp_new_i64();
1784 tcg_gen_extu_tl_i64(t64, src);
1785 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1786 }
1787 #endif
1788
1789 /* A write to a TLB register may alter page maps. End the TB. */
1790 dc->npc = DYNAMIC_PC;
1791 }
1792 break;
1793 }
1794 }
1795
1796 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1797 TCGv dst, TCGv src, TCGv addr)
1798 {
1799 switch (da->type) {
1800 case GET_ASI_EXCP:
1801 break;
1802 case GET_ASI_DIRECT:
1803 tcg_gen_atomic_xchg_tl(dst, addr, src,
1804 da->mem_idx, da->memop | MO_ALIGN);
1805 break;
1806 default:
1807 /* ??? Should be DAE_invalid_asi. */
1808 gen_exception(dc, TT_DATA_ACCESS);
1809 break;
1810 }
1811 }
1812
1813 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1814 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1815 {
1816 switch (da->type) {
1817 case GET_ASI_EXCP:
1818 return;
1819 case GET_ASI_DIRECT:
1820 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1821 da->mem_idx, da->memop | MO_ALIGN);
1822 break;
1823 default:
1824 /* ??? Should be DAE_invalid_asi. */
1825 gen_exception(dc, TT_DATA_ACCESS);
1826 break;
1827 }
1828 }
1829
1830 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1831 {
1832 switch (da->type) {
1833 case GET_ASI_EXCP:
1834 break;
1835 case GET_ASI_DIRECT:
1836 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1837 da->mem_idx, MO_UB);
1838 break;
1839 default:
1840 /* ??? In theory, this should raise DAE_invalid_asi.
1841 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1842 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1843 gen_helper_exit_atomic(tcg_env);
1844 } else {
1845 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1846 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1847 TCGv_i64 s64, t64;
1848
1849 save_state(dc);
1850 t64 = tcg_temp_new_i64();
1851 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1852
1853 s64 = tcg_constant_i64(0xff);
1854 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1855
1856 tcg_gen_trunc_i64_tl(dst, t64);
1857
1858 /* End the TB. */
1859 dc->npc = DYNAMIC_PC;
1860 }
1861 break;
1862 }
1863 }
1864
1865 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1866 TCGv addr, int rd)
1867 {
1868 MemOp memop = da->memop;
1869 MemOp size = memop & MO_SIZE;
1870 TCGv_i32 d32;
1871 TCGv_i64 d64;
1872 TCGv addr_tmp;
1873
1874 /* TODO: Use 128-bit load/store below. */
1875 if (size == MO_128) {
1876 memop = (memop & ~MO_SIZE) | MO_64;
1877 }
1878
1879 switch (da->type) {
1880 case GET_ASI_EXCP:
1881 break;
1882
1883 case GET_ASI_DIRECT:
1884 memop |= MO_ALIGN_4;
1885 switch (size) {
1886 case MO_32:
1887 d32 = tcg_temp_new_i32();
1888 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1889 gen_store_fpr_F(dc, rd, d32);
1890 break;
1891
1892 case MO_64:
1893 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1894 break;
1895
1896 case MO_128:
1897 d64 = tcg_temp_new_i64();
1898 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1899 addr_tmp = tcg_temp_new();
1900 tcg_gen_addi_tl(addr_tmp, addr, 8);
1901 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1902 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1903 break;
1904 default:
1905 g_assert_not_reached();
1906 }
1907 break;
1908
1909 case GET_ASI_BLOCK:
1910 /* Valid for lddfa on aligned registers only. */
1911 if (orig_size == MO_64 && (rd & 7) == 0) {
1912 /* The first operation checks required alignment. */
1913 addr_tmp = tcg_temp_new();
1914 for (int i = 0; ; ++i) {
1915 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1916 memop | (i == 0 ? MO_ALIGN_64 : 0));
1917 if (i == 7) {
1918 break;
1919 }
1920 tcg_gen_addi_tl(addr_tmp, addr, 8);
1921 addr = addr_tmp;
1922 }
1923 } else {
1924 gen_exception(dc, TT_ILL_INSN);
1925 }
1926 break;
1927
1928 case GET_ASI_SHORT:
1929 /* Valid for lddfa only. */
1930 if (orig_size == MO_64) {
1931 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1932 memop | MO_ALIGN);
1933 } else {
1934 gen_exception(dc, TT_ILL_INSN);
1935 }
1936 break;
1937
1938 default:
1939 {
1940 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1941 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1942
1943 save_state(dc);
1944 /* According to the table in the UA2011 manual, the only
1945 other asis that are valid for ldfa/lddfa/ldqfa are
1946 the NO_FAULT asis. We still need a helper for these,
1947 but we can just use the integer asi helper for them. */
1948 switch (size) {
1949 case MO_32:
1950 d64 = tcg_temp_new_i64();
1951 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1952 d32 = tcg_temp_new_i32();
1953 tcg_gen_extrl_i64_i32(d32, d64);
1954 gen_store_fpr_F(dc, rd, d32);
1955 break;
1956 case MO_64:
1957 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1958 r_asi, r_mop);
1959 break;
1960 case MO_128:
1961 d64 = tcg_temp_new_i64();
1962 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1963 addr_tmp = tcg_temp_new();
1964 tcg_gen_addi_tl(addr_tmp, addr, 8);
1965 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1966 r_asi, r_mop);
1967 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1968 break;
1969 default:
1970 g_assert_not_reached();
1971 }
1972 }
1973 break;
1974 }
1975 }
1976
1977 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1978 TCGv addr, int rd)
1979 {
1980 MemOp memop = da->memop;
1981 MemOp size = memop & MO_SIZE;
1982 TCGv_i32 d32;
1983 TCGv addr_tmp;
1984
1985 /* TODO: Use 128-bit load/store below. */
1986 if (size == MO_128) {
1987 memop = (memop & ~MO_SIZE) | MO_64;
1988 }
1989
1990 switch (da->type) {
1991 case GET_ASI_EXCP:
1992 break;
1993
1994 case GET_ASI_DIRECT:
1995 memop |= MO_ALIGN_4;
1996 switch (size) {
1997 case MO_32:
1998 d32 = gen_load_fpr_F(dc, rd);
1999 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2000 break;
2001 case MO_64:
2002 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2003 memop | MO_ALIGN_4);
2004 break;
2005 case MO_128:
2006 /* Only 4-byte alignment required. However, it is legal for the
2007 cpu to signal the alignment fault, and the OS trap handler is
2008 required to fix it up. Requiring 16-byte alignment here avoids
2009 having to probe the second page before performing the first
2010 write. */
2011 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2012 memop | MO_ALIGN_16);
2013 addr_tmp = tcg_temp_new();
2014 tcg_gen_addi_tl(addr_tmp, addr, 8);
2015 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2016 break;
2017 default:
2018 g_assert_not_reached();
2019 }
2020 break;
2021
2022 case GET_ASI_BLOCK:
2023 /* Valid for stdfa on aligned registers only. */
2024 if (orig_size == MO_64 && (rd & 7) == 0) {
2025 /* The first operation checks required alignment. */
2026 addr_tmp = tcg_temp_new();
2027 for (int i = 0; ; ++i) {
2028 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2029 memop | (i == 0 ? MO_ALIGN_64 : 0));
2030 if (i == 7) {
2031 break;
2032 }
2033 tcg_gen_addi_tl(addr_tmp, addr, 8);
2034 addr = addr_tmp;
2035 }
2036 } else {
2037 gen_exception(dc, TT_ILL_INSN);
2038 }
2039 break;
2040
2041 case GET_ASI_SHORT:
2042 /* Valid for stdfa only. */
2043 if (orig_size == MO_64) {
2044 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2045 memop | MO_ALIGN);
2046 } else {
2047 gen_exception(dc, TT_ILL_INSN);
2048 }
2049 break;
2050
2051 default:
2052 /* According to the table in the UA2011 manual, the only
2053 other asis that are valid for stfa/stdfa/stqfa are
2054 the PST* asis, which aren't currently handled. */
2055 gen_exception(dc, TT_ILL_INSN);
2056 break;
2057 }
2058 }
2059
2060 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2061 {
2062 TCGv hi = gen_dest_gpr(dc, rd);
2063 TCGv lo = gen_dest_gpr(dc, rd + 1);
2064
2065 switch (da->type) {
2066 case GET_ASI_EXCP:
2067 return;
2068
2069 case GET_ASI_DTWINX:
2070 #ifdef TARGET_SPARC64
2071 {
2072 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2073 TCGv_i128 t = tcg_temp_new_i128();
2074
2075 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2076 /*
2077 * Note that LE twinx acts as if each 64-bit register result is
2078 * byte swapped. We perform one 128-bit LE load, so must swap
2079 * the order of the writebacks.
2080 */
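/*
 * E.g. for the LE case: the MO_LE i128 load byte-reverses all
 * 16 bytes, so the low half of T holds mem[addr .. addr+7]
 * byte-swapped and must be written back to rd, not rd + 1.
 */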
2081 if ((mop & MO_BSWAP) == MO_TE) {
2082 tcg_gen_extr_i128_i64(lo, hi, t);
2083 } else {
2084 tcg_gen_extr_i128_i64(hi, lo, t);
2085 }
2086 }
2087 break;
2088 #else
2089 g_assert_not_reached();
2090 #endif
2091
2092 case GET_ASI_DIRECT:
2093 {
2094 TCGv_i64 tmp = tcg_temp_new_i64();
2095
2096 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2097
2098 /* Note that LE ldda acts as if each 32-bit register
2099 result is byte swapped. Having just performed one
2100 64-bit bswap, we now need to swap the writebacks. */
2101 if ((da->memop & MO_BSWAP) == MO_TE) {
2102 tcg_gen_extr_i64_tl(lo, hi, tmp);
2103 } else {
2104 tcg_gen_extr_i64_tl(hi, lo, tmp);
2105 }
2106 }
2107 break;
2108
2109 default:
2110 /* ??? In theory we've handled all of the ASIs that are valid
2111 for ldda, and this should raise DAE_invalid_asi. However,
2112 real hardware allows others. This can be seen with, e.g.,
2113 FreeBSD 10.3 and ASI_IC_TAG. */
2114 {
2115 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2116 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2117 TCGv_i64 tmp = tcg_temp_new_i64();
2118
2119 save_state(dc);
2120 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2121
2122 /* See above. */
2123 if ((da->memop & MO_BSWAP) == MO_TE) {
2124 tcg_gen_extr_i64_tl(lo, hi, tmp);
2125 } else {
2126 tcg_gen_extr_i64_tl(hi, lo, tmp);
2127 }
2128 }
2129 break;
2130 }
2131
2132 gen_store_gpr(dc, rd, hi);
2133 gen_store_gpr(dc, rd + 1, lo);
2134 }
2135
2136 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2137 {
2138 TCGv hi = gen_load_gpr(dc, rd);
2139 TCGv lo = gen_load_gpr(dc, rd + 1);
2140
2141 switch (da->type) {
2142 case GET_ASI_EXCP:
2143 break;
2144
2145 case GET_ASI_DTWINX:
2146 #ifdef TARGET_SPARC64
2147 {
2148 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2149 TCGv_i128 t = tcg_temp_new_i128();
2150
2151 /*
2152 * Note that LE twinx acts as if each 64-bit register result is
2153 * byte swapped. We perform one 128-bit LE store, so must swap
2154 * the order of the construction.
2155 */
2156 if ((mop & MO_BSWAP) == MO_TE) {
2157 tcg_gen_concat_i64_i128(t, lo, hi);
2158 } else {
2159 tcg_gen_concat_i64_i128(t, hi, lo);
2160 }
2161 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2162 }
2163 break;
2164 #else
2165 g_assert_not_reached();
2166 #endif
2167
2168 case GET_ASI_DIRECT:
2169 {
2170 TCGv_i64 t64 = tcg_temp_new_i64();
2171
2172 /* Note that LE stda acts as if each 32-bit register result is
2173 byte swapped. We will perform one 64-bit LE store, so now
2174 we must swap the order of the construction. */
2175 if ((da->memop & MO_BSWAP) == MO_TE) {
2176 tcg_gen_concat_tl_i64(t64, lo, hi);
2177 } else {
2178 tcg_gen_concat_tl_i64(t64, hi, lo);
2179 }
2180 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2181 }
2182 break;
2183
2184 case GET_ASI_BFILL:
2185 assert(TARGET_LONG_BITS == 32);
2186 /*
2187 * Store 32 bytes of [rd:rd+1] to ADDR.
2188 * See comments for GET_ASI_COPY above.
2189 */
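/*
 * I.e. the 8-byte [rd:rd+1] pair is replicated into 16 bytes
 * and stored twice, at (addr & -32) and (addr & -32) + 16.
 */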
2190 {
2191 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2192 TCGv_i64 t8 = tcg_temp_new_i64();
2193 TCGv_i128 t16 = tcg_temp_new_i128();
2194 TCGv daddr = tcg_temp_new();
2195
2196 tcg_gen_concat_tl_i64(t8, lo, hi);
2197 tcg_gen_concat_i64_i128(t16, t8, t8);
2198 tcg_gen_andi_tl(daddr, addr, -32);
2199 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2200 tcg_gen_addi_tl(daddr, daddr, 16);
2201 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2202 }
2203 break;
2204
2205 default:
2206 /* ??? In theory we've handled all of the ASIs that are valid
2207 for stda, and this should raise DAE_invalid_asi. */
2208 {
2209 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2210 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2211 TCGv_i64 t64 = tcg_temp_new_i64();
2212
2213 /* See above. */
2214 if ((da->memop & MO_BSWAP) == MO_TE) {
2215 tcg_gen_concat_tl_i64(t64, lo, hi);
2216 } else {
2217 tcg_gen_concat_tl_i64(t64, hi, lo);
2218 }
2219
2220 save_state(dc);
2221 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2222 }
2223 break;
2224 }
2225 }
2226
2227 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2228 {
2229 #ifdef TARGET_SPARC64
2230 TCGv_i32 c32, zero, dst, s1, s2;
2231 TCGv_i64 c64 = tcg_temp_new_i64();
2232
2233 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2234 or fold the comparison down to 32 bits and use movcond_i32. Choose
2235 the latter. */
2236 c32 = tcg_temp_new_i32();
2237 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2238 tcg_gen_extrl_i64_i32(c32, c64);
2239
2240 s1 = gen_load_fpr_F(dc, rs);
2241 s2 = gen_load_fpr_F(dc, rd);
2242 dst = tcg_temp_new_i32();
2243 zero = tcg_constant_i32(0);
2244
2245 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2246
2247 gen_store_fpr_F(dc, rd, dst);
2248 #else
2249 qemu_build_not_reached();
2250 #endif
2251 }
2252
2253 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2254 {
2255 #ifdef TARGET_SPARC64
2256 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2257 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2258 gen_load_fpr_D(dc, rs),
2259 gen_load_fpr_D(dc, rd));
2260 gen_store_fpr_D(dc, rd, dst);
2261 #else
2262 qemu_build_not_reached();
2263 #endif
2264 }
2265
2266 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2267 {
2268 #ifdef TARGET_SPARC64
2269 int qd = QFPREG(rd);
2270 int qs = QFPREG(rs);
2271 TCGv c2 = tcg_constant_tl(cmp->c2);
2272
2273 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2274 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2275 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2276 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2277
2278 gen_update_fprs_dirty(dc, qd);
2279 #else
2280 qemu_build_not_reached();
2281 #endif
2282 }
2283
2284 #ifdef TARGET_SPARC64
2285 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2286 {
2287 TCGv_i32 r_tl = tcg_temp_new_i32();
2288
2289 /* load env->tl into r_tl */
2290 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2291
2292 /* tl = [0 ... MAXTL_MASK], where MAXTL_MASK + 1 must be a power of 2 */
2293 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2294
2295 /* calculate offset to current trap state from env->ts, reuse r_tl */
2296 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2297 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2298
2299 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2300 {
2301 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2302 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2303 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2304 }
2305 }
2306 #endif
2307
2308 static int extract_dfpreg(DisasContext *dc, int x)
2309 {
2310 return DFPREG(x);
2311 }
2312
2313 static int extract_qfpreg(DisasContext *dc, int x)
2314 {
2315 return QFPREG(x);
2316 }
2317
2318 /* Include the auto-generated decoder. */
2319 #include "decode-insns.c.inc"
2320
2321 #define TRANS(NAME, AVAIL, FUNC, ...) \
2322 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2323 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2324
2325 #define avail_ALL(C) true
2326 #ifdef TARGET_SPARC64
2327 # define avail_32(C) false
2328 # define avail_ASR17(C) false
2329 # define avail_CASA(C) true
2330 # define avail_DIV(C) true
2331 # define avail_MUL(C) true
2332 # define avail_POWERDOWN(C) false
2333 # define avail_64(C) true
2334 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2335 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2336 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2337 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2338 #else
2339 # define avail_32(C) true
2340 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2341 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2342 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2343 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2344 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2345 # define avail_64(C) false
2346 # define avail_GL(C) false
2347 # define avail_HYPV(C) false
2348 # define avail_VIS1(C) false
2349 # define avail_VIS2(C) false
2350 #endif
2351
2352 /* Default case for non-jump instructions. */
2353 static bool advance_pc(DisasContext *dc)
2354 {
2355 TCGLabel *l1;
2356
2357 finishing_insn(dc);
2358
2359 if (dc->npc & 3) {
2360 switch (dc->npc) {
2361 case DYNAMIC_PC:
2362 case DYNAMIC_PC_LOOKUP:
2363 dc->pc = dc->npc;
2364 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2365 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2366 break;
2367
2368 case JUMP_PC:
2369 /* we can do a static jump */
2370 l1 = gen_new_label();
2371 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2372
2373 /* jump not taken */
2374 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2375
2376 /* jump taken */
2377 gen_set_label(l1);
2378 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2379
2380 dc->base.is_jmp = DISAS_NORETURN;
2381 break;
2382
2383 default:
2384 g_assert_not_reached();
2385 }
2386 } else {
2387 dc->pc = dc->npc;
2388 dc->npc = dc->npc + 4;
2389 }
2390 return true;
2391 }
2392
2393 /*
2394 * Major opcodes 00 and 01 -- branches, call, and sethi
2395 */
2396
2397 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2398 bool annul, int disp)
2399 {
2400 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2401 target_ulong npc;
2402
2403 finishing_insn(dc);
2404
2405 if (cmp->cond == TCG_COND_ALWAYS) {
2406 if (annul) {
2407 dc->pc = dest;
2408 dc->npc = dest + 4;
2409 } else {
2410 gen_mov_pc_npc(dc);
2411 dc->npc = dest;
2412 }
2413 return true;
2414 }
2415
2416 if (cmp->cond == TCG_COND_NEVER) {
2417 npc = dc->npc;
2418 if (npc & 3) {
2419 gen_mov_pc_npc(dc);
2420 if (annul) {
2421 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2422 }
2423 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2424 } else {
2425 dc->pc = npc + (annul ? 4 : 0);
2426 dc->npc = dc->pc + 4;
2427 }
2428 return true;
2429 }
2430
2431 flush_cond(dc);
2432 npc = dc->npc;
2433
2434 if (annul) {
2435 TCGLabel *l1 = gen_new_label();
2436
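/*
 * The inverted condition branches to L1 when the branch is not
 * taken. Fall through: execute the delay slot at NPC, then go
 * to DEST. At L1, the annul bit skips the delay slot, so the
 * next insn is at NPC + 4.
 */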
2437 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2438 gen_goto_tb(dc, 0, npc, dest);
2439 gen_set_label(l1);
2440 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2441
2442 dc->base.is_jmp = DISAS_NORETURN;
2443 } else {
2444 if (npc & 3) {
2445 switch (npc) {
2446 case DYNAMIC_PC:
2447 case DYNAMIC_PC_LOOKUP:
2448 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2449 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2450 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2451 cmp->c1, tcg_constant_tl(cmp->c2),
2452 tcg_constant_tl(dest), cpu_npc);
2453 dc->pc = npc;
2454 break;
2455 default:
2456 g_assert_not_reached();
2457 }
2458 } else {
2459 dc->pc = npc;
2460 dc->npc = JUMP_PC;
2461 dc->jump = *cmp;
2462 dc->jump_pc[0] = dest;
2463 dc->jump_pc[1] = npc + 4;
2464
2465 /* The condition for cpu_cond is always NE -- normalize. */
2466 if (cmp->cond == TCG_COND_NE) {
2467 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2468 } else {
2469 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2470 }
2471 dc->cpu_cond_live = true;
2472 }
2473 }
2474 return true;
2475 }
2476
2477 static bool raise_priv(DisasContext *dc)
2478 {
2479 gen_exception(dc, TT_PRIV_INSN);
2480 return true;
2481 }
2482
2483 static bool raise_unimpfpop(DisasContext *dc)
2484 {
2485 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2486 return true;
2487 }
2488
2489 static bool gen_trap_float128(DisasContext *dc)
2490 {
2491 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2492 return false;
2493 }
2494 return raise_unimpfpop(dc);
2495 }
2496
2497 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2498 {
2499 DisasCompare cmp;
2500
2501 gen_compare(&cmp, a->cc, a->cond, dc);
2502 return advance_jump_cond(dc, &cmp, a->a, a->i);
2503 }
2504
2505 TRANS(Bicc, ALL, do_bpcc, a)
2506 TRANS(BPcc, 64, do_bpcc, a)
2507
2508 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2509 {
2510 DisasCompare cmp;
2511
2512 if (gen_trap_ifnofpu(dc)) {
2513 return true;
2514 }
2515 gen_fcompare(&cmp, a->cc, a->cond);
2516 return advance_jump_cond(dc, &cmp, a->a, a->i);
2517 }
2518
2519 TRANS(FBPfcc, 64, do_fbpfcc, a)
2520 TRANS(FBfcc, ALL, do_fbpfcc, a)
2521
2522 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2523 {
2524 DisasCompare cmp;
2525
2526 if (!avail_64(dc)) {
2527 return false;
2528 }
2529 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2530 return false;
2531 }
2532 return advance_jump_cond(dc, &cmp, a->a, a->i);
2533 }
2534
2535 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2536 {
2537 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2538
2539 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2540 gen_mov_pc_npc(dc);
2541 dc->npc = target;
2542 return true;
2543 }
2544
2545 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2546 {
2547 /*
2548 * For sparc32, always generate the no-coprocessor exception.
2549 * For sparc64, always generate illegal instruction.
2550 */
2551 #ifdef TARGET_SPARC64
2552 return false;
2553 #else
2554 gen_exception(dc, TT_NCP_INSN);
2555 return true;
2556 #endif
2557 }
2558
2559 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2560 {
2561 /* Special-case %g0 because that's the canonical nop. */
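/* Otherwise, rd = imm22 << 10: e.g. %hi(0x12345678) yields 0x12345400. */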
2562 if (a->rd) {
2563 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2564 }
2565 return advance_pc(dc);
2566 }
2567
2568 /*
2569 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2570 */
2571
2572 static bool do_tcc(DisasContext *dc, int cond, int cc,
2573 int rs1, bool imm, int rs2_or_imm)
2574 {
2575 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2576 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2577 DisasCompare cmp;
2578 TCGLabel *lab;
2579 TCGv_i32 trap;
2580
2581 /* Trap never. */
2582 if (cond == 0) {
2583 return advance_pc(dc);
2584 }
2585
2586 /*
2587 * Immediate traps are the most common case. Since this value is
2588 * live across the branch, it really pays to evaluate the constant.
2589 */
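/*
 * E.g. "ta 5": rs1 is %g0 and the operand is immediate, so the
 * trap number folds to the constant (5 & mask) + TT_TRAP.
 */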
2590 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2591 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2592 } else {
2593 trap = tcg_temp_new_i32();
2594 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2595 if (imm) {
2596 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2597 } else {
2598 TCGv_i32 t2 = tcg_temp_new_i32();
2599 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2600 tcg_gen_add_i32(trap, trap, t2);
2601 }
2602 tcg_gen_andi_i32(trap, trap, mask);
2603 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2604 }
2605
2606 finishing_insn(dc);
2607
2608 /* Trap always. */
2609 if (cond == 8) {
2610 save_state(dc);
2611 gen_helper_raise_exception(tcg_env, trap);
2612 dc->base.is_jmp = DISAS_NORETURN;
2613 return true;
2614 }
2615
2616 /* Conditional trap. */
2617 flush_cond(dc);
2618 lab = delay_exceptionv(dc, trap);
2619 gen_compare(&cmp, cc, cond, dc);
2620 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2621
2622 return advance_pc(dc);
2623 }
2624
2625 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2626 {
2627 if (avail_32(dc) && a->cc) {
2628 return false;
2629 }
2630 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2631 }
2632
2633 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2634 {
2635 if (avail_64(dc)) {
2636 return false;
2637 }
2638 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2639 }
2640
2641 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2642 {
2643 if (avail_32(dc)) {
2644 return false;
2645 }
2646 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2647 }
2648
2649 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2650 {
2651 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2652 return advance_pc(dc);
2653 }
2654
2655 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2656 {
2657 if (avail_32(dc)) {
2658 return false;
2659 }
2660 if (a->mmask) {
2661 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2662 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2663 }
2664 if (a->cmask) {
2665 /* For #Sync, etc, end the TB to recognize interrupts. */
2666 dc->base.is_jmp = DISAS_EXIT;
2667 }
2668 return advance_pc(dc);
2669 }
2670
2671 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2672 TCGv (*func)(DisasContext *, TCGv))
2673 {
2674 if (!priv) {
2675 return raise_priv(dc);
2676 }
2677 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2678 return advance_pc(dc);
2679 }
2680
2681 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2682 {
2683 return cpu_y;
2684 }
2685
2686 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2687 {
2688 /*
2689 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2690 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2691 * This pattern matches after all other ASRs, so Leon3 ASR17 is handled first.
2692 */
2693 if (avail_64(dc) && a->rs1 != 0) {
2694 return false;
2695 }
2696 return do_rd_special(dc, true, a->rd, do_rdy);
2697 }
2698
2699 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2700 {
2701 uint32_t val;
2702
2703 /*
2704 * TODO: There are many more fields to be filled,
2705 * some of which are writable.
2706 */
2707 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2708 val |= 1 << 8; /* [8] V8 */
2709
2710 return tcg_constant_tl(val);
2711 }
2712
2713 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2714
2715 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2716 {
2717 gen_helper_rdccr(dst, tcg_env);
2718 return dst;
2719 }
2720
2721 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2722
2723 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2724 {
2725 #ifdef TARGET_SPARC64
2726 return tcg_constant_tl(dc->asi);
2727 #else
2728 qemu_build_not_reached();
2729 #endif
2730 }
2731
2732 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2733
2734 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2735 {
2736 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2737
2738 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2739 if (translator_io_start(&dc->base)) {
2740 dc->base.is_jmp = DISAS_EXIT;
2741 }
2742 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2743 tcg_constant_i32(dc->mem_idx));
2744 return dst;
2745 }
2746
2747 /* TODO: non-priv access only allowed when enabled. */
2748 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2749
2750 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2751 {
2752 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2753 }
2754
2755 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2756
2757 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2758 {
2759 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2760 return dst;
2761 }
2762
2763 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2764
2765 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2766 {
2767 gen_trap_ifnofpu(dc);
2768 return cpu_gsr;
2769 }
2770
2771 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2772
2773 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2774 {
2775 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2776 return dst;
2777 }
2778
2779 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2780
2781 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2782 {
2783 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2784 return dst;
2785 }
2786
2787 /* TODO: non-priv access only allowed when enabled. */
2788 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2789
2790 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2791 {
2792 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2793
2794 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2795 if (translator_io_start(&dc->base)) {
2796 dc->base.is_jmp = DISAS_EXIT;
2797 }
2798 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2799 tcg_constant_i32(dc->mem_idx));
2800 return dst;
2801 }
2802
2803 /* TODO: non-priv access only allowed when enabled. */
2804 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2805
2806 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2807 {
2808 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2809 return dst;
2810 }
2811
2812 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2813 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2814
2815 /*
2816 * UltraSPARC-T1 Strand status.
2817 * The HYPV check may not be enough: UA2005 and UA2007 describe
2818 * this ASR as implementation dependent.
2819 */
2820 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2821 {
2822 return tcg_constant_tl(1);
2823 }
2824
2825 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2826
2827 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2828 {
2829 gen_helper_rdpsr(dst, tcg_env);
2830 return dst;
2831 }
2832
2833 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2834
2835 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2836 {
2837 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2838 return dst;
2839 }
2840
2841 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2842
2843 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2844 {
2845 TCGv_i32 tl = tcg_temp_new_i32();
2846 TCGv_ptr tp = tcg_temp_new_ptr();
2847
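/* Compute &env->htstate[tl & MAXTL_MASK]; each entry is 8 bytes. */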
2848 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2849 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2850 tcg_gen_shli_i32(tl, tl, 3);
2851 tcg_gen_ext_i32_ptr(tp, tl);
2852 tcg_gen_add_ptr(tp, tp, tcg_env);
2853
2854 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2855 return dst;
2856 }
2857
2858 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2859
2860 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2861 {
2862 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2863 return dst;
2864 }
2865
2866 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2867
2868 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2869 {
2870 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2871 return dst;
2872 }
2873
2874 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2875
2876 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2877 {
2878 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2879 return dst;
2880 }
2881
2882 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2883
2884 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2885 {
2886 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2887 return dst;
2888 }
2889
2890 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2891 do_rdhstick_cmpr)
2892
2893 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2894 {
2895 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2896 return dst;
2897 }
2898
2899 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2900
2901 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2902 {
2903 #ifdef TARGET_SPARC64
2904 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2905
2906 gen_load_trap_state_at_tl(r_tsptr);
2907 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2908 return dst;
2909 #else
2910 qemu_build_not_reached();
2911 #endif
2912 }
2913
2914 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2915
2916 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2917 {
2918 #ifdef TARGET_SPARC64
2919 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2920
2921 gen_load_trap_state_at_tl(r_tsptr);
2922 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2923 return dst;
2924 #else
2925 qemu_build_not_reached();
2926 #endif
2927 }
2928
2929 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2930
2931 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2932 {
2933 #ifdef TARGET_SPARC64
2934 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2935
2936 gen_load_trap_state_at_tl(r_tsptr);
2937 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2938 return dst;
2939 #else
2940 qemu_build_not_reached();
2941 #endif
2942 }
2943
2944 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2945
2946 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2947 {
2948 #ifdef TARGET_SPARC64
2949 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2950
2951 gen_load_trap_state_at_tl(r_tsptr);
2952 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2953 return dst;
2954 #else
2955 qemu_build_not_reached();
2956 #endif
2957 }
2958
2959 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2960 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2961
2962 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2963 {
2964 return cpu_tbr;
2965 }
2966
2967 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2968 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2969
2970 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2971 {
2972 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2973 return dst;
2974 }
2975
2976 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2977
2978 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2979 {
2980 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2981 return dst;
2982 }
2983
2984 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2985
2986 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2987 {
2988 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2989 return dst;
2990 }
2991
2992 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2993
2994 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2995 {
2996 gen_helper_rdcwp(dst, tcg_env);
2997 return dst;
2998 }
2999
3000 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3001
3002 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3003 {
3004 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3005 return dst;
3006 }
3007
3008 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3009
3010 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3011 {
3012 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3013 return dst;
3014 }
3015
3016 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3017 do_rdcanrestore)
3018
3019 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3020 {
3021 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3022 return dst;
3023 }
3024
3025 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3026
3027 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3028 {
3029 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3030 return dst;
3031 }
3032
3033 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3034
3035 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3036 {
3037 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3038 return dst;
3039 }
3040
3041 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3042
3043 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3044 {
3045 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3046 return dst;
3047 }
3048
3049 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3050
3051 /* UA2005 strand status */
3052 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3053 {
3054 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3055 return dst;
3056 }
3057
3058 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3059
3060 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3061 {
3062 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3063 return dst;
3064 }
3065
3066 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3067
3068 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3069 {
3070 if (avail_64(dc)) {
3071 gen_helper_flushw(tcg_env);
3072 return advance_pc(dc);
3073 }
3074 return false;
3075 }
3076
3077 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3078 void (*func)(DisasContext *, TCGv))
3079 {
3080 TCGv src;
3081
3082 /* For simplicity, we under-decoded the rs2 form. */
3083 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3084 return false;
3085 }
3086 if (!priv) {
3087 return raise_priv(dc);
3088 }
3089
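/*
 * Per the WRASR definition, the value written is
 * rs1 XOR (rs2 or sign_ext(simm13)); fold to a constant when rs1 is %g0.
 */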
3090 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3091 src = tcg_constant_tl(a->rs2_or_imm);
3092 } else {
3093 TCGv src1 = gen_load_gpr(dc, a->rs1);
3094 if (a->rs2_or_imm == 0) {
3095 src = src1;
3096 } else {
3097 src = tcg_temp_new();
3098 if (a->imm) {
3099 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3100 } else {
3101 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3102 }
3103 }
3104 }
3105 func(dc, src);
3106 return advance_pc(dc);
3107 }
3108
3109 static void do_wry(DisasContext *dc, TCGv src)
3110 {
3111 tcg_gen_ext32u_tl(cpu_y, src);
3112 }
3113
3114 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3115
3116 static void do_wrccr(DisasContext *dc, TCGv src)
3117 {
3118 gen_helper_wrccr(tcg_env, src);
3119 }
3120
3121 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3122
3123 static void do_wrasi(DisasContext *dc, TCGv src)
3124 {
3125 TCGv tmp = tcg_temp_new();
3126
3127 tcg_gen_ext8u_tl(tmp, src);
3128 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3129 /* End TB to notice changed ASI. */
3130 dc->base.is_jmp = DISAS_EXIT;
3131 }
3132
3133 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3134
3135 static void do_wrfprs(DisasContext *dc, TCGv src)
3136 {
3137 #ifdef TARGET_SPARC64
3138 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3139 dc->fprs_dirty = 0;
3140 dc->base.is_jmp = DISAS_EXIT;
3141 #else
3142 qemu_build_not_reached();
3143 #endif
3144 }
3145
3146 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3147
3148 static void do_wrgsr(DisasContext *dc, TCGv src)
3149 {
3150 gen_trap_ifnofpu(dc);
3151 tcg_gen_mov_tl(cpu_gsr, src);
3152 }
3153
3154 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3155
3156 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3157 {
3158 gen_helper_set_softint(tcg_env, src);
3159 }
3160
3161 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3162
3163 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3164 {
3165 gen_helper_clear_softint(tcg_env, src);
3166 }
3167
3168 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3169
3170 static void do_wrsoftint(DisasContext *dc, TCGv src)
3171 {
3172 gen_helper_write_softint(tcg_env, src);
3173 }
3174
3175 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3176
3177 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3178 {
3179 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3180
3181 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3182 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3183 translator_io_start(&dc->base);
3184 gen_helper_tick_set_limit(r_tickptr, src);
3185 /* End TB to handle timer interrupt */
3186 dc->base.is_jmp = DISAS_EXIT;
3187 }
3188
3189 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3190
3191 static void do_wrstick(DisasContext *dc, TCGv src)
3192 {
3193 #ifdef TARGET_SPARC64
3194 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3195
3196 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3197 translator_io_start(&dc->base);
3198 gen_helper_tick_set_count(r_tickptr, src);
3199 /* End TB to handle timer interrupt */
3200 dc->base.is_jmp = DISAS_EXIT;
3201 #else
3202 qemu_build_not_reached();
3203 #endif
3204 }
3205
3206 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3207
3208 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3209 {
3210 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3211
3212 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3213 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3214 translator_io_start(&dc->base);
3215 gen_helper_tick_set_limit(r_tickptr, src);
3216 /* End TB to handle timer interrupt */
3217 dc->base.is_jmp = DISAS_EXIT;
3218 }
3219
3220 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3221
3222 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3223 {
3224 finishing_insn(dc);
3225 save_state(dc);
3226 gen_helper_power_down(tcg_env);
3227 }
3228
3229 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3230
3231 static void do_wrpsr(DisasContext *dc, TCGv src)
3232 {
3233 gen_helper_wrpsr(tcg_env, src);
3234 dc->base.is_jmp = DISAS_EXIT;
3235 }
3236
3237 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3238
3239 static void do_wrwim(DisasContext *dc, TCGv src)
3240 {
3241 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3242 TCGv tmp = tcg_temp_new();
3243
3244 tcg_gen_andi_tl(tmp, src, mask);
3245 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3246 }
3247
3248 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3249
3250 static void do_wrtpc(DisasContext *dc, TCGv src)
3251 {
3252 #ifdef TARGET_SPARC64
3253 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3254
3255 gen_load_trap_state_at_tl(r_tsptr);
3256 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3257 #else
3258 qemu_build_not_reached();
3259 #endif
3260 }
3261
3262 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3263
3264 static void do_wrtnpc(DisasContext *dc, TCGv src)
3265 {
3266 #ifdef TARGET_SPARC64
3267 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3268
3269 gen_load_trap_state_at_tl(r_tsptr);
3270 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3271 #else
3272 qemu_build_not_reached();
3273 #endif
3274 }
3275
3276 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3277
3278 static void do_wrtstate(DisasContext *dc, TCGv src)
3279 {
3280 #ifdef TARGET_SPARC64
3281 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3282
3283 gen_load_trap_state_at_tl(r_tsptr);
3284 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3285 #else
3286 qemu_build_not_reached();
3287 #endif
3288 }
3289
3290 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3291
3292 static void do_wrtt(DisasContext *dc, TCGv src)
3293 {
3294 #ifdef TARGET_SPARC64
3295 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3296
3297 gen_load_trap_state_at_tl(r_tsptr);
3298 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3299 #else
3300 qemu_build_not_reached();
3301 #endif
3302 }
3303
3304 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3305
3306 static void do_wrtick(DisasContext *dc, TCGv src)
3307 {
3308 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3309
3310 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3311 translator_io_start(&dc->base);
3312 gen_helper_tick_set_count(r_tickptr, src);
3313 /* End TB to handle timer interrupt */
3314 dc->base.is_jmp = DISAS_EXIT;
3315 }
3316
3317 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3318
3319 static void do_wrtba(DisasContext *dc, TCGv src)
3320 {
3321 tcg_gen_mov_tl(cpu_tbr, src);
3322 }
3323
3324 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3325
3326 static void do_wrpstate(DisasContext *dc, TCGv src)
3327 {
3328 save_state(dc);
3329 if (translator_io_start(&dc->base)) {
3330 dc->base.is_jmp = DISAS_EXIT;
3331 }
3332 gen_helper_wrpstate(tcg_env, src);
3333 dc->npc = DYNAMIC_PC;
3334 }
3335
3336 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3337
3338 static void do_wrtl(DisasContext *dc, TCGv src)
3339 {
3340 save_state(dc);
3341 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3342 dc->npc = DYNAMIC_PC;
3343 }
3344
3345 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3346
3347 static void do_wrpil(DisasContext *dc, TCGv src)
3348 {
3349 if (translator_io_start(&dc->base)) {
3350 dc->base.is_jmp = DISAS_EXIT;
3351 }
3352 gen_helper_wrpil(tcg_env, src);
3353 }
3354
3355 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3356
3357 static void do_wrcwp(DisasContext *dc, TCGv src)
3358 {
3359 gen_helper_wrcwp(tcg_env, src);
3360 }
3361
3362 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3363
3364 static void do_wrcansave(DisasContext *dc, TCGv src)
3365 {
3366 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3367 }
3368
3369 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3370
3371 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3372 {
3373 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3374 }
3375
3376 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3377
3378 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3379 {
3380 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3381 }
3382
3383 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3384
3385 static void do_wrotherwin(DisasContext *dc, TCGv src)
3386 {
3387 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3388 }
3389
3390 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3391
3392 static void do_wrwstate(DisasContext *dc, TCGv src)
3393 {
3394 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3395 }
3396
3397 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3398
3399 static void do_wrgl(DisasContext *dc, TCGv src)
3400 {
3401 gen_helper_wrgl(tcg_env, src);
3402 }
3403
3404 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3405
3406 /* UA2005 strand status */
3407 static void do_wrssr(DisasContext *dc, TCGv src)
3408 {
3409 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3410 }
3411
3412 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3413
3414 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3415
3416 static void do_wrhpstate(DisasContext *dc, TCGv src)
3417 {
3418 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3419 dc->base.is_jmp = DISAS_EXIT;
3420 }
3421
3422 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3423
3424 static void do_wrhtstate(DisasContext *dc, TCGv src)
3425 {
3426 TCGv_i32 tl = tcg_temp_new_i32();
3427 TCGv_ptr tp = tcg_temp_new_ptr();
3428
3429 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3430 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3431 tcg_gen_shli_i32(tl, tl, 3);
3432 tcg_gen_ext_i32_ptr(tp, tl);
3433 tcg_gen_add_ptr(tp, tp, tcg_env);
3434
3435 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3436 }
3437
3438 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3439
3440 static void do_wrhintp(DisasContext *dc, TCGv src)
3441 {
3442 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3443 }
3444
3445 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3446
3447 static void do_wrhtba(DisasContext *dc, TCGv src)
3448 {
3449 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3450 }
3451
3452 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3453
3454 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3455 {
3456 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3457
3458 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3459 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3460 translator_io_start(&dc->base);
3461 gen_helper_tick_set_limit(r_tickptr, src);
3462 /* End TB to handle timer interrupt */
3463 dc->base.is_jmp = DISAS_EXIT;
3464 }
3465
3466 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3467 do_wrhstick_cmpr)
3468
3469 static bool do_saved_restored(DisasContext *dc, bool saved)
3470 {
3471 if (!supervisor(dc)) {
3472 return raise_priv(dc);
3473 }
3474 if (saved) {
3475 gen_helper_saved(tcg_env);
3476 } else {
3477 gen_helper_restored(tcg_env);
3478 }
3479 return advance_pc(dc);
3480 }
3481
3482 TRANS(SAVED, 64, do_saved_restored, true)
3483 TRANS(RESTORED, 64, do_saved_restored, false)
3484
3485 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3486 {
3487 return advance_pc(dc);
3488 }
3489
3490 /*
3491 * TODO: Need a feature bit for sparcv8.
3492 * In the meantime, treat all 32-bit cpus like sparcv7.
3493 */
3494 TRANS(NOP_v7, 32, trans_NOP, a)
3495 TRANS(NOP_v9, 64, trans_NOP, a)
3496
3497 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3498 void (*func)(TCGv, TCGv, TCGv),
3499 void (*funci)(TCGv, TCGv, target_long),
3500 bool logic_cc)
3501 {
3502 TCGv dst, src1;
3503
3504 /* For simplicity, we under-decoded the rs2 form. */
3505 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3506 return false;
3507 }
3508
3509 if (logic_cc) {
3510 dst = cpu_cc_N;
3511 } else {
3512 dst = gen_dest_gpr(dc, a->rd);
3513 }
3514 src1 = gen_load_gpr(dc, a->rs1);
3515
3516 if (a->imm || a->rs2_or_imm == 0) {
3517 if (funci) {
3518 funci(dst, src1, a->rs2_or_imm);
3519 } else {
3520 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3521 }
3522 } else {
3523 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3524 }
3525
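/*
 * For logic ops the flags derive directly from the result:
 * dst aliases cpu_cc_N above, Z mirrors N, and C = V = 0.
 */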
3526 if (logic_cc) {
3527 if (TARGET_LONG_BITS == 64) {
3528 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3529 tcg_gen_movi_tl(cpu_icc_C, 0);
3530 }
3531 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3532 tcg_gen_movi_tl(cpu_cc_C, 0);
3533 tcg_gen_movi_tl(cpu_cc_V, 0);
3534 }
3535
3536 gen_store_gpr(dc, a->rd, dst);
3537 return advance_pc(dc);
3538 }
3539
3540 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3541 void (*func)(TCGv, TCGv, TCGv),
3542 void (*funci)(TCGv, TCGv, target_long),
3543 void (*func_cc)(TCGv, TCGv, TCGv))
3544 {
3545 if (a->cc) {
3546 return do_arith_int(dc, a, func_cc, NULL, false);
3547 }
3548 return do_arith_int(dc, a, func, funci, false);
3549 }
3550
3551 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3552 void (*func)(TCGv, TCGv, TCGv),
3553 void (*funci)(TCGv, TCGv, target_long))
3554 {
3555 return do_arith_int(dc, a, func, funci, a->cc);
3556 }
3557
3558 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3559 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3560 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3561 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3562
3563 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3564 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3565 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3566 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3567
3568 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3569 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3570 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3571 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3572 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3573
3574 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3575 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3576 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3577 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3578
3579 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3580 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3581
3582 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3583 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3584
3585 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3586 {
3587 /* OR with %g0 is the canonical alias for MOV. */
3588 if (!a->cc && a->rs1 == 0) {
3589 if (a->imm || a->rs2_or_imm == 0) {
3590 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3591 } else if (a->rs2_or_imm & ~0x1f) {
3592 /* For simplicity, we under-decoded the rs2 form. */
3593 return false;
3594 } else {
3595 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3596 }
3597 return advance_pc(dc);
3598 }
3599 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3600 }
3601
3602 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3603 {
3604 TCGv_i64 t1, t2;
3605 TCGv dst;
3606
3607 if (!avail_DIV(dc)) {
3608 return false;
3609 }
3610 /* For simplicity, we under-decoded the rs2 form. */
3611 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3612 return false;
3613 }
3614
3615 if (unlikely(a->rs2_or_imm == 0)) {
3616 gen_exception(dc, TT_DIV_ZERO);
3617 return true;
3618 }
3619
3620 if (a->imm) {
3621 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3622 } else {
3623 TCGLabel *lab;
3624 TCGv_i32 n2;
3625
3626 finishing_insn(dc);
3627 flush_cond(dc);
3628
3629 n2 = tcg_temp_new_i32();
3630 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3631
3632 lab = delay_exception(dc, TT_DIV_ZERO);
3633 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3634
3635 t2 = tcg_temp_new_i64();
3636 #ifdef TARGET_SPARC64
3637 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3638 #else
3639 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3640 #endif
3641 }
3642
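/*
 * The dividend is the 64-bit value (Y << 32) | rs1; the quotient
 * is clamped to UINT32_MAX, the architectural overflow result.
 */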
3643 t1 = tcg_temp_new_i64();
3644 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3645
3646 tcg_gen_divu_i64(t1, t1, t2);
3647 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3648
3649 dst = gen_dest_gpr(dc, a->rd);
3650 tcg_gen_trunc_i64_tl(dst, t1);
3651 gen_store_gpr(dc, a->rd, dst);
3652 return advance_pc(dc);
3653 }
3654
3655 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3656 {
3657 TCGv dst, src1, src2;
3658
3659 if (!avail_64(dc)) {
3660 return false;
3661 }
3662 /* For simplicity, we under-decoded the rs2 form. */
3663 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3664 return false;
3665 }
3666
3667 if (unlikely(a->rs2_or_imm == 0)) {
3668 gen_exception(dc, TT_DIV_ZERO);
3669 return true;
3670 }
3671
3672 if (a->imm) {
3673 src2 = tcg_constant_tl(a->rs2_or_imm);
3674 } else {
3675 TCGLabel *lab;
3676
3677 finishing_insn(dc);
3678 flush_cond(dc);
3679
3680 lab = delay_exception(dc, TT_DIV_ZERO);
3681 src2 = cpu_regs[a->rs2_or_imm];
3682 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3683 }
3684
3685 dst = gen_dest_gpr(dc, a->rd);
3686 src1 = gen_load_gpr(dc, a->rs1);
3687
3688 tcg_gen_divu_tl(dst, src1, src2);
3689 gen_store_gpr(dc, a->rd, dst);
3690 return advance_pc(dc);
3691 }
3692
3693 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3694 {
3695 TCGv dst, src1, src2;
3696
3697 if (!avail_64(dc)) {
3698 return false;
3699 }
3700 /* For simplicity, we under-decoded the rs2 form. */
3701 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3702 return false;
3703 }
3704
3705 if (unlikely(a->rs2_or_imm == 0)) {
3706 gen_exception(dc, TT_DIV_ZERO);
3707 return true;
3708 }
3709
3710 dst = gen_dest_gpr(dc, a->rd);
3711 src1 = gen_load_gpr(dc, a->rs1);
3712
3713 if (a->imm) {
3714 if (unlikely(a->rs2_or_imm == -1)) {
3715 tcg_gen_neg_tl(dst, src1);
3716 gen_store_gpr(dc, a->rd, dst);
3717 return advance_pc(dc);
3718 }
3719 src2 = tcg_constant_tl(a->rs2_or_imm);
3720 } else {
3721 TCGLabel *lab;
3722 TCGv t1, t2;
3723
3724 finishing_insn(dc);
3725 flush_cond(dc);
3726
3727 lab = delay_exception(dc, TT_DIV_ZERO);
3728 src2 = cpu_regs[a->rs2_or_imm];
3729 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3730
3731 /*
3732 * Need to avoid INT64_MIN / -1, which traps on the x86 host.
3733 * Substitute 1 for the divisor: INT64_MIN / 1 yields the correct result.
3734 */
3735 t1 = tcg_temp_new();
3736 t2 = tcg_temp_new();
3737 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3738 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3739 tcg_gen_and_tl(t1, t1, t2);
3740 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3741 tcg_constant_tl(1), src2);
3742 src2 = t1;
3743 }
3744
3745 tcg_gen_div_tl(dst, src1, src2);
3746 gen_store_gpr(dc, a->rd, dst);
3747 return advance_pc(dc);
3748 }
3749
3750 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3751 int width, bool cc, bool left)
3752 {
3753 TCGv dst, s1, s2, lo1, lo2;
3754 uint64_t amask, tabl, tabr;
3755 int shift, imask, omask;
3756
3757 dst = gen_dest_gpr(dc, a->rd);
3758 s1 = gen_load_gpr(dc, a->rs1);
3759 s2 = gen_load_gpr(dc, a->rs2);
3760
3761 if (cc) {
3762 gen_op_subcc(cpu_cc_N, s1, s2);
3763 }
3764
3765 /*
3766 * Theory of operation: there are two tables, left and right (not to
3767 * be confused with the left and right versions of the opcode). These
3768 * are indexed by the low 3 bits of the inputs. To make things "easy",
3769 * these tables are loaded into two constants, TABL and TABR below.
3770 * The operation index = (input & imask) << shift calculates the index
3771 * into the constant, while val = (table >> index) & omask calculates
3772 * the value we're looking for.
3773 */
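/*
 * E.g. EDGE8 (left == false) with (s1 & 7) == 2: index = 2 << 3 = 16,
 * so lo1 = (0x0103070f1f3f7fff >> 16) & 0xff = 0x3f -- the mask for
 * bytes 2..7, the msb naming the lowest-addressed byte.
 */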
3774 switch (width) {
3775 case 8:
3776 imask = 0x7;
3777 shift = 3;
3778 omask = 0xff;
3779 if (left) {
3780 tabl = 0x80c0e0f0f8fcfeffULL;
3781 tabr = 0xff7f3f1f0f070301ULL;
3782 } else {
3783 tabl = 0x0103070f1f3f7fffULL;
3784 tabr = 0xfffefcf8f0e0c080ULL;
3785 }
3786 break;
3787 case 16:
3788 imask = 0x6;
3789 shift = 1;
3790 omask = 0xf;
3791 if (left) {
3792 tabl = 0x8cef;
3793 tabr = 0xf731;
3794 } else {
3795 tabl = 0x137f;
3796 tabr = 0xfec8;
3797 }
3798 break;
3799 case 32:
3800 imask = 0x4;
3801 shift = 0;
3802 omask = 0x3;
3803 if (left) {
3804 tabl = (2 << 2) | 3;
3805 tabr = (3 << 2) | 1;
3806 } else {
3807 tabl = (1 << 2) | 3;
3808 tabr = (3 << 2) | 2;
3809 }
3810 break;
3811 default:
3812 g_assert_not_reached();
3813 }
3814
3815 lo1 = tcg_temp_new();
3816 lo2 = tcg_temp_new();
3817 tcg_gen_andi_tl(lo1, s1, imask);
3818 tcg_gen_andi_tl(lo2, s2, imask);
3819 tcg_gen_shli_tl(lo1, lo1, shift);
3820 tcg_gen_shli_tl(lo2, lo2, shift);
3821
3822 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3823 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3824 tcg_gen_andi_tl(lo1, lo1, omask);
3825 tcg_gen_andi_tl(lo2, lo2, omask);
3826
3827 amask = address_mask_i(dc, -8);
3828 tcg_gen_andi_tl(s1, s1, amask);
3829 tcg_gen_andi_tl(s2, s2, amask);
3830
3831 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3832 tcg_gen_and_tl(lo2, lo2, lo1);
3833 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3834
3835 gen_store_gpr(dc, a->rd, dst);
3836 return advance_pc(dc);
3837 }
3838
3839 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3840 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3841 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3842 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3843 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3844 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3845
3846 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3847 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3848 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3849 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3850 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3851 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3852
3853 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3854 void (*func)(TCGv, TCGv, TCGv))
3855 {
3856 TCGv dst = gen_dest_gpr(dc, a->rd);
3857 TCGv src1 = gen_load_gpr(dc, a->rs1);
3858 TCGv src2 = gen_load_gpr(dc, a->rs2);
3859
3860 func(dst, src1, src2);
3861 gen_store_gpr(dc, a->rd, dst);
3862 return advance_pc(dc);
3863 }
3864
3865 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3866 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3867 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3868
3869 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3870 {
3871 #ifdef TARGET_SPARC64
3872 TCGv tmp = tcg_temp_new();
3873
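/* dst = (s1 + s2) & -8; GSR.align receives the low 3 bits of the sum. */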
3874 tcg_gen_add_tl(tmp, s1, s2);
3875 tcg_gen_andi_tl(dst, tmp, -8);
3876 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3877 #else
3878 g_assert_not_reached();
3879 #endif
3880 }
3881
3882 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3883 {
3884 #ifdef TARGET_SPARC64
3885 TCGv tmp = tcg_temp_new();
3886
3887 tcg_gen_add_tl(tmp, s1, s2);
3888 tcg_gen_andi_tl(dst, tmp, -8);
3889 tcg_gen_neg_tl(tmp, tmp);
3890 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3891 #else
3892 g_assert_not_reached();
3893 #endif
3894 }
3895
3896 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3897 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3898
3899 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3900 {
3901 #ifdef TARGET_SPARC64
3902 tcg_gen_add_tl(dst, s1, s2);
3903 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3904 #else
3905 g_assert_not_reached();
3906 #endif
3907 }
3908
3909 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3910
3911 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3912 {
3913 TCGv dst, src1, src2;
3914
3915 /* Reject 64-bit shifts for sparc32. */
3916 if (avail_32(dc) && a->x) {
3917 return false;
3918 }
3919
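/* The shift count is taken modulo the operand width (32 or 64). */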
3920 src2 = tcg_temp_new();
3921 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3922 src1 = gen_load_gpr(dc, a->rs1);
3923 dst = gen_dest_gpr(dc, a->rd);
3924
3925 if (l) {
3926 tcg_gen_shl_tl(dst, src1, src2);
3927 if (!a->x) {
3928 tcg_gen_ext32u_tl(dst, dst);
3929 }
3930 } else if (u) {
3931 if (!a->x) {
3932 tcg_gen_ext32u_tl(dst, src1);
3933 src1 = dst;
3934 }
3935 tcg_gen_shr_tl(dst, src1, src2);
3936 } else {
3937 if (!a->x) {
3938 tcg_gen_ext32s_tl(dst, src1);
3939 src1 = dst;
3940 }
3941 tcg_gen_sar_tl(dst, src1, src2);
3942 }
3943 gen_store_gpr(dc, a->rd, dst);
3944 return advance_pc(dc);
3945 }
3946
3947 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3948 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3949 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3950
3951 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3952 {
3953 TCGv dst, src1;
3954
3955 /* Reject 64-bit shifts for sparc32. */
3956 if (avail_32(dc) && (a->x || a->i >= 32)) {
3957 return false;
3958 }
3959
3960 src1 = gen_load_gpr(dc, a->rs1);
3961 dst = gen_dest_gpr(dc, a->rd);
3962
3963 if (avail_32(dc) || a->x) {
3964 if (l) {
3965 tcg_gen_shli_tl(dst, src1, a->i);
3966 } else if (u) {
3967 tcg_gen_shri_tl(dst, src1, a->i);
3968 } else {
3969 tcg_gen_sari_tl(dst, src1, a->i);
3970 }
3971 } else {
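/* 64-bit cpu with a 32-bit shift: fold the shift and the
   32-bit extension into a single deposit/extract op. */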
3972 if (l) {
3973 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3974 } else if (u) {
3975 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3976 } else {
3977 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3978 }
3979 }
3980 gen_store_gpr(dc, a->rd, dst);
3981 return advance_pc(dc);
3982 }
3983
3984 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3985 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3986 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3987
3988 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3989 {
3990 /* For simplicity, we under-decoded the rs2 form. */
3991 if (!imm && rs2_or_imm & ~0x1f) {
3992 return NULL;
3993 }
3994 if (imm || rs2_or_imm == 0) {
3995 return tcg_constant_tl(rs2_or_imm);
3996 } else {
3997 return cpu_regs[rs2_or_imm];
3998 }
3999 }
4000
4001 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4002 {
4003 TCGv dst = gen_load_gpr(dc, rd);
4004 TCGv c2 = tcg_constant_tl(cmp->c2);
4005
4006 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4007 gen_store_gpr(dc, rd, dst);
4008 return advance_pc(dc);
4009 }
4010
4011 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4012 {
4013 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4014 DisasCompare cmp;
4015
4016 if (src2 == NULL) {
4017 return false;
4018 }
4019 gen_compare(&cmp, a->cc, a->cond, dc);
4020 return do_mov_cond(dc, &cmp, a->rd, src2);
4021 }
4022
4023 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4024 {
4025 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4026 DisasCompare cmp;
4027
4028 if (src2 == NULL) {
4029 return false;
4030 }
4031 gen_fcompare(&cmp, a->cc, a->cond);
4032 return do_mov_cond(dc, &cmp, a->rd, src2);
4033 }
4034
4035 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4036 {
4037 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4038 DisasCompare cmp;
4039
4040 if (src2 == NULL) {
4041 return false;
4042 }
4043 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4044 return false;
4045 }
4046 return do_mov_cond(dc, &cmp, a->rd, src2);
4047 }
4048
4049 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4050 bool (*func)(DisasContext *dc, int rd, TCGv src))
4051 {
4052 TCGv src1, sum;
4053
4054 /* For simplicity, we under-decoded the rs2 form. */
4055 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4056 return false;
4057 }
4058
4059 /*
4060 * Always load the sum into a new temporary.
4061 * This is required to capture the value across a window change,
4062 * e.g. SAVE and RESTORE, and may be optimized away otherwise.
4063 */
4064 sum = tcg_temp_new();
4065 src1 = gen_load_gpr(dc, a->rs1);
4066 if (a->imm || a->rs2_or_imm == 0) {
4067 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4068 } else {
4069 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4070 }
4071 return func(dc, a->rd, sum);
4072 }
4073
4074 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4075 {
4076 /*
4077 * Preserve pc across advance, so that we can delay
4078 * the writeback to rd until after src is consumed.
4079 */
4080 target_ulong cur_pc = dc->pc;
4081
4082 gen_check_align(dc, src, 3);
4083
4084 gen_mov_pc_npc(dc);
4085 tcg_gen_mov_tl(cpu_npc, src);
4086 gen_address_mask(dc, cpu_npc);
4087 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4088
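/*
 * The jump target is dynamic, but execution simply continues there,
 * so tb_stop may still chain via tcg_gen_lookup_and_goto_ptr()
 * instead of exiting to the main loop.
 */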
4089 dc->npc = DYNAMIC_PC_LOOKUP;
4090 return true;
4091 }
4092
4093 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4094
4095 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4096 {
4097 if (!supervisor(dc)) {
4098 return raise_priv(dc);
4099 }
4100
4101 gen_check_align(dc, src, 3);
4102
4103 gen_mov_pc_npc(dc);
4104 tcg_gen_mov_tl(cpu_npc, src);
4105 gen_helper_rett(tcg_env);
4106
4107 dc->npc = DYNAMIC_PC;
4108 return true;
4109 }
4110
4111 TRANS(RETT, 32, do_add_special, a, do_rett)
4112
4113 static bool do_return(DisasContext *dc, int rd, TCGv src)
4114 {
4115 gen_check_align(dc, src, 3);
4116 gen_helper_restore(tcg_env);
4117
4118 gen_mov_pc_npc(dc);
4119 tcg_gen_mov_tl(cpu_npc, src);
4120 gen_address_mask(dc, cpu_npc);
4121
4122 dc->npc = DYNAMIC_PC_LOOKUP;
4123 return true;
4124 }
4125
4126 TRANS(RETURN, 64, do_add_special, a, do_return)
4127
4128 static bool do_save(DisasContext *dc, int rd, TCGv src)
4129 {
4130 gen_helper_save(tcg_env);
4131 gen_store_gpr(dc, rd, src);
4132 return advance_pc(dc);
4133 }
4134
4135 TRANS(SAVE, ALL, do_add_special, a, do_save)
4136
4137 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4138 {
4139 gen_helper_restore(tcg_env);
4140 gen_store_gpr(dc, rd, src);
4141 return advance_pc(dc);
4142 }
4143
4144 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4145
4146 static bool do_done_retry(DisasContext *dc, bool done)
4147 {
4148 if (!supervisor(dc)) {
4149 return raise_priv(dc);
4150 }
4151 dc->npc = DYNAMIC_PC;
4152 dc->pc = DYNAMIC_PC;
4153 translator_io_start(&dc->base);
4154 if (done) {
4155 gen_helper_done(tcg_env);
4156 } else {
4157 gen_helper_retry(tcg_env);
4158 }
4159 return true;
4160 }
4161
4162 TRANS(DONE, 64, do_done_retry, true)
4163 TRANS(RETRY, 64, do_done_retry, false)
4164
4165 /*
4166 * Major opcode 11 -- load and store instructions
4167 */
4168
4169 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4170 {
4171 TCGv addr, tmp = NULL;
4172
4173 /* For simplicity, we under-decoded the rs2 form. */
4174 if (!imm && rs2_or_imm & ~0x1f) {
4175 return NULL;
4176 }
4177
4178 addr = gen_load_gpr(dc, rs1);
4179 if (rs2_or_imm) {
4180 tmp = tcg_temp_new();
4181 if (imm) {
4182 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4183 } else {
4184 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4185 }
4186 addr = tmp;
4187 }
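/* If the 32-bit address mask is in effect, zero-extend the address. */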
4188 if (AM_CHECK(dc)) {
4189 if (!tmp) {
4190 tmp = tcg_temp_new();
4191 }
4192 tcg_gen_ext32u_tl(tmp, addr);
4193 addr = tmp;
4194 }
4195 return addr;
4196 }
4197
4198 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4199 {
4200 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4201 DisasASI da;
4202
4203 if (addr == NULL) {
4204 return false;
4205 }
4206 da = resolve_asi(dc, a->asi, mop);
4207
4208 reg = gen_dest_gpr(dc, a->rd);
4209 gen_ld_asi(dc, &da, reg, addr);
4210 gen_store_gpr(dc, a->rd, reg);
4211 return advance_pc(dc);
4212 }
4213
4214 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4215 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4216 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4217 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4218 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4219 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4220 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4221
4222 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4223 {
4224 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4225 DisasASI da;
4226
4227 if (addr == NULL) {
4228 return false;
4229 }
4230 da = resolve_asi(dc, a->asi, mop);
4231
4232 reg = gen_load_gpr(dc, a->rd);
4233 gen_st_asi(dc, &da, reg, addr);
4234 return advance_pc(dc);
4235 }
4236
4237 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4238 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4239 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4240 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4241
4242 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4243 {
4244 TCGv addr;
4245 DisasASI da;
4246
4247 if (a->rd & 1) {
4248 return false;
4249 }
4250 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4251 if (addr == NULL) {
4252 return false;
4253 }
4254 da = resolve_asi(dc, a->asi, MO_TEUQ);
4255 gen_ldda_asi(dc, &da, addr, a->rd);
4256 return advance_pc(dc);
4257 }
4258
4259 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4260 {
4261 TCGv addr;
4262 DisasASI da;
4263
4264 if (a->rd & 1) {
4265 return false;
4266 }
4267 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4268 if (addr == NULL) {
4269 return false;
4270 }
4271 da = resolve_asi(dc, a->asi, MO_TEUQ);
4272 gen_stda_asi(dc, &da, addr, a->rd);
4273 return advance_pc(dc);
4274 }
4275
4276 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4277 {
4278 TCGv addr, reg;
4279 DisasASI da;
4280
4281 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4282 if (addr == NULL) {
4283 return false;
4284 }
4285 da = resolve_asi(dc, a->asi, MO_UB);
4286
4287 reg = gen_dest_gpr(dc, a->rd);
4288 gen_ldstub_asi(dc, &da, reg, addr);
4289 gen_store_gpr(dc, a->rd, reg);
4290 return advance_pc(dc);
4291 }
4292
4293 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4294 {
4295 TCGv addr, dst, src;
4296 DisasASI da;
4297
4298 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4299 if (addr == NULL) {
4300 return false;
4301 }
4302 da = resolve_asi(dc, a->asi, MO_TEUL);
4303
4304 dst = gen_dest_gpr(dc, a->rd);
4305 src = gen_load_gpr(dc, a->rd);
4306 gen_swap_asi(dc, &da, dst, src, addr);
4307 gen_store_gpr(dc, a->rd, dst);
4308 return advance_pc(dc);
4309 }
4310
4311 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4312 {
4313 TCGv addr, o, n, c;
4314 DisasASI da;
4315
4316 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4317 if (addr == NULL) {
4318 return false;
4319 }
4320 da = resolve_asi(dc, a->asi, mop);
4321
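/*
 * Compare-and-swap: memory is compared with r[rs2]; on a match,
 * r[rd] is stored to memory.  Either way, the old memory value is
 * written back to r[rd].
 */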
4322 o = gen_dest_gpr(dc, a->rd);
4323 n = gen_load_gpr(dc, a->rd);
4324 c = gen_load_gpr(dc, a->rs2_or_imm);
4325 gen_cas_asi(dc, &da, o, n, c, addr);
4326 gen_store_gpr(dc, a->rd, o);
4327 return advance_pc(dc);
4328 }
4329
4330 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4331 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4332
4333 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4334 {
4335 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4336 DisasASI da;
4337
4338 if (addr == NULL) {
4339 return false;
4340 }
4341 if (gen_trap_ifnofpu(dc)) {
4342 return true;
4343 }
4344 if (sz == MO_128 && gen_trap_float128(dc)) {
4345 return true;
4346 }
4347 da = resolve_asi(dc, a->asi, MO_TE | sz);
4348 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4349 gen_update_fprs_dirty(dc, a->rd);
4350 return advance_pc(dc);
4351 }
4352
4353 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4354 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4355 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4356
4357 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4358 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4359 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4360
4361 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4362 {
4363 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4364 DisasASI da;
4365
4366 if (addr == NULL) {
4367 return false;
4368 }
4369 if (gen_trap_ifnofpu(dc)) {
4370 return true;
4371 }
4372 if (sz == MO_128 && gen_trap_float128(dc)) {
4373 return true;
4374 }
4375 da = resolve_asi(dc, a->asi, MO_TE | sz);
4376 gen_stf_asi(dc, &da, sz, addr, a->rd);
4377 return advance_pc(dc);
4378 }
4379
4380 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4381 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4382 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4383
4384 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4385 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4386 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4387
4388 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4389 {
4390 if (!avail_32(dc)) {
4391 return false;
4392 }
4393 if (!supervisor(dc)) {
4394 return raise_priv(dc);
4395 }
4396 if (gen_trap_ifnofpu(dc)) {
4397 return true;
4398 }
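/*
 * The floating-point deferred-trap queue is not modeled; as with an
 * empty queue on real hardware, STDFQ signals a sequence error.
 */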
4399 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4400 return true;
4401 }
4402
4403 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4404 target_ulong new_mask, target_ulong old_mask)
4405 {
4406 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4407 TCGv tnew, told;
4408
4409 if (addr == NULL) {
4410 return false;
4411 }
4412 if (gen_trap_ifnofpu(dc)) {
4413 return true;
4414 }
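/*
 * Merge: fsr = (memory value & new_mask) | (old fsr & old_mask);
 * bits not loaded from memory keep their old values, and FTT is
 * handled separately by the noftt helper.
 */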
4415 tnew = tcg_temp_new();
4416 told = tcg_temp_new();
4417 tcg_gen_qemu_ld_tl(tnew, addr, dc->mem_idx, mop | MO_ALIGN);
4418 tcg_gen_ld_tl(told, tcg_env, offsetof(CPUSPARCState, fsr));
4419 tcg_gen_andi_tl(tnew, tnew, new_mask);
4420 tcg_gen_andi_tl(told, told, old_mask);
4421 tcg_gen_or_tl(tnew, tnew, told);
4422 gen_helper_set_fsr_noftt(tcg_env, tnew);
4423 return advance_pc(dc);
4424 }
4425
4426 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4427 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4428
4429 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4430 {
4431 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4432 TCGv fsr;
4433
4434 if (addr == NULL) {
4435 return false;
4436 }
4437 if (gen_trap_ifnofpu(dc)) {
4438 return true;
4439 }
4440
4441 fsr = tcg_temp_new();
4442 gen_helper_get_fsr(fsr, tcg_env);
4443 tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4444 return advance_pc(dc);
4445 }
4446
4447 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4448 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4449
4450 static bool do_fc(DisasContext *dc, int rd, bool c)
4451 {
4452 uint64_t mask;
4453
4454 if (gen_trap_ifnofpu(dc)) {
4455 return true;
4456 }
4457
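/*
 * cpu_fpr[] packs two 32-bit float registers per i64, with the
 * even-numbered register in the high half; select the matching mask.
 */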
4458 if (rd & 1) {
4459 mask = MAKE_64BIT_MASK(0, 32);
4460 } else {
4461 mask = MAKE_64BIT_MASK(32, 32);
4462 }
4463 if (c) {
4464 tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4465 } else {
4466 tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4467 }
4468 gen_update_fprs_dirty(dc, rd);
4469 return advance_pc(dc);
4470 }
4471
4472 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4473 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4474
4475 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4476 {
4477 if (gen_trap_ifnofpu(dc)) {
4478 return true;
4479 }
4480
4481 tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4482 gen_update_fprs_dirty(dc, rd);
4483 return advance_pc(dc);
4484 }
4485
4486 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4487 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4488
4489 static bool do_ff(DisasContext *dc, arg_r_r *a,
4490 void (*func)(TCGv_i32, TCGv_i32))
4491 {
4492 TCGv_i32 tmp;
4493
4494 if (gen_trap_ifnofpu(dc)) {
4495 return true;
4496 }
4497
4498 tmp = gen_load_fpr_F(dc, a->rs);
4499 func(tmp, tmp);
4500 gen_store_fpr_F(dc, a->rd, tmp);
4501 return advance_pc(dc);
4502 }
4503
4504 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4505 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4506 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4507 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4508 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4509
4510 static bool do_fd(DisasContext *dc, arg_r_r *a,
4511 void (*func)(TCGv_i32, TCGv_i64))
4512 {
4513 TCGv_i32 dst;
4514 TCGv_i64 src;
4515
4516 if (gen_trap_ifnofpu(dc)) {
4517 return true;
4518 }
4519
4520 dst = tcg_temp_new_i32();
4521 src = gen_load_fpr_D(dc, a->rs);
4522 func(dst, src);
4523 gen_store_fpr_F(dc, a->rd, dst);
4524 return advance_pc(dc);
4525 }
4526
4527 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4528 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4529
4530 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4531 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4532 {
4533 TCGv_i32 tmp;
4534
4535 if (gen_trap_ifnofpu(dc)) {
4536 return true;
4537 }
4538
4539 tmp = gen_load_fpr_F(dc, a->rs);
4540 func(tmp, tcg_env, tmp);
4541 gen_store_fpr_F(dc, a->rd, tmp);
4542 return advance_pc(dc);
4543 }
4544
4545 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4546 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4547 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4548
4549 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4550 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4551 {
4552 TCGv_i32 dst;
4553 TCGv_i64 src;
4554
4555 if (gen_trap_ifnofpu(dc)) {
4556 return true;
4557 }
4558
4559 dst = tcg_temp_new_i32();
4560 src = gen_load_fpr_D(dc, a->rs);
4561 func(dst, tcg_env, src);
4562 gen_store_fpr_F(dc, a->rd, dst);
4563 return advance_pc(dc);
4564 }
4565
4566 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4567 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4568 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4569
4570 static bool do_dd(DisasContext *dc, arg_r_r *a,
4571 void (*func)(TCGv_i64, TCGv_i64))
4572 {
4573 TCGv_i64 dst, src;
4574
4575 if (gen_trap_ifnofpu(dc)) {
4576 return true;
4577 }
4578
4579 dst = gen_dest_fpr_D(dc, a->rd);
4580 src = gen_load_fpr_D(dc, a->rs);
4581 func(dst, src);
4582 gen_store_fpr_D(dc, a->rd, dst);
4583 return advance_pc(dc);
4584 }
4585
4586 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4587 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4588 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4589 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4590 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4591
4592 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4593 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4594 {
4595 TCGv_i64 dst, src;
4596
4597 if (gen_trap_ifnofpu(dc)) {
4598 return true;
4599 }
4600
4601 dst = gen_dest_fpr_D(dc, a->rd);
4602 src = gen_load_fpr_D(dc, a->rs);
4603 func(dst, tcg_env, src);
4604 gen_store_fpr_D(dc, a->rd, dst);
4605 return advance_pc(dc);
4606 }
4607
4608 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4609 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4610 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4611
4612 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4613 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4614 {
4615 TCGv_i64 dst;
4616 TCGv_i32 src;
4617
4618 if (gen_trap_ifnofpu(dc)) {
4619 return true;
4620 }
4621
4622 dst = gen_dest_fpr_D(dc, a->rd);
4623 src = gen_load_fpr_F(dc, a->rs);
4624 func(dst, tcg_env, src);
4625 gen_store_fpr_D(dc, a->rd, dst);
4626 return advance_pc(dc);
4627 }
4628
4629 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4630 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4631 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4632
4633 static bool do_qq(DisasContext *dc, arg_r_r *a,
4634 void (*func)(TCGv_i128, TCGv_i128))
4635 {
4636 TCGv_i128 t;
4637
4638 if (gen_trap_ifnofpu(dc)) {
4639 return true;
4640 }
4641 if (gen_trap_float128(dc)) {
4642 return true;
4643 }
4644
4645 gen_op_clear_ieee_excp_and_FTT();
4646 t = gen_load_fpr_Q(dc, a->rs);
4647 func(t, t);
4648 gen_store_fpr_Q(dc, a->rd, t);
4649 return advance_pc(dc);
4650 }
4651
4652 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4653 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4654 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4655
4656 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4657 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4658 {
4659 TCGv_i128 t;
4660
4661 if (gen_trap_ifnofpu(dc)) {
4662 return true;
4663 }
4664 if (gen_trap_float128(dc)) {
4665 return true;
4666 }
4667
4668 t = gen_load_fpr_Q(dc, a->rs);
4669 func(t, tcg_env, t);
4670 gen_store_fpr_Q(dc, a->rd, t);
4671 return advance_pc(dc);
4672 }
4673
4674 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4675
4676 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4677 void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4678 {
4679 TCGv_i128 src;
4680 TCGv_i32 dst;
4681
4682 if (gen_trap_ifnofpu(dc)) {
4683 return true;
4684 }
4685 if (gen_trap_float128(dc)) {
4686 return true;
4687 }
4688
4689 src = gen_load_fpr_Q(dc, a->rs);
4690 dst = tcg_temp_new_i32();
4691 func(dst, tcg_env, src);
4692 gen_store_fpr_F(dc, a->rd, dst);
4693 return advance_pc(dc);
4694 }
4695
4696 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4697 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4698
4699 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4700 void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4701 {
4702 TCGv_i128 src;
4703 TCGv_i64 dst;
4704
4705 if (gen_trap_ifnofpu(dc)) {
4706 return true;
4707 }
4708 if (gen_trap_float128(dc)) {
4709 return true;
4710 }
4711
4712 src = gen_load_fpr_Q(dc, a->rs);
4713 dst = gen_dest_fpr_D(dc, a->rd);
4714 func(dst, tcg_env, src);
4715 gen_store_fpr_D(dc, a->rd, dst);
4716 return advance_pc(dc);
4717 }
4718
4719 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4720 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4721
4722 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4723 void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4724 {
4725 TCGv_i32 src;
4726 TCGv_i128 dst;
4727
4728 if (gen_trap_ifnofpu(dc)) {
4729 return true;
4730 }
4731 if (gen_trap_float128(dc)) {
4732 return true;
4733 }
4734
4735 src = gen_load_fpr_F(dc, a->rs);
4736 dst = tcg_temp_new_i128();
4737 func(dst, tcg_env, src);
4738 gen_store_fpr_Q(dc, a->rd, dst);
4739 return advance_pc(dc);
4740 }
4741
4742 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4743 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4744
4745 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4746 void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4747 {
4748 TCGv_i64 src;
4749 TCGv_i128 dst;
4750
4751 if (gen_trap_ifnofpu(dc)) {
4752 return true;
4753 }
4754 if (gen_trap_float128(dc)) {
4755 return true;
4756 }
4757
4758 src = gen_load_fpr_D(dc, a->rs);
4759 dst = tcg_temp_new_i128();
4760 func(dst, tcg_env, src);
4761 gen_store_fpr_Q(dc, a->rd, dst);
4762 return advance_pc(dc);
4763 }
4764
4765 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4766 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4767
4768 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4769 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4770 {
4771 TCGv_i32 src1, src2;
4772
4773 if (gen_trap_ifnofpu(dc)) {
4774 return true;
4775 }
4776
4777 src1 = gen_load_fpr_F(dc, a->rs1);
4778 src2 = gen_load_fpr_F(dc, a->rs2);
4779 func(src1, src1, src2);
4780 gen_store_fpr_F(dc, a->rd, src1);
4781 return advance_pc(dc);
4782 }
4783
4784 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4785 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4786 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4787 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4788 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4789 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4790 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4791 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4792 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4793 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4794 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4795 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4796
4797 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4798 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4799 {
4800 TCGv_i32 src1, src2;
4801
4802 if (gen_trap_ifnofpu(dc)) {
4803 return true;
4804 }
4805
4806 src1 = gen_load_fpr_F(dc, a->rs1);
4807 src2 = gen_load_fpr_F(dc, a->rs2);
4808 func(src1, tcg_env, src1, src2);
4809 gen_store_fpr_F(dc, a->rd, src1);
4810 return advance_pc(dc);
4811 }
4812
4813 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4814 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4815 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4816 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4817
4818 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4819 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4820 {
4821 TCGv_i64 dst, src1, src2;
4822
4823 if (gen_trap_ifnofpu(dc)) {
4824 return true;
4825 }
4826
4827 dst = gen_dest_fpr_D(dc, a->rd);
4828 src1 = gen_load_fpr_D(dc, a->rs1);
4829 src2 = gen_load_fpr_D(dc, a->rs2);
4830 func(dst, src1, src2);
4831 gen_store_fpr_D(dc, a->rd, dst);
4832 return advance_pc(dc);
4833 }
4834
4835 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4836 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4837 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4838 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4839 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4840 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4841 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4842 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4843 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4844
4845 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4846 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4847 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4848 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4849 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4850 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4851 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4852 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4853 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4854 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4855 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4856 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4857
4858 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4859 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4860 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4861
4862 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4863 void (*func)(TCGv, TCGv_i64, TCGv_i64))
4864 {
4865 TCGv_i64 src1, src2;
4866 TCGv dst;
4867
4868 if (gen_trap_ifnofpu(dc)) {
4869 return true;
4870 }
4871
4872 dst = gen_dest_gpr(dc, a->rd);
4873 src1 = gen_load_fpr_D(dc, a->rs1);
4874 src2 = gen_load_fpr_D(dc, a->rs2);
4875 func(dst, src1, src2);
4876 gen_store_gpr(dc, a->rd, dst);
4877 return advance_pc(dc);
4878 }
4879
4880 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4881 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4882 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4883 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4884
4885 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4886 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4887 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4888 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4889
4890 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4891 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4892 {
4893 TCGv_i64 dst, src1, src2;
4894
4895 if (gen_trap_ifnofpu(dc)) {
4896 return true;
4897 }
4898
4899 dst = gen_dest_fpr_D(dc, a->rd);
4900 src1 = gen_load_fpr_D(dc, a->rs1);
4901 src2 = gen_load_fpr_D(dc, a->rs2);
4902 func(dst, tcg_env, src1, src2);
4903 gen_store_fpr_D(dc, a->rd, dst);
4904 return advance_pc(dc);
4905 }
4906
4907 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4908 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4909 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4910 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4911
4912 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4913 {
4914 TCGv_i64 dst;
4915 TCGv_i32 src1, src2;
4916
4917 if (gen_trap_ifnofpu(dc)) {
4918 return true;
4919 }
4920 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4921 return raise_unimpfpop(dc);
4922 }
4923
4924 dst = gen_dest_fpr_D(dc, a->rd);
4925 src1 = gen_load_fpr_F(dc, a->rs1);
4926 src2 = gen_load_fpr_F(dc, a->rs2);
4927 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4928 gen_store_fpr_D(dc, a->rd, dst);
4929 return advance_pc(dc);
4930 }
4931
4932 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4933 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4934 {
4935 TCGv_i64 dst, src0, src1, src2;
4936
4937 if (gen_trap_ifnofpu(dc)) {
4938 return true;
4939 }
4940
4941 dst = gen_dest_fpr_D(dc, a->rd);
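/* The old value of rd is an input: PDIST accumulates into rd. */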
4942 src0 = gen_load_fpr_D(dc, a->rd);
4943 src1 = gen_load_fpr_D(dc, a->rs1);
4944 src2 = gen_load_fpr_D(dc, a->rs2);
4945 func(dst, src0, src1, src2);
4946 gen_store_fpr_D(dc, a->rd, dst);
4947 return advance_pc(dc);
4948 }
4949
4950 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4951
4952 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4953 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4954 {
4955 TCGv_i128 src1, src2;
4956
4957 if (gen_trap_ifnofpu(dc)) {
4958 return true;
4959 }
4960 if (gen_trap_float128(dc)) {
4961 return true;
4962 }
4963
4964 src1 = gen_load_fpr_Q(dc, a->rs1);
4965 src2 = gen_load_fpr_Q(dc, a->rs2);
4966 func(src1, tcg_env, src1, src2);
4967 gen_store_fpr_Q(dc, a->rd, src1);
4968 return advance_pc(dc);
4969 }
4970
4971 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4972 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
4973 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
4974 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4975
4976 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
4977 {
4978 TCGv_i64 src1, src2;
4979 TCGv_i128 dst;
4980
4981 if (gen_trap_ifnofpu(dc)) {
4982 return true;
4983 }
4984 if (gen_trap_float128(dc)) {
4985 return true;
4986 }
4987
4988 src1 = gen_load_fpr_D(dc, a->rs1);
4989 src2 = gen_load_fpr_D(dc, a->rs2);
4990 dst = tcg_temp_new_i128();
4991 gen_helper_fdmulq(dst, tcg_env, src1, src2);
4992 gen_store_fpr_Q(dc, a->rd, dst);
4993 return advance_pc(dc);
4994 }
4995
4996 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
4997 void (*func)(DisasContext *, DisasCompare *, int, int))
4998 {
4999 DisasCompare cmp;
5000
5001 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5002 return false;
5003 }
5004 if (gen_trap_ifnofpu(dc)) {
5005 return true;
5006 }
5007 if (is_128 && gen_trap_float128(dc)) {
5008 return true;
5009 }
5010
5011 gen_op_clear_ieee_excp_and_FTT();
5012 func(dc, &cmp, a->rd, a->rs2);
5013 return advance_pc(dc);
5014 }
5015
5016 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5017 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5018 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5019
5020 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5021 void (*func)(DisasContext *, DisasCompare *, int, int))
5022 {
5023 DisasCompare cmp;
5024
5025 if (gen_trap_ifnofpu(dc)) {
5026 return true;
5027 }
5028 if (is_128 && gen_trap_float128(dc)) {
5029 return true;
5030 }
5031
5032 gen_op_clear_ieee_excp_and_FTT();
5033 gen_compare(&cmp, a->cc, a->cond, dc);
5034 func(dc, &cmp, a->rd, a->rs2);
5035 return advance_pc(dc);
5036 }
5037
5038 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5039 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5040 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5041
5042 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5043 void (*func)(DisasContext *, DisasCompare *, int, int))
5044 {
5045 DisasCompare cmp;
5046
5047 if (gen_trap_ifnofpu(dc)) {
5048 return true;
5049 }
5050 if (is_128 && gen_trap_float128(dc)) {
5051 return true;
5052 }
5053
5054 gen_op_clear_ieee_excp_and_FTT();
5055 gen_fcompare(&cmp, a->cc, a->cond);
5056 func(dc, &cmp, a->rd, a->rs2);
5057 return advance_pc(dc);
5058 }
5059
5060 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5061 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5062 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5063
5064 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5065 {
5066 TCGv_i32 src1, src2;
5067
5068 if (avail_32(dc) && a->cc != 0) {
5069 return false;
5070 }
5071 if (gen_trap_ifnofpu(dc)) {
5072 return true;
5073 }
5074
5075 src1 = gen_load_fpr_F(dc, a->rs1);
5076 src2 = gen_load_fpr_F(dc, a->rs2);
5077 if (e) {
5078 gen_op_fcmpes(a->cc, src1, src2);
5079 } else {
5080 gen_op_fcmps(a->cc, src1, src2);
5081 }
5082 return advance_pc(dc);
5083 }
5084
5085 TRANS(FCMPs, ALL, do_fcmps, a, false)
5086 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5087
5088 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5089 {
5090 TCGv_i64 src1, src2;
5091
5092 if (avail_32(dc) && a->cc != 0) {
5093 return false;
5094 }
5095 if (gen_trap_ifnofpu(dc)) {
5096 return true;
5097 }
5098
5099 src1 = gen_load_fpr_D(dc, a->rs1);
5100 src2 = gen_load_fpr_D(dc, a->rs2);
5101 if (e) {
5102 gen_op_fcmped(a->cc, src1, src2);
5103 } else {
5104 gen_op_fcmpd(a->cc, src1, src2);
5105 }
5106 return advance_pc(dc);
5107 }
5108
5109 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5110 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5111
5112 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5113 {
5114 TCGv_i128 src1, src2;
5115
5116 if (avail_32(dc) && a->cc != 0) {
5117 return false;
5118 }
5119 if (gen_trap_ifnofpu(dc)) {
5120 return true;
5121 }
5122 if (gen_trap_float128(dc)) {
5123 return true;
5124 }
5125
5126 src1 = gen_load_fpr_Q(dc, a->rs1);
5127 src2 = gen_load_fpr_Q(dc, a->rs2);
5128 if (e) {
5129 gen_op_fcmpeq(a->cc, src1, src2);
5130 } else {
5131 gen_op_fcmpq(a->cc, src1, src2);
5132 }
5133 return advance_pc(dc);
5134 }
5135
5136 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5137 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5138
5139 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5140 {
5141 DisasContext *dc = container_of(dcbase, DisasContext, base);
5142 CPUSPARCState *env = cpu_env(cs);
5143 int bound;
5144
5145 dc->pc = dc->base.pc_first;
5146 dc->npc = (target_ulong)dc->base.tb->cs_base;
5147 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5148 dc->def = &env->def;
5149 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5150 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5151 #ifndef CONFIG_USER_ONLY
5152 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5153 #endif
5154 #ifdef TARGET_SPARC64
5155 dc->fprs_dirty = 0;
5156 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5157 #ifndef CONFIG_USER_ONLY
5158 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5159 #endif
5160 #endif
5161 /*
5162 * If we reach a page boundary, we stop generation so that the
5163 * PC of a TT_TFAULT exception is always in the right page.
5164 */
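/*
 * -(pc_first | TARGET_PAGE_MASK) is the number of bytes left in
 * the page, so dividing by the 4-byte insn size bounds the count.
 */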
5165 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5166 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5167 }
5168
5169 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5170 {
5171 }
5172
5173 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5174 {
5175 DisasContext *dc = container_of(dcbase, DisasContext, base);
5176 target_ulong npc = dc->npc;
5177
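/*
 * The low two bits of npc encode a symbolic state (JUMP_PC,
 * DYNAMIC_PC, DYNAMIC_PC_LOOKUP) rather than a real address.
 */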
5178 if (npc & 3) {
5179 switch (npc) {
5180 case JUMP_PC:
5181 assert(dc->jump_pc[1] == dc->pc + 4);
5182 npc = dc->jump_pc[0] | JUMP_PC;
5183 break;
5184 case DYNAMIC_PC:
5185 case DYNAMIC_PC_LOOKUP:
5186 npc = DYNAMIC_PC;
5187 break;
5188 default:
5189 g_assert_not_reached();
5190 }
5191 }
5192 tcg_gen_insn_start(dc->pc, npc);
5193 }
5194
5195 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5196 {
5197 DisasContext *dc = container_of(dcbase, DisasContext, base);
5198 CPUSPARCState *env = cpu_env(cs);
5199 unsigned int insn;
5200
5201 insn = translator_ldl(env, &dc->base, dc->pc);
5202 dc->base.pc_next += 4;
5203
5204 if (!decode(dc, insn)) {
5205 gen_exception(dc, TT_ILL_INSN);
5206 }
5207
5208 if (dc->base.is_jmp == DISAS_NORETURN) {
5209 return;
5210 }
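/*
 * For straight-line code, advance_pc keeps dc->pc in step with
 * pc_next; any divergence means control flow changed, so end the TB.
 */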
5211 if (dc->pc != dc->base.pc_next) {
5212 dc->base.is_jmp = DISAS_TOO_MANY;
5213 }
5214 }
5215
5216 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5217 {
5218 DisasContext *dc = container_of(dcbase, DisasContext, base);
5219 DisasDelayException *e, *e_next;
5220 bool may_lookup;
5221
5222 finishing_insn(dc);
5223
5224 switch (dc->base.is_jmp) {
5225 case DISAS_NEXT:
5226 case DISAS_TOO_MANY:
5227 if (((dc->pc | dc->npc) & 3) == 0) {
5228 /* static PC and NPC: we can use direct chaining */
5229 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5230 break;
5231 }
5232
5233 may_lookup = true;
5234 if (dc->pc & 3) {
5235 switch (dc->pc) {
5236 case DYNAMIC_PC_LOOKUP:
5237 break;
5238 case DYNAMIC_PC:
5239 may_lookup = false;
5240 break;
5241 default:
5242 g_assert_not_reached();
5243 }
5244 } else {
5245 tcg_gen_movi_tl(cpu_pc, dc->pc);
5246 }
5247
5248 if (dc->npc & 3) {
5249 switch (dc->npc) {
5250 case JUMP_PC:
5251 gen_generic_branch(dc);
5252 break;
5253 case DYNAMIC_PC:
5254 may_lookup = false;
5255 break;
5256 case DYNAMIC_PC_LOOKUP:
5257 break;
5258 default:
5259 g_assert_not_reached();
5260 }
5261 } else {
5262 tcg_gen_movi_tl(cpu_npc, dc->npc);
5263 }
5264 if (may_lookup) {
5265 tcg_gen_lookup_and_goto_ptr();
5266 } else {
5267 tcg_gen_exit_tb(NULL, 0);
5268 }
5269 break;
5270
5271 case DISAS_NORETURN:
5272 break;
5273
5274 case DISAS_EXIT:
5275 /* Exit TB */
5276 save_state(dc);
5277 tcg_gen_exit_tb(NULL, 0);
5278 break;
5279
5280 default:
5281 g_assert_not_reached();
5282 }
5283
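/*
 * Emit the out-of-line exception paths deferred during translation;
 * each e->lab was targeted by a branch emitted earlier in the TB.
 * e->npc is written only when it is a real (aligned) address, not
 * a symbolic marker.
 */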
5284 for (e = dc->delay_excp_list; e ; e = e_next) {
5285 gen_set_label(e->lab);
5286
5287 tcg_gen_movi_tl(cpu_pc, e->pc);
5288 if (e->npc % 4 == 0) {
5289 tcg_gen_movi_tl(cpu_npc, e->npc);
5290 }
5291 gen_helper_raise_exception(tcg_env, e->excp);
5292
5293 e_next = e->next;
5294 g_free(e);
5295 }
5296 }
5297
5298 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5299 CPUState *cpu, FILE *logfile)
5300 {
5301 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5302 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5303 }
5304
5305 static const TranslatorOps sparc_tr_ops = {
5306 .init_disas_context = sparc_tr_init_disas_context,
5307 .tb_start = sparc_tr_tb_start,
5308 .insn_start = sparc_tr_insn_start,
5309 .translate_insn = sparc_tr_translate_insn,
5310 .tb_stop = sparc_tr_tb_stop,
5311 .disas_log = sparc_tr_disas_log,
5312 };
5313
5314 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5315 vaddr pc, void *host_pc)
5316 {
5317 DisasContext dc = {};
5318
5319 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5320 }
5321
5322 void sparc_tcg_init(void)
5323 {
5324 static const char gregnames[32][4] = {
5325 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5326 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5327 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5328 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5329 };
5330 static const char fregnames[32][4] = {
5331 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5332 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5333 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5334 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5335 };
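/*
 * Only even names appear: each cpu_fpr[] element is an i64 backing
 * a pair of 32-bit float registers.
 */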
5336
5337 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5338 #ifdef TARGET_SPARC64
5339 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5340 { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5341 { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5342 #endif
5343 { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5344 { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5345 { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5346 { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5347 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5348 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5349 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5350 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5351 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5352 };
5353
5354 unsigned int i;
5355
5356 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5357 offsetof(CPUSPARCState, regwptr),
5358 "regwptr");
5359
5360 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5361 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5362 }
5363
5364 cpu_regs[0] = NULL;
5365 for (i = 1; i < 8; ++i) {
5366 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5367 offsetof(CPUSPARCState, gregs[i]),
5368 gregnames[i]);
5369 }
5370
5371 for (i = 8; i < 32; ++i) {
5372 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5373 (i - 8) * sizeof(target_ulong),
5374 gregnames[i]);
5375 }
5376
5377 for (i = 0; i < TARGET_DPREGS; i++) {
5378 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5379 offsetof(CPUSPARCState, fpr[i]),
5380 fregnames[i]);
5381 }
5382
5383 #ifdef TARGET_SPARC64
5384 cpu_fprs = tcg_global_mem_new_i32(tcg_env,
5385 offsetof(CPUSPARCState, fprs), "fprs");
5386 #endif
5387 }
5388
5389 void sparc_restore_state_to_opc(CPUState *cs,
5390 const TranslationBlock *tb,
5391 const uint64_t *data)
5392 {
5393 SPARCCPU *cpu = SPARC_CPU(cs);
5394 CPUSPARCState *env = &cpu->env;
5395 target_ulong pc = data[0];
5396 target_ulong npc = data[1];
5397
5398 env->pc = pc;
5399 if (npc == DYNAMIC_PC) {
5400 /* dynamic NPC: already stored */
5401 } else if (npc & JUMP_PC) {
5402 /* jump PC: use 'cond' and the jump targets of the translation */
5403 if (env->cond) {
5404 env->npc = npc & ~3;
5405 } else {
5406 env->npc = pc + 4;
5407 }
5408 } else {
5409 env->npc = npc;
5410 }
5411 }