/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist           ({ qemu_build_not_reached(); NULL; })
# define FSR_LDXFSR_MASK            0
# define FSR_LDXFSR_OLDMASK         0
# define MAXTL_MASK                 0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr  ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order: bit 0 is the most-significant bit
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b)     sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b)  sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))
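
/*
 * Worked example (derived from the definitions above): the 2-bit op
 * field occupies the two most-significant bits of an instruction word,
 * so GET_FIELD(insn, 0, 1) and GET_FIELD_SP(insn, 30, 31) both expand
 * to (insn >> 30) & 3.
 */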

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
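
/*
 * For example, %f32 is encoded in an instruction as rd = 1: on sparc64,
 * DFPREG(1) = (1 << 5) | 0 = 32 recovers the architectural register
 * number, since V9 uses bit 0 of the register field as bit 5 for the
 * upper bank.  Pre-V9 CPUs have no upper bank, so only the low bit(s)
 * are masked off.
 */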

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = DFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
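
/*
 * For example, with PSTATE.AM set on a 64-bit CPU, a load from
 * 0xffffffff80001000 is performed at 0x80001000: the masking below
 * simply clears the upper 32 address bits.
 */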

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
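
/*
 * On a 64-bit target cpu_icc_C caches res ^ src1 ^ src2 from the last
 * 32-bit flag-setting operation, so the 32-bit carry (the carry out of
 * bit 31) sits in bit 32, and the single-bit extract at position 32
 * above recovers it.
 */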

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
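
/*
 * The V computation above uses the identity
 * V = (res ^ src2) & ~(src1 ^ src2): signed overflow can occur only
 * when both operands have the same sign and the result's sign differs.
 * E.g. 0x7fffffff + 0x7fffffff = 0xfffffffe sets bit 31 of cpu_cc_V.
 */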

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *     src2 = 0;
     */
    tcg_gen_andi_tl(t0, cpu_y, 0x1);
    tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
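
/*
 * MULScc is one step of the pre-V8 32-step shift-and-add multiply:
 * when bit 0 of %y is set, the multiplicand is added to the shifted
 * partial product; the partial product shifts right with N ^ V
 * entering at the top, and its low bit shifts into %y.  Thirty-two
 * such steps (plus a final adjustment step) compute a full 32x32
 * multiply on CPUs that lack UMUL/SMUL.
 */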

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
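
/*
 * In both configurations %y receives the upper 32 bits of the 64-bit
 * product, as the architecture requires: e.g. UMUL of 0x80000000 by 4
 * stores 2 into %y, the product's upper half.
 */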

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
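
/*
 * Worked example: with GSR.align = 3 this computes
 * dst = (s1 << 24) | (s2 >> 40), i.e. the 8 bytes starting 3 bytes
 * into the 16-byte value s1:s2.  Note 24 ^ 63 = 39, and the extra
 * shri by 1 makes the total right shift 40, so the two shifts still
 * sum to 64.
 */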

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result. */
    cmp->cond = TCG_COND_NE;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = 0;

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged. */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged. */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
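
/*
 * For example, a normal (non-alternate) access is resolved with
 * asi == -1, taking the early-out above: it yields GET_ASI_DIRECT with
 * the context's own mem_idx and skips the privilege checks entirely.
 * Everything else is classified by the switches above or falls back to
 * the GET_ASI_HELPER slow path.
 */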

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp,
                                da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them. */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1992
1993 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1994 TCGv addr, int rd)
1995 {
1996 MemOp memop = da->memop;
1997 MemOp size = memop & MO_SIZE;
1998 TCGv_i32 d32;
1999 TCGv addr_tmp;
2000
2001 /* TODO: Use 128-bit load/store below. */
2002 if (size == MO_128) {
2003 memop = (memop & ~MO_SIZE) | MO_64;
2004 }
2005
2006 switch (da->type) {
2007 case GET_ASI_EXCP:
2008 break;
2009
2010 case GET_ASI_DIRECT:
2011 memop |= MO_ALIGN_4;
2012 switch (size) {
2013 case MO_32:
2014 d32 = gen_load_fpr_F(dc, rd);
2015 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2016 break;
2017 case MO_64:
2018 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2019 memop | MO_ALIGN_4);
2020 break;
2021 case MO_128:
2022 /* Only 4-byte alignment required. However, it is legal for the
2023 cpu to signal the alignment fault, and the OS trap handler is
2024 required to fix it up. Requiring 16-byte alignment here avoids
2025 having to probe the second page before performing the first
2026 write. */
2027 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2028 memop | MO_ALIGN_16);
2029 addr_tmp = tcg_temp_new();
2030 tcg_gen_addi_tl(addr_tmp, addr, 8);
2031 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2032 break;
2033 default:
2034 g_assert_not_reached();
2035 }
2036 break;
2037
2038 case GET_ASI_BLOCK:
2039 /* Valid for stdfa on aligned registers only. */
2040 if (orig_size == MO_64 && (rd & 7) == 0) {
2041 /* The first operation checks required alignment. */
2042 addr_tmp = tcg_temp_new();
2043 for (int i = 0; ; ++i) {
2044 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2045 memop | (i == 0 ? MO_ALIGN_64 : 0));
2046 if (i == 7) {
2047 break;
2048 }
2049 tcg_gen_addi_tl(addr_tmp, addr, 8);
2050 addr = addr_tmp;
2051 }
2052 } else {
2053 gen_exception(dc, TT_ILL_INSN);
2054 }
2055 break;
2056
2057 case GET_ASI_SHORT:
2058 /* Valid for stdfa only. */
2059 if (orig_size == MO_64) {
2060 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2061 memop | MO_ALIGN);
2062 } else {
2063 gen_exception(dc, TT_ILL_INSN);
2064 }
2065 break;
2066
2067 default:
2068 /* According to the table in the UA2011 manual, the only
2069 other ASIs that are valid for stfa/stdfa/stqfa are
2070 the PST* ASIs, which aren't currently handled. */
2071 gen_exception(dc, TT_ILL_INSN);
2072 break;
2073 }
2074 }
2075
2076 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2077 {
2078 TCGv hi = gen_dest_gpr(dc, rd);
2079 TCGv lo = gen_dest_gpr(dc, rd + 1);
2080
2081 switch (da->type) {
2082 case GET_ASI_EXCP:
2083 return;
2084
2085 case GET_ASI_DTWINX:
2086 #ifdef TARGET_SPARC64
2087 {
2088 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2089 TCGv_i128 t = tcg_temp_new_i128();
2090
2091 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2092 /*
2093 * Note that LE twinx acts as if each 64-bit register result is
2094 * byte swapped. We perform one 128-bit LE load, so must swap
2095 * the order of the writebacks.
2096 */
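/* Recall that tcg_gen_extr_i128_i64(a, b, t) sets A to the low and
   B to the high 64 bits of T. For the LE case the first memory word
   lands in the low half, so it must be written back to HI (rd). */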
2097 if ((mop & MO_BSWAP) == MO_TE) {
2098 tcg_gen_extr_i128_i64(lo, hi, t);
2099 } else {
2100 tcg_gen_extr_i128_i64(hi, lo, t);
2101 }
2102 }
2103 break;
2104 #else
2105 g_assert_not_reached();
2106 #endif
2107
2108 case GET_ASI_DIRECT:
2109 {
2110 TCGv_i64 tmp = tcg_temp_new_i64();
2111
2112 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2113
2114 /* Note that LE ldda acts as if each 32-bit register
2115 result is byte swapped. Having just performed one
2116 64-bit bswap, we now need to swap the order of the writebacks. */
2117 if ((da->memop & MO_BSWAP) == MO_TE) {
2118 tcg_gen_extr_i64_tl(lo, hi, tmp);
2119 } else {
2120 tcg_gen_extr_i64_tl(hi, lo, tmp);
2121 }
2122 }
2123 break;
2124
2125 default:
2126 /* ??? In theory we've handled all of the ASIs that are valid
2127 for ldda, and this should raise DAE_invalid_asi. However,
2128 real hardware allows others. This can be seen with e.g.
2129 FreeBSD 10.3 wrt ASI_IC_TAG. */
2130 {
2131 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2132 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2133 TCGv_i64 tmp = tcg_temp_new_i64();
2134
2135 save_state(dc);
2136 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2137
2138 /* See above. */
2139 if ((da->memop & MO_BSWAP) == MO_TE) {
2140 tcg_gen_extr_i64_tl(lo, hi, tmp);
2141 } else {
2142 tcg_gen_extr_i64_tl(hi, lo, tmp);
2143 }
2144 }
2145 break;
2146 }
2147
2148 gen_store_gpr(dc, rd, hi);
2149 gen_store_gpr(dc, rd + 1, lo);
2150 }
2151
2152 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2153 {
2154 TCGv hi = gen_load_gpr(dc, rd);
2155 TCGv lo = gen_load_gpr(dc, rd + 1);
2156
2157 switch (da->type) {
2158 case GET_ASI_EXCP:
2159 break;
2160
2161 case GET_ASI_DTWINX:
2162 #ifdef TARGET_SPARC64
2163 {
2164 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2165 TCGv_i128 t = tcg_temp_new_i128();
2166
2167 /*
2168 * Note that LE twinx acts as if each 64-bit register result is
2169 * byte swapped. We perform one 128-bit LE store, so must swap
2170 * the order of the construction.
2171 */
2172 if ((mop & MO_BSWAP) == MO_TE) {
2173 tcg_gen_concat_i64_i128(t, lo, hi);
2174 } else {
2175 tcg_gen_concat_i64_i128(t, hi, lo);
2176 }
2177 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2178 }
2179 break;
2180 #else
2181 g_assert_not_reached();
2182 #endif
2183
2184 case GET_ASI_DIRECT:
2185 {
2186 TCGv_i64 t64 = tcg_temp_new_i64();
2187
2188 /* Note that LE stda acts as if each 32-bit register result is
2189 byte swapped. We will perform one 64-bit LE store, so now
2190 we must swap the order of the construction. */
2191 if ((da->memop & MO_BSWAP) == MO_TE) {
2192 tcg_gen_concat_tl_i64(t64, lo, hi);
2193 } else {
2194 tcg_gen_concat_tl_i64(t64, hi, lo);
2195 }
2196 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2197 }
2198 break;
2199
2200 case GET_ASI_BFILL:
2201 assert(TARGET_LONG_BITS == 32);
2202 /*
2203 * Store 32 bytes of [rd:rd+1] to ADDR.
2204 * See comments for GET_ASI_COPY above.
2205 */
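/* The doubleword rd:rd+1 is thus replicated four times across the
   32-byte line, whose address is rounded down to a 32-byte boundary
   below. */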
2206 {
2207 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2208 TCGv_i64 t8 = tcg_temp_new_i64();
2209 TCGv_i128 t16 = tcg_temp_new_i128();
2210 TCGv daddr = tcg_temp_new();
2211
2212 tcg_gen_concat_tl_i64(t8, lo, hi);
2213 tcg_gen_concat_i64_i128(t16, t8, t8);
2214 tcg_gen_andi_tl(daddr, addr, -32);
2215 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2216 tcg_gen_addi_tl(daddr, daddr, 16);
2217 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2218 }
2219 break;
2220
2221 default:
2222 /* ??? In theory we've handled all of the ASIs that are valid
2223 for stda, and this should raise DAE_invalid_asi. */
2224 {
2225 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2226 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2227 TCGv_i64 t64 = tcg_temp_new_i64();
2228
2229 /* See above. */
2230 if ((da->memop & MO_BSWAP) == MO_TE) {
2231 tcg_gen_concat_tl_i64(t64, lo, hi);
2232 } else {
2233 tcg_gen_concat_tl_i64(t64, hi, lo);
2234 }
2235
2236 save_state(dc);
2237 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2238 }
2239 break;
2240 }
2241 }
2242
2243 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2244 {
2245 #ifdef TARGET_SPARC64
2246 TCGv_i32 c32, zero, dst, s1, s2;
2247 TCGv_i64 c64 = tcg_temp_new_i64();
2248
2249 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2250 or fold the comparison down to 32 bits and use movcond_i32. Choose
2251 the latter. */
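/* The setcond below produces 0 or 1 in 64 bits; truncating to 32 bits
   preserves that value, so movcond_i32 on (c32 != 0) selects between
   the two single-precision registers. */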
2252 c32 = tcg_temp_new_i32();
2253 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2254 tcg_gen_extrl_i64_i32(c32, c64);
2255
2256 s1 = gen_load_fpr_F(dc, rs);
2257 s2 = gen_load_fpr_F(dc, rd);
2258 dst = tcg_temp_new_i32();
2259 zero = tcg_constant_i32(0);
2260
2261 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2262
2263 gen_store_fpr_F(dc, rd, dst);
2264 #else
2265 qemu_build_not_reached();
2266 #endif
2267 }
2268
2269 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2270 {
2271 #ifdef TARGET_SPARC64
2272 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2273 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2274 gen_load_fpr_D(dc, rs),
2275 gen_load_fpr_D(dc, rd));
2276 gen_store_fpr_D(dc, rd, dst);
2277 #else
2278 qemu_build_not_reached();
2279 #endif
2280 }
2281
2282 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2283 {
2284 #ifdef TARGET_SPARC64
2285 int qd = QFPREG(rd);
2286 int qs = QFPREG(rs);
2287 TCGv c2 = tcg_constant_tl(cmp->c2);
2288
2289 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2290 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2291 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2292 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2293
2294 gen_update_fprs_dirty(dc, qd);
2295 #else
2296 qemu_build_not_reached();
2297 #endif
2298 }
2299
2300 #ifdef TARGET_SPARC64
2301 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2302 {
2303 TCGv_i32 r_tl = tcg_temp_new_i32();
2304
2305 /* load env->tl into r_tl */
2306 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2307
2308 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2309 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2310
2311 /* calculate offset to current trap state from env->ts, reuse r_tl */
2312 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2313 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2314
2315 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
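/* E.g. with env->tl == 2, r_tsptr ends up pointing at
   env->ts + 2 * sizeof(trap_state), i.e. &env->ts[2]. */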
2316 {
2317 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2318 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2319 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2320 }
2321 }
2322 #endif
2323
2324 static int extract_dfpreg(DisasContext *dc, int x)
2325 {
2326 return DFPREG(x);
2327 }
2328
2329 static int extract_qfpreg(DisasContext *dc, int x)
2330 {
2331 return QFPREG(x);
2332 }
2333
2334 /* Include the auto-generated decoder. */
2335 #include "decode-insns.c.inc"
2336
2337 #define TRANS(NAME, AVAIL, FUNC, ...) \
2338 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2339 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2340
2341 #define avail_ALL(C) true
2342 #ifdef TARGET_SPARC64
2343 # define avail_32(C) false
2344 # define avail_ASR17(C) false
2345 # define avail_CASA(C) true
2346 # define avail_DIV(C) true
2347 # define avail_MUL(C) true
2348 # define avail_POWERDOWN(C) false
2349 # define avail_64(C) true
2350 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2351 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2352 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2353 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2354 #else
2355 # define avail_32(C) true
2356 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2357 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2358 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2359 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2360 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2361 # define avail_64(C) false
2362 # define avail_GL(C) false
2363 # define avail_HYPV(C) false
2364 # define avail_VIS1(C) false
2365 # define avail_VIS2(C) false
2366 #endif
2367
2368 /* Default case for non-jump instructions. */
2369 static bool advance_pc(DisasContext *dc)
2370 {
2371 TCGLabel *l1;
2372
2373 finishing_insn(dc);
2374
2375 if (dc->npc & 3) {
2376 switch (dc->npc) {
2377 case DYNAMIC_PC:
2378 case DYNAMIC_PC_LOOKUP:
2379 dc->pc = dc->npc;
2380 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2381 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2382 break;
2383
2384 case JUMP_PC:
2385 /* we can do a static jump */
2386 l1 = gen_new_label();
2387 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2388
2389 /* jump not taken */
2390 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2391
2392 /* jump taken */
2393 gen_set_label(l1);
2394 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2395
2396 dc->base.is_jmp = DISAS_NORETURN;
2397 break;
2398
2399 default:
2400 g_assert_not_reached();
2401 }
2402 } else {
2403 dc->pc = dc->npc;
2404 dc->npc = dc->npc + 4;
2405 }
2406 return true;
2407 }
2408
2409 /*
2410 * Major opcodes 00 and 01 -- branches, call, and sethi
2411 */
2412
2413 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2414 bool annul, int disp)
2415 {
2416 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2417 target_ulong npc;
2418
2419 finishing_insn(dc);
2420
2421 if (cmp->cond == TCG_COND_ALWAYS) {
2422 if (annul) {
2423 dc->pc = dest;
2424 dc->npc = dest + 4;
2425 } else {
2426 gen_mov_pc_npc(dc);
2427 dc->npc = dest;
2428 }
2429 return true;
2430 }
2431
2432 if (cmp->cond == TCG_COND_NEVER) {
2433 npc = dc->npc;
2434 if (npc & 3) {
2435 gen_mov_pc_npc(dc);
2436 if (annul) {
2437 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2438 }
2439 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2440 } else {
2441 dc->pc = npc + (annul ? 4 : 0);
2442 dc->npc = dc->pc + 4;
2443 }
2444 return true;
2445 }
2446
2447 flush_cond(dc);
2448 npc = dc->npc;
2449
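/* When the annul bit is set, the delay slot executes only on the
   taken path: fall through to (npc, dest) if the condition holds,
   otherwise skip the slot and continue at (npc + 4, npc + 8). */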
2450 if (annul) {
2451 TCGLabel *l1 = gen_new_label();
2452
2453 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2454 gen_goto_tb(dc, 0, npc, dest);
2455 gen_set_label(l1);
2456 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2457
2458 dc->base.is_jmp = DISAS_NORETURN;
2459 } else {
2460 if (npc & 3) {
2461 switch (npc) {
2462 case DYNAMIC_PC:
2463 case DYNAMIC_PC_LOOKUP:
2464 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2465 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2466 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2467 cmp->c1, tcg_constant_tl(cmp->c2),
2468 tcg_constant_tl(dest), cpu_npc);
2469 dc->pc = npc;
2470 break;
2471 default:
2472 g_assert_not_reached();
2473 }
2474 } else {
2475 dc->pc = npc;
2476 dc->npc = JUMP_PC;
2477 dc->jump = *cmp;
2478 dc->jump_pc[0] = dest;
2479 dc->jump_pc[1] = npc + 4;
2480
2481 /* The condition for cpu_cond is always NE -- normalize. */
2482 if (cmp->cond == TCG_COND_NE) {
2483 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2484 } else {
2485 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2486 }
2487 dc->cpu_cond_live = true;
2488 }
2489 }
2490 return true;
2491 }
2492
2493 static bool raise_priv(DisasContext *dc)
2494 {
2495 gen_exception(dc, TT_PRIV_INSN);
2496 return true;
2497 }
2498
2499 static bool raise_unimpfpop(DisasContext *dc)
2500 {
2501 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2502 return true;
2503 }
2504
2505 static bool gen_trap_float128(DisasContext *dc)
2506 {
2507 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2508 return false;
2509 }
2510 return raise_unimpfpop(dc);
2511 }
2512
2513 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2514 {
2515 DisasCompare cmp;
2516
2517 gen_compare(&cmp, a->cc, a->cond, dc);
2518 return advance_jump_cond(dc, &cmp, a->a, a->i);
2519 }
2520
2521 TRANS(Bicc, ALL, do_bpcc, a)
2522 TRANS(BPcc, 64, do_bpcc, a)
2523
2524 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2525 {
2526 DisasCompare cmp;
2527
2528 if (gen_trap_ifnofpu(dc)) {
2529 return true;
2530 }
2531 gen_fcompare(&cmp, a->cc, a->cond);
2532 return advance_jump_cond(dc, &cmp, a->a, a->i);
2533 }
2534
2535 TRANS(FBPfcc, 64, do_fbpfcc, a)
2536 TRANS(FBfcc, ALL, do_fbpfcc, a)
2537
2538 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2539 {
2540 DisasCompare cmp;
2541
2542 if (!avail_64(dc)) {
2543 return false;
2544 }
2545 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2546 return false;
2547 }
2548 return advance_jump_cond(dc, &cmp, a->a, a->i);
2549 }
2550
2551 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2552 {
2553 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2554
2555 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2556 gen_mov_pc_npc(dc);
2557 dc->npc = target;
2558 return true;
2559 }
2560
2561 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2562 {
2563 /*
2564 * For sparc32, always generate the no-coprocessor exception.
2565 * For sparc64, always generate an illegal instruction exception.
2566 */
2567 #ifdef TARGET_SPARC64
2568 return false;
2569 #else
2570 gen_exception(dc, TT_NCP_INSN);
2571 return true;
2572 #endif
2573 }
2574
2575 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2576 {
2577 /* Special-case %g0 because that's the canonical nop. */
2578 if (a->rd) {
2579 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2580 }
2581 return advance_pc(dc);
2582 }
2583
2584 /*
2585 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2586 */
2587
2588 static bool do_tcc(DisasContext *dc, int cond, int cc,
2589 int rs1, bool imm, int rs2_or_imm)
2590 {
2591 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2592 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2593 DisasCompare cmp;
2594 TCGLabel *lab;
2595 TCGv_i32 trap;
2596
2597 /* Trap never. */
2598 if (cond == 0) {
2599 return advance_pc(dc);
2600 }
2601
2602 /*
2603 * Immediate traps are the most common case. Since this value is
2604 * live across the branch, it really pays to evaluate the constant.
2605 */
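/* E.g. "ta 0x10" folds to tcg_constant_i32((0x10 & mask) + TT_TRAP)
   with no run-time arithmetic at all. */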
2606 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2607 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2608 } else {
2609 trap = tcg_temp_new_i32();
2610 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2611 if (imm) {
2612 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2613 } else {
2614 TCGv_i32 t2 = tcg_temp_new_i32();
2615 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2616 tcg_gen_add_i32(trap, trap, t2);
2617 }
2618 tcg_gen_andi_i32(trap, trap, mask);
2619 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2620 }
2621
2622 finishing_insn(dc);
2623
2624 /* Trap always. */
2625 if (cond == 8) {
2626 save_state(dc);
2627 gen_helper_raise_exception(tcg_env, trap);
2628 dc->base.is_jmp = DISAS_NORETURN;
2629 return true;
2630 }
2631
2632 /* Conditional trap. */
2633 flush_cond(dc);
2634 lab = delay_exceptionv(dc, trap);
2635 gen_compare(&cmp, cc, cond, dc);
2636 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2637
2638 return advance_pc(dc);
2639 }
2640
2641 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2642 {
2643 if (avail_32(dc) && a->cc) {
2644 return false;
2645 }
2646 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2647 }
2648
2649 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2650 {
2651 if (avail_64(dc)) {
2652 return false;
2653 }
2654 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2655 }
2656
2657 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2658 {
2659 if (avail_32(dc)) {
2660 return false;
2661 }
2662 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2663 }
2664
2665 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2666 {
2667 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2668 return advance_pc(dc);
2669 }
2670
2671 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2672 {
2673 if (avail_32(dc)) {
2674 return false;
2675 }
2676 if (a->mmask) {
2677 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2678 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2679 }
2680 if (a->cmask) {
2681 /* For #Sync, etc., end the TB to recognize interrupts. */
2682 dc->base.is_jmp = DISAS_EXIT;
2683 }
2684 return advance_pc(dc);
2685 }
2686
2687 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2688 TCGv (*func)(DisasContext *, TCGv))
2689 {
2690 if (!priv) {
2691 return raise_priv(dc);
2692 }
2693 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2694 return advance_pc(dc);
2695 }
2696
2697 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2698 {
2699 return cpu_y;
2700 }
2701
2702 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2703 {
2704 /*
2705 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2706 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2707 * This matches after all other ASRs, so Leon3 ASR17 is handled first.
2708 */
2709 if (avail_64(dc) && a->rs1 != 0) {
2710 return false;
2711 }
2712 return do_rd_special(dc, true, a->rd, do_rdy);
2713 }
2714
2715 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2716 {
2717 uint32_t val;
2718
2719 /*
2720 * TODO: There are many more fields to be filled,
2721 * some of which are writable.
2722 */
2723 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2724 val |= 1 << 8; /* [8] V8 */
2725
2726 return tcg_constant_tl(val);
2727 }
2728
2729 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2730
2731 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2732 {
2733 gen_helper_rdccr(dst, tcg_env);
2734 return dst;
2735 }
2736
2737 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2738
2739 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2740 {
2741 #ifdef TARGET_SPARC64
2742 return tcg_constant_tl(dc->asi);
2743 #else
2744 qemu_build_not_reached();
2745 #endif
2746 }
2747
2748 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2749
2750 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2751 {
2752 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2753
2754 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2755 if (translator_io_start(&dc->base)) {
2756 dc->base.is_jmp = DISAS_EXIT;
2757 }
2758 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2759 tcg_constant_i32(dc->mem_idx));
2760 return dst;
2761 }
2762
2763 /* TODO: non-priv access only allowed when enabled. */
2764 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2765
2766 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2767 {
2768 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2769 }
2770
2771 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2772
2773 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2774 {
2775 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2776 return dst;
2777 }
2778
2779 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2780
2781 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2782 {
2783 gen_trap_ifnofpu(dc);
2784 return cpu_gsr;
2785 }
2786
2787 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2788
2789 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2790 {
2791 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2792 return dst;
2793 }
2794
2795 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2796
2797 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2798 {
2799 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2800 return dst;
2801 }
2802
2803 /* TODO: non-priv access only allowed when enabled. */
2804 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2805
2806 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2807 {
2808 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2809
2810 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2811 if (translator_io_start(&dc->base)) {
2812 dc->base.is_jmp = DISAS_EXIT;
2813 }
2814 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2815 tcg_constant_i32(dc->mem_idx));
2816 return dst;
2817 }
2818
2819 /* TODO: non-priv access only allowed when enabled. */
2820 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2821
2822 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2823 {
2824 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2825 return dst;
2826 }
2827
2828 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2829 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2830
2831 /*
2832 * UltraSPARC-T1 Strand status.
2833 * The HYPV check may not be enough: UA2005 and UA2007 describe
2834 * this ASR as implementation dependent.
2835 */
2836 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2837 {
2838 return tcg_constant_tl(1);
2839 }
2840
2841 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2842
2843 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2844 {
2845 gen_helper_rdpsr(dst, tcg_env);
2846 return dst;
2847 }
2848
2849 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2850
2851 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2852 {
2853 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2854 return dst;
2855 }
2856
2857 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2858
2859 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2860 {
2861 TCGv_i32 tl = tcg_temp_new_i32();
2862 TCGv_ptr tp = tcg_temp_new_ptr();
2863
2864 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2865 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2866 tcg_gen_shli_i32(tl, tl, 3);
2867 tcg_gen_ext_i32_ptr(tp, tl);
2868 tcg_gen_add_ptr(tp, tp, tcg_env);
2869
2870 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2871 return dst;
2872 }
2873
2874 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2875
2876 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2877 {
2878 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2879 return dst;
2880 }
2881
2882 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2883
2884 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2885 {
2886 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2887 return dst;
2888 }
2889
2890 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2891
2892 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2893 {
2894 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2895 return dst;
2896 }
2897
2898 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2899
2900 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2901 {
2902 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2903 return dst;
2904 }
2905
2906 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2907 do_rdhstick_cmpr)
2908
2909 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2910 {
2911 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2912 return dst;
2913 }
2914
2915 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2916
2917 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2918 {
2919 #ifdef TARGET_SPARC64
2920 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2921
2922 gen_load_trap_state_at_tl(r_tsptr);
2923 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2924 return dst;
2925 #else
2926 qemu_build_not_reached();
2927 #endif
2928 }
2929
2930 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2931
2932 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2933 {
2934 #ifdef TARGET_SPARC64
2935 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2936
2937 gen_load_trap_state_at_tl(r_tsptr);
2938 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2939 return dst;
2940 #else
2941 qemu_build_not_reached();
2942 #endif
2943 }
2944
2945 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2946
2947 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2948 {
2949 #ifdef TARGET_SPARC64
2950 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2951
2952 gen_load_trap_state_at_tl(r_tsptr);
2953 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2954 return dst;
2955 #else
2956 qemu_build_not_reached();
2957 #endif
2958 }
2959
2960 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2961
2962 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2963 {
2964 #ifdef TARGET_SPARC64
2965 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2966
2967 gen_load_trap_state_at_tl(r_tsptr);
2968 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2969 return dst;
2970 #else
2971 qemu_build_not_reached();
2972 #endif
2973 }
2974
2975 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2976 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2977
2978 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2979 {
2980 return cpu_tbr;
2981 }
2982
2983 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2984 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2985
2986 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2987 {
2988 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2989 return dst;
2990 }
2991
2992 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2993
2994 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2995 {
2996 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2997 return dst;
2998 }
2999
3000 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3001
3002 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3003 {
3004 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3005 return dst;
3006 }
3007
3008 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3009
3010 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3011 {
3012 gen_helper_rdcwp(dst, tcg_env);
3013 return dst;
3014 }
3015
3016 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3017
3018 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3019 {
3020 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3021 return dst;
3022 }
3023
3024 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3025
3026 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3027 {
3028 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3029 return dst;
3030 }
3031
3032 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3033 do_rdcanrestore)
3034
3035 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3036 {
3037 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3038 return dst;
3039 }
3040
3041 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3042
3043 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3044 {
3045 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3046 return dst;
3047 }
3048
3049 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3050
3051 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3052 {
3053 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3054 return dst;
3055 }
3056
3057 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3058
3059 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3060 {
3061 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3062 return dst;
3063 }
3064
3065 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3066
3067 /* UA2005 strand status */
3068 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3069 {
3070 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3071 return dst;
3072 }
3073
3074 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3075
3076 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3077 {
3078 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3079 return dst;
3080 }
3081
3082 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3083
3084 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3085 {
3086 if (avail_64(dc)) {
3087 gen_helper_flushw(tcg_env);
3088 return advance_pc(dc);
3089 }
3090 return false;
3091 }
3092
3093 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3094 void (*func)(DisasContext *, TCGv))
3095 {
3096 TCGv src;
3097
3098 /* For simplicity, we under-decoded the rs2 form. */
3099 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3100 return false;
3101 }
3102 if (!priv) {
3103 return raise_priv(dc);
3104 }
3105
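/* Per the architecture, these writes store rs1 ^ (rs2 or simm13);
   the special cases below fold the common rs1 == %g0 forms. */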
3106 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3107 src = tcg_constant_tl(a->rs2_or_imm);
3108 } else {
3109 TCGv src1 = gen_load_gpr(dc, a->rs1);
3110 if (a->rs2_or_imm == 0) {
3111 src = src1;
3112 } else {
3113 src = tcg_temp_new();
3114 if (a->imm) {
3115 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3116 } else {
3117 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3118 }
3119 }
3120 }
3121 func(dc, src);
3122 return advance_pc(dc);
3123 }
3124
3125 static void do_wry(DisasContext *dc, TCGv src)
3126 {
3127 tcg_gen_ext32u_tl(cpu_y, src);
3128 }
3129
3130 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3131
3132 static void do_wrccr(DisasContext *dc, TCGv src)
3133 {
3134 gen_helper_wrccr(tcg_env, src);
3135 }
3136
3137 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3138
3139 static void do_wrasi(DisasContext *dc, TCGv src)
3140 {
3141 TCGv tmp = tcg_temp_new();
3142
3143 tcg_gen_ext8u_tl(tmp, src);
3144 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3145 /* End TB to notice changed ASI. */
3146 dc->base.is_jmp = DISAS_EXIT;
3147 }
3148
3149 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3150
3151 static void do_wrfprs(DisasContext *dc, TCGv src)
3152 {
3153 #ifdef TARGET_SPARC64
3154 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3155 dc->fprs_dirty = 0;
3156 dc->base.is_jmp = DISAS_EXIT;
3157 #else
3158 qemu_build_not_reached();
3159 #endif
3160 }
3161
3162 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3163
3164 static void do_wrgsr(DisasContext *dc, TCGv src)
3165 {
3166 gen_trap_ifnofpu(dc);
3167 tcg_gen_mov_tl(cpu_gsr, src);
3168 }
3169
3170 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3171
3172 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3173 {
3174 gen_helper_set_softint(tcg_env, src);
3175 }
3176
3177 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3178
3179 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3180 {
3181 gen_helper_clear_softint(tcg_env, src);
3182 }
3183
3184 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3185
3186 static void do_wrsoftint(DisasContext *dc, TCGv src)
3187 {
3188 gen_helper_write_softint(tcg_env, src);
3189 }
3190
3191 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3192
3193 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3194 {
3195 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3196
3197 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3198 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3199 translator_io_start(&dc->base);
3200 gen_helper_tick_set_limit(r_tickptr, src);
3201 /* End TB to handle timer interrupt */
3202 dc->base.is_jmp = DISAS_EXIT;
3203 }
3204
3205 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3206
3207 static void do_wrstick(DisasContext *dc, TCGv src)
3208 {
3209 #ifdef TARGET_SPARC64
3210 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3211
3212 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3213 translator_io_start(&dc->base);
3214 gen_helper_tick_set_count(r_tickptr, src);
3215 /* End TB to handle timer interrupt */
3216 dc->base.is_jmp = DISAS_EXIT;
3217 #else
3218 qemu_build_not_reached();
3219 #endif
3220 }
3221
3222 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3223
3224 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3225 {
3226 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3227
3228 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3229 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3230 translator_io_start(&dc->base);
3231 gen_helper_tick_set_limit(r_tickptr, src);
3232 /* End TB to handle timer interrupt */
3233 dc->base.is_jmp = DISAS_EXIT;
3234 }
3235
3236 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3237
3238 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3239 {
3240 finishing_insn(dc);
3241 save_state(dc);
3242 gen_helper_power_down(tcg_env);
3243 }
3244
3245 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3246
3247 static void do_wrpsr(DisasContext *dc, TCGv src)
3248 {
3249 gen_helper_wrpsr(tcg_env, src);
3250 dc->base.is_jmp = DISAS_EXIT;
3251 }
3252
3253 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3254
3255 static void do_wrwim(DisasContext *dc, TCGv src)
3256 {
3257 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3258 TCGv tmp = tcg_temp_new();
3259
3260 tcg_gen_andi_tl(tmp, src, mask);
3261 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3262 }
3263
3264 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3265
3266 static void do_wrtpc(DisasContext *dc, TCGv src)
3267 {
3268 #ifdef TARGET_SPARC64
3269 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3270
3271 gen_load_trap_state_at_tl(r_tsptr);
3272 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3273 #else
3274 qemu_build_not_reached();
3275 #endif
3276 }
3277
3278 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3279
3280 static void do_wrtnpc(DisasContext *dc, TCGv src)
3281 {
3282 #ifdef TARGET_SPARC64
3283 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3284
3285 gen_load_trap_state_at_tl(r_tsptr);
3286 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3287 #else
3288 qemu_build_not_reached();
3289 #endif
3290 }
3291
3292 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3293
3294 static void do_wrtstate(DisasContext *dc, TCGv src)
3295 {
3296 #ifdef TARGET_SPARC64
3297 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3298
3299 gen_load_trap_state_at_tl(r_tsptr);
3300 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3301 #else
3302 qemu_build_not_reached();
3303 #endif
3304 }
3305
3306 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3307
3308 static void do_wrtt(DisasContext *dc, TCGv src)
3309 {
3310 #ifdef TARGET_SPARC64
3311 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3312
3313 gen_load_trap_state_at_tl(r_tsptr);
3314 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3315 #else
3316 qemu_build_not_reached();
3317 #endif
3318 }
3319
3320 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3321
3322 static void do_wrtick(DisasContext *dc, TCGv src)
3323 {
3324 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3325
3326 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3327 translator_io_start(&dc->base);
3328 gen_helper_tick_set_count(r_tickptr, src);
3329 /* End TB to handle timer interrupt */
3330 dc->base.is_jmp = DISAS_EXIT;
3331 }
3332
3333 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3334
3335 static void do_wrtba(DisasContext *dc, TCGv src)
3336 {
3337 tcg_gen_mov_tl(cpu_tbr, src);
3338 }
3339
3340 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3341
3342 static void do_wrpstate(DisasContext *dc, TCGv src)
3343 {
3344 save_state(dc);
3345 if (translator_io_start(&dc->base)) {
3346 dc->base.is_jmp = DISAS_EXIT;
3347 }
3348 gen_helper_wrpstate(tcg_env, src);
3349 dc->npc = DYNAMIC_PC;
3350 }
3351
3352 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3353
3354 static void do_wrtl(DisasContext *dc, TCGv src)
3355 {
3356 save_state(dc);
3357 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3358 dc->npc = DYNAMIC_PC;
3359 }
3360
3361 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3362
3363 static void do_wrpil(DisasContext *dc, TCGv src)
3364 {
3365 if (translator_io_start(&dc->base)) {
3366 dc->base.is_jmp = DISAS_EXIT;
3367 }
3368 gen_helper_wrpil(tcg_env, src);
3369 }
3370
3371 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3372
3373 static void do_wrcwp(DisasContext *dc, TCGv src)
3374 {
3375 gen_helper_wrcwp(tcg_env, src);
3376 }
3377
3378 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3379
3380 static void do_wrcansave(DisasContext *dc, TCGv src)
3381 {
3382 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3383 }
3384
3385 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3386
3387 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3388 {
3389 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3390 }
3391
3392 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3393
3394 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3395 {
3396 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3397 }
3398
3399 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3400
3401 static void do_wrotherwin(DisasContext *dc, TCGv src)
3402 {
3403 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3404 }
3405
3406 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3407
3408 static void do_wrwstate(DisasContext *dc, TCGv src)
3409 {
3410 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3411 }
3412
3413 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3414
3415 static void do_wrgl(DisasContext *dc, TCGv src)
3416 {
3417 gen_helper_wrgl(tcg_env, src);
3418 }
3419
3420 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3421
3422 /* UA2005 strand status */
3423 static void do_wrssr(DisasContext *dc, TCGv src)
3424 {
3425 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3426 }
3427
3428 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3429
3430 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3431
3432 static void do_wrhpstate(DisasContext *dc, TCGv src)
3433 {
3434 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3435 dc->base.is_jmp = DISAS_EXIT;
3436 }
3437
3438 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3439
3440 static void do_wrhtstate(DisasContext *dc, TCGv src)
3441 {
3442 TCGv_i32 tl = tcg_temp_new_i32();
3443 TCGv_ptr tp = tcg_temp_new_ptr();
3444
3445 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3446 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3447 tcg_gen_shli_i32(tl, tl, 3);
3448 tcg_gen_ext_i32_ptr(tp, tl);
3449 tcg_gen_add_ptr(tp, tp, tcg_env);
3450
3451 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3452 }
3453
3454 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3455
3456 static void do_wrhintp(DisasContext *dc, TCGv src)
3457 {
3458 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3459 }
3460
3461 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3462
3463 static void do_wrhtba(DisasContext *dc, TCGv src)
3464 {
3465 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3466 }
3467
3468 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3469
3470 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3471 {
3472 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3473
3474 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3475 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3476 translator_io_start(&dc->base);
3477 gen_helper_tick_set_limit(r_tickptr, src);
3478 /* End TB to handle timer interrupt */
3479 dc->base.is_jmp = DISAS_EXIT;
3480 }
3481
3482 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3483 do_wrhstick_cmpr)
3484
3485 static bool do_saved_restored(DisasContext *dc, bool saved)
3486 {
3487 if (!supervisor(dc)) {
3488 return raise_priv(dc);
3489 }
3490 if (saved) {
3491 gen_helper_saved(tcg_env);
3492 } else {
3493 gen_helper_restored(tcg_env);
3494 }
3495 return advance_pc(dc);
3496 }
3497
3498 TRANS(SAVED, 64, do_saved_restored, true)
3499 TRANS(RESTORED, 64, do_saved_restored, false)
3500
3501 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3502 {
3503 return advance_pc(dc);
3504 }
3505
3506 /*
3507 * TODO: Need a feature bit for sparcv8.
3508 * In the meantime, treat all 32-bit cpus like sparcv7.
3509 */
3510 TRANS(NOP_v7, 32, trans_NOP, a)
3511 TRANS(NOP_v9, 64, trans_NOP, a)
3512
3513 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3514 void (*func)(TCGv, TCGv, TCGv),
3515 void (*funci)(TCGv, TCGv, target_long),
3516 bool logic_cc)
3517 {
3518 TCGv dst, src1;
3519
3520 /* For simplicity, we under-decoded the rs2 form. */
3521 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3522 return false;
3523 }
3524
3525 if (logic_cc) {
3526 dst = cpu_cc_N;
3527 } else {
3528 dst = gen_dest_gpr(dc, a->rd);
3529 }
3530 src1 = gen_load_gpr(dc, a->rs1);
3531
3532 if (a->imm || a->rs2_or_imm == 0) {
3533 if (funci) {
3534 funci(dst, src1, a->rs2_or_imm);
3535 } else {
3536 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3537 }
3538 } else {
3539 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3540 }
3541
3542 if (logic_cc) {
3543 if (TARGET_LONG_BITS == 64) {
3544 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3545 tcg_gen_movi_tl(cpu_icc_C, 0);
3546 }
3547 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3548 tcg_gen_movi_tl(cpu_cc_C, 0);
3549 tcg_gen_movi_tl(cpu_cc_V, 0);
3550 }
3551
3552 gen_store_gpr(dc, a->rd, dst);
3553 return advance_pc(dc);
3554 }
3555
3556 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3557 void (*func)(TCGv, TCGv, TCGv),
3558 void (*funci)(TCGv, TCGv, target_long),
3559 void (*func_cc)(TCGv, TCGv, TCGv))
3560 {
3561 if (a->cc) {
3562 return do_arith_int(dc, a, func_cc, NULL, false);
3563 }
3564 return do_arith_int(dc, a, func, funci, false);
3565 }
3566
3567 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3568 void (*func)(TCGv, TCGv, TCGv),
3569 void (*funci)(TCGv, TCGv, target_long))
3570 {
3571 return do_arith_int(dc, a, func, funci, a->cc);
3572 }
3573
3574 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3575 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3576 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3577 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3578
3579 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3580 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3581 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3582 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3583
3584 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3585 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3586 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3587 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3588 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3589
3590 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3591 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3592 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3593 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3594
3595 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3596 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3597
3598 /* TODO: Should have a feature bit -- comes in with UltraSPARC T2. */
3599 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3600
3601 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3602 {
3603 /* OR with %g0 is the canonical alias for MOV. */
3604 if (!a->cc && a->rs1 == 0) {
3605 if (a->imm || a->rs2_or_imm == 0) {
3606 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3607 } else if (a->rs2_or_imm & ~0x1f) {
3608 /* For simplicity, we under-decoded the rs2 form. */
3609 return false;
3610 } else {
3611 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3612 }
3613 return advance_pc(dc);
3614 }
3615 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3616 }
3617
3618 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3619 {
3620 TCGv_i64 t1, t2;
3621 TCGv dst;
3622
3623 if (!avail_DIV(dc)) {
3624 return false;
3625 }
3626 /* For simplicity, we under-decoded the rs2 form. */
3627 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3628 return false;
3629 }
3630
3631 if (unlikely(a->rs2_or_imm == 0)) {
3632 gen_exception(dc, TT_DIV_ZERO);
3633 return true;
3634 }
3635
3636 if (a->imm) {
3637 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3638 } else {
3639 TCGLabel *lab;
3640 TCGv_i32 n2;
3641
3642 finishing_insn(dc);
3643 flush_cond(dc);
3644
3645 n2 = tcg_temp_new_i32();
3646 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3647
3648 lab = delay_exception(dc, TT_DIV_ZERO);
3649 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3650
3651 t2 = tcg_temp_new_i64();
3652 #ifdef TARGET_SPARC64
3653 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3654 #else
3655 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3656 #endif
3657 }
3658
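/* The 64-bit dividend is Y:rs1, with Y supplying the high 32 bits;
   on overflow the quotient saturates to UINT32_MAX, which the umin
   below implements. */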
3659 t1 = tcg_temp_new_i64();
3660 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3661
3662 tcg_gen_divu_i64(t1, t1, t2);
3663 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3664
3665 dst = gen_dest_gpr(dc, a->rd);
3666 tcg_gen_trunc_i64_tl(dst, t1);
3667 gen_store_gpr(dc, a->rd, dst);
3668 return advance_pc(dc);
3669 }
3670
3671 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3672 {
3673 TCGv dst, src1, src2;
3674
3675 if (!avail_64(dc)) {
3676 return false;
3677 }
3678 /* For simplicity, we under-decoded the rs2 form. */
3679 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3680 return false;
3681 }
3682
3683 if (unlikely(a->rs2_or_imm == 0)) {
3684 gen_exception(dc, TT_DIV_ZERO);
3685 return true;
3686 }
3687
3688 if (a->imm) {
3689 src2 = tcg_constant_tl(a->rs2_or_imm);
3690 } else {
3691 TCGLabel *lab;
3692
3693 finishing_insn(dc);
3694 flush_cond(dc);
3695
3696 lab = delay_exception(dc, TT_DIV_ZERO);
3697 src2 = cpu_regs[a->rs2_or_imm];
3698 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3699 }
3700
3701 dst = gen_dest_gpr(dc, a->rd);
3702 src1 = gen_load_gpr(dc, a->rs1);
3703
3704 tcg_gen_divu_tl(dst, src1, src2);
3705 gen_store_gpr(dc, a->rd, dst);
3706 return advance_pc(dc);
3707 }
3708
3709 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3710 {
3711 TCGv dst, src1, src2;
3712
3713 if (!avail_64(dc)) {
3714 return false;
3715 }
3716 /* For simplicity, we under-decoded the rs2 form. */
3717 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3718 return false;
3719 }
3720
3721 if (unlikely(a->rs2_or_imm == 0)) {
3722 gen_exception(dc, TT_DIV_ZERO);
3723 return true;
3724 }
3725
3726 dst = gen_dest_gpr(dc, a->rd);
3727 src1 = gen_load_gpr(dc, a->rs1);
3728
3729 if (a->imm) {
3730 if (unlikely(a->rs2_or_imm == -1)) {
3731 tcg_gen_neg_tl(dst, src1);
3732 gen_store_gpr(dc, a->rd, dst);
3733 return advance_pc(dc);
3734 }
3735 src2 = tcg_constant_tl(a->rs2_or_imm);
3736 } else {
3737 TCGLabel *lab;
3738 TCGv t1, t2;
3739
3740 finishing_insn(dc);
3741 flush_cond(dc);
3742
3743 lab = delay_exception(dc, TT_DIV_ZERO);
3744 src2 = cpu_regs[a->rs2_or_imm];
3745 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3746
3747 /*
3748 * Need to avoid INT64_MIN / -1, which will trap on x86 host.
3749 * Set SRC2 to 1 as a new divisor, to produce the correct result.
3750 */
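/* E.g. src1 == INT64_MIN, src2 == -1: the movcond substitutes 1,
   and INT64_MIN / 1 == INT64_MIN, the expected wrapped quotient. */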
3751 t1 = tcg_temp_new();
3752 t2 = tcg_temp_new();
3753 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3754 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3755 tcg_gen_and_tl(t1, t1, t2);
3756 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3757 tcg_constant_tl(1), src2);
3758 src2 = t1;
3759 }
3760
3761 tcg_gen_div_tl(dst, src1, src2);
3762 gen_store_gpr(dc, a->rd, dst);
3763 return advance_pc(dc);
3764 }
3765
3766 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3767 int width, bool cc, bool left)
3768 {
3769 TCGv dst, s1, s2, lo1, lo2;
3770 uint64_t amask, tabl, tabr;
3771 int shift, imask, omask;
3772
3773 dst = gen_dest_gpr(dc, a->rd);
3774 s1 = gen_load_gpr(dc, a->rs1);
3775 s2 = gen_load_gpr(dc, a->rs2);
3776
3777 if (cc) {
3778 gen_op_subcc(cpu_cc_N, s1, s2);
3779 }
3780
3781 /*
3782 * Theory of operation: there are two tables, left and right (not to
3783 * be confused with the left and right versions of the opcode). These
3784 * are indexed by the low 3 bits of the inputs. To make things "easy",
3785 * these tables are loaded into two constants, TABL and TABR below.
3786 * The operation index = (input & imask) << shift calculates the index
3787 * into the constant, while val = (table >> index) & omask calculates
3788 * the value we're looking for.
3789 */
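/* For example, EDGE8 (left) with (s1 & 7) == 2: index = 2 << 3 = 16,
   and (TABL >> 16) & 0xff == 0xfc, the left-edge mask two bytes into
   the doubleword. */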
3790 switch (width) {
3791 case 8:
3792 imask = 0x7;
3793 shift = 3;
3794 omask = 0xff;
3795 if (left) {
3796 tabl = 0x80c0e0f0f8fcfeffULL;
3797 tabr = 0xff7f3f1f0f070301ULL;
3798 } else {
3799 tabl = 0x0103070f1f3f7fffULL;
3800 tabr = 0xfffefcf8f0e0c080ULL;
3801 }
3802 break;
3803 case 16:
3804 imask = 0x6;
3805 shift = 1;
3806 omask = 0xf;
3807 if (left) {
3808 tabl = 0x8cef;
3809 tabr = 0xf731;
3810 } else {
3811 tabl = 0x137f;
3812 tabr = 0xfec8;
3813 }
3814 break;
3815 case 32:
3816 imask = 0x4;
3817 shift = 0;
3818 omask = 0x3;
3819 if (left) {
3820 tabl = (2 << 2) | 3;
3821 tabr = (3 << 2) | 1;
3822 } else {
3823 tabl = (1 << 2) | 3;
3824 tabr = (3 << 2) | 2;
3825 }
3826 break;
3827 default:
3828 g_assert_not_reached();
3829 }
3830
3831 lo1 = tcg_temp_new();
3832 lo2 = tcg_temp_new();
3833 tcg_gen_andi_tl(lo1, s1, imask);
3834 tcg_gen_andi_tl(lo2, s2, imask);
3835 tcg_gen_shli_tl(lo1, lo1, shift);
3836 tcg_gen_shli_tl(lo2, lo2, shift);
3837
3838 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3839 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3840 tcg_gen_andi_tl(lo1, lo1, omask);
3841 tcg_gen_andi_tl(lo2, lo2, omask);
3842
3843 amask = address_mask_i(dc, -8);
3844 tcg_gen_andi_tl(s1, s1, amask);
3845 tcg_gen_andi_tl(s2, s2, amask);
3846
3847 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3848 tcg_gen_and_tl(lo2, lo2, lo1);
3849 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3850
3851 gen_store_gpr(dc, a->rd, dst);
3852 return advance_pc(dc);
3853 }
3854
3855 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3856 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3857 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3858 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3859 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3860 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3861
3862 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3863 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3864 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3865 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3866 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3867 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3868
3869 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3870 void (*func)(TCGv, TCGv, TCGv))
3871 {
3872 TCGv dst = gen_dest_gpr(dc, a->rd);
3873 TCGv src1 = gen_load_gpr(dc, a->rs1);
3874 TCGv src2 = gen_load_gpr(dc, a->rs2);
3875
3876 func(dst, src1, src2);
3877 gen_store_gpr(dc, a->rd, dst);
3878 return advance_pc(dc);
3879 }
3880
3881 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3882 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3883 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3884
3885 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3886 {
3887 #ifdef TARGET_SPARC64
3888 TCGv tmp = tcg_temp_new();
3889
3890 tcg_gen_add_tl(tmp, s1, s2);
3891 tcg_gen_andi_tl(dst, tmp, -8);
3892 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3893 #else
3894 g_assert_not_reached();
3895 #endif
3896 }
3897
3898 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3899 {
3900 #ifdef TARGET_SPARC64
3901 TCGv tmp = tcg_temp_new();
3902
3903 tcg_gen_add_tl(tmp, s1, s2);
3904 tcg_gen_andi_tl(dst, tmp, -8);
3905 tcg_gen_neg_tl(tmp, tmp);
3906 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3907 #else
3908 g_assert_not_reached();
3909 #endif
3910 }
3911
3912 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3913 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3914
3915 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3916 {
3917 #ifdef TARGET_SPARC64
3918 tcg_gen_add_tl(dst, s1, s2);
3919 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3920 #else
3921 g_assert_not_reached();
3922 #endif
3923 }
3924
3925 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3926
3927 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3928 {
3929 TCGv dst, src1, src2;
3930
3931 /* Reject 64-bit shifts for sparc32. */
3932 if (avail_32(dc) && a->x) {
3933 return false;
3934 }
3935
3936 src2 = tcg_temp_new();
3937 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3938 src1 = gen_load_gpr(dc, a->rs1);
3939 dst = gen_dest_gpr(dc, a->rd);
3940
3941 if (l) {
3942 tcg_gen_shl_tl(dst, src1, src2);
3943 if (!a->x) {
3944 tcg_gen_ext32u_tl(dst, dst);
3945 }
3946 } else if (u) {
3947 if (!a->x) {
3948 tcg_gen_ext32u_tl(dst, src1);
3949 src1 = dst;
3950 }
3951 tcg_gen_shr_tl(dst, src1, src2);
3952 } else {
3953 if (!a->x) {
3954 tcg_gen_ext32s_tl(dst, src1);
3955 src1 = dst;
3956 }
3957 tcg_gen_sar_tl(dst, src1, src2);
3958 }
3959 gen_store_gpr(dc, a->rd, dst);
3960 return advance_pc(dc);
3961 }
3962
3963 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3964 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3965 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3966
3967 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3968 {
3969 TCGv dst, src1;
3970
3971 /* Reject 64-bit shifts for sparc32. */
3972 if (avail_32(dc) && (a->x || a->i >= 32)) {
3973 return false;
3974 }
3975
3976 src1 = gen_load_gpr(dc, a->rs1);
3977 dst = gen_dest_gpr(dc, a->rd);
3978
3979 if (avail_32(dc) || a->x) {
3980 if (l) {
3981 tcg_gen_shli_tl(dst, src1, a->i);
3982 } else if (u) {
3983 tcg_gen_shri_tl(dst, src1, a->i);
3984 } else {
3985 tcg_gen_sari_tl(dst, src1, a->i);
3986 }
3987 } else {
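/* 32-bit shift of a 64-bit register in one TCG op: e.g. for i == 8,
   SLL is deposit_z(src, 8, 24) and SRL is extract(src, 8, 24). */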
3988 if (l) {
3989 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3990 } else if (u) {
3991 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3992 } else {
3993 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3994 }
3995 }
3996 gen_store_gpr(dc, a->rd, dst);
3997 return advance_pc(dc);
3998 }
3999
4000 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4001 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4002 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4003
4004 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4005 {
4006 /* For simplicity, we under-decoded the rs2 form. */
4007 if (!imm && rs2_or_imm & ~0x1f) {
4008 return NULL;
4009 }
4010 if (imm || rs2_or_imm == 0) {
4011 return tcg_constant_tl(rs2_or_imm);
4012 } else {
4013 return cpu_regs[rs2_or_imm];
4014 }
4015 }
4016
4017 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4018 {
4019 TCGv dst = gen_load_gpr(dc, rd);
4020 TCGv c2 = tcg_constant_tl(cmp->c2);
4021
4022 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4023 gen_store_gpr(dc, rd, dst);
4024 return advance_pc(dc);
4025 }
4026
4027 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4028 {
4029 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4030 DisasCompare cmp;
4031
4032 if (src2 == NULL) {
4033 return false;
4034 }
4035 gen_compare(&cmp, a->cc, a->cond, dc);
4036 return do_mov_cond(dc, &cmp, a->rd, src2);
4037 }
4038
4039 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4040 {
4041 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4042 DisasCompare cmp;
4043
4044 if (src2 == NULL) {
4045 return false;
4046 }
4047 gen_fcompare(&cmp, a->cc, a->cond);
4048 return do_mov_cond(dc, &cmp, a->rd, src2);
4049 }
4050
4051 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4052 {
4053 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4054 DisasCompare cmp;
4055
4056 if (src2 == NULL) {
4057 return false;
4058 }
4059 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4060 return false;
4061 }
4062 return do_mov_cond(dc, &cmp, a->rd, src2);
4063 }
4064
4065 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4066 bool (*func)(DisasContext *dc, int rd, TCGv src))
4067 {
4068 TCGv src1, sum;
4069
4070 /* For simplicity, we under-decoded the rs2 form. */
4071 if (!a->imm && a->rs2_or_imm & ~0x1f) {
4072 return false;
4073 }
4074
4075 /*
4076 * Always load the sum into a new temporary.
4077 * This is required to capture the value across a window change,
4078 * e.g. SAVE and RESTORE; otherwise it could be optimized away.
4079 */
4080 sum = tcg_temp_new();
4081 src1 = gen_load_gpr(dc, a->rs1);
4082 if (a->imm || a->rs2_or_imm == 0) {
4083 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4084 } else {
4085 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4086 }
4087 return func(dc, a->rd, sum);
4088 }
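
/*
 * Illustrative note (editorial): for "save %sp, -96, %sp" the sum
 * is computed from the *old* window's %sp before func() rotates the
 * register window (gen_helper_save in do_save below), and is then
 * written to the *new* window's %sp.  Holding the sum in a fresh
 * temporary, per the comment above, is what keeps the value valid
 * across the window change.
 */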
4089
4090 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4091 {
4092 /*
4093 * Preserve pc across advance, so that we can delay
4094 * the writeback to rd until after src is consumed.
4095 */
4096 target_ulong cur_pc = dc->pc;
4097
4098 gen_check_align(dc, src, 3);
4099
4100 gen_mov_pc_npc(dc);
4101 tcg_gen_mov_tl(cpu_npc, src);
4102 gen_address_mask(dc, cpu_npc);
4103 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4104
4105 dc->npc = DYNAMIC_PC_LOOKUP;
4106 return true;
4107 }
4108
4109 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
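
/*
 * Illustrative note (editorial): rd receives the address of the
 * JMPL instruction itself (cur_pc is captured before the pc/npc
 * update above).  An indirect call is "jmpl %reg, %o7", and the
 * synthetic "retl" is "jmpl %o7 + 8, %g0".
 */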
4110
4111 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4112 {
4113 if (!supervisor(dc)) {
4114 return raise_priv(dc);
4115 }
4116
4117 gen_check_align(dc, src, 3);
4118
4119 gen_mov_pc_npc(dc);
4120 tcg_gen_mov_tl(cpu_npc, src);
4121 gen_helper_rett(tcg_env);
4122
4123 dc->npc = DYNAMIC_PC;
4124 return true;
4125 }
4126
4127 TRANS(RETT, 32, do_add_special, a, do_rett)
4128
4129 static bool do_return(DisasContext *dc, int rd, TCGv src)
4130 {
4131 gen_check_align(dc, src, 3);
4132 gen_helper_restore(tcg_env);
4133
4134 gen_mov_pc_npc(dc);
4135 tcg_gen_mov_tl(cpu_npc, src);
4136 gen_address_mask(dc, cpu_npc);
4137
4138 dc->npc = DYNAMIC_PC_LOOKUP;
4139 return true;
4140 }
4141
4142 TRANS(RETURN, 64, do_add_special, a, do_return)
4143
4144 static bool do_save(DisasContext *dc, int rd, TCGv src)
4145 {
4146 gen_helper_save(tcg_env);
4147 gen_store_gpr(dc, rd, src);
4148 return advance_pc(dc);
4149 }
4150
4151 TRANS(SAVE, ALL, do_add_special, a, do_save)
4152
4153 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4154 {
4155 gen_helper_restore(tcg_env);
4156 gen_store_gpr(dc, rd, src);
4157 return advance_pc(dc);
4158 }
4159
4160 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4161
4162 static bool do_done_retry(DisasContext *dc, bool done)
4163 {
4164 if (!supervisor(dc)) {
4165 return raise_priv(dc);
4166 }
4167 dc->npc = DYNAMIC_PC;
4168 dc->pc = DYNAMIC_PC;
4169 translator_io_start(&dc->base);
4170 if (done) {
4171 gen_helper_done(tcg_env);
4172 } else {
4173 gen_helper_retry(tcg_env);
4174 }
4175 return true;
4176 }
4177
4178 TRANS(DONE, 64, do_done_retry, true)
4179 TRANS(RETRY, 64, do_done_retry, false)
4180
4181 /*
4182 * Major opcode 11 -- load and store instructions
4183 */
4184
4185 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4186 {
4187 TCGv addr, tmp = NULL;
4188
4189 /* For simplicity, we under-decoded the rs2 form. */
4190 if (!imm && rs2_or_imm & ~0x1f) {
4191 return NULL;
4192 }
4193
4194 addr = gen_load_gpr(dc, rs1);
4195 if (rs2_or_imm) {
4196 tmp = tcg_temp_new();
4197 if (imm) {
4198 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4199 } else {
4200 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4201 }
4202 addr = tmp;
4203 }
4204 if (AM_CHECK(dc)) {
4205 if (!tmp) {
4206 tmp = tcg_temp_new();
4207 }
4208 tcg_gen_ext32u_tl(tmp, addr);
4209 addr = tmp;
4210 }
4211 return addr;
4212 }
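
/*
 * Illustrative note (editorial): in plain C, the effective address
 * computed above is roughly
 *
 *     target_ulong ea = R[rs1] + (imm ? simm : R[rs2]);
 *     if (AM_CHECK(dc)) {
 *         ea = (uint32_t)ea;    -- 32-bit address masking
 *     }
 *
 * where the rs2 == %g0 / offset == 0 case skips the addition
 * entirely and reuses the rs1 TCGv.
 */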
4213
4214 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4215 {
4216 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4217 DisasASI da;
4218
4219 if (addr == NULL) {
4220 return false;
4221 }
4222 da = resolve_asi(dc, a->asi, mop);
4223
4224 reg = gen_dest_gpr(dc, a->rd);
4225 gen_ld_asi(dc, &da, reg, addr);
4226 gen_store_gpr(dc, a->rd, reg);
4227 return advance_pc(dc);
4228 }
4229
4230 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4231 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4232 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4233 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4234 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4235 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4236 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
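
/*
 * Illustrative note (editorial): the MemOp constants encode width,
 * signedness and endianness: MO_TEUL = target-endian unsigned
 * 32-bit, MO_TESW = target-endian signed 16-bit, MO_TEUQ =
 * target-endian 64-bit, MO_UB/MO_SB = unsigned/signed byte.  The
 * sign only matters for the loads above; stores ignore it.
 */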
4237
4238 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4239 {
4240 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4241 DisasASI da;
4242
4243 if (addr == NULL) {
4244 return false;
4245 }
4246 da = resolve_asi(dc, a->asi, mop);
4247
4248 reg = gen_load_gpr(dc, a->rd);
4249 gen_st_asi(dc, &da, reg, addr);
4250 return advance_pc(dc);
4251 }
4252
4253 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4254 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4255 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4256 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4257
4258 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4259 {
4260 TCGv addr;
4261 DisasASI da;
4262
4263 if (a->rd & 1) {
4264 return false;
4265 }
4266 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4267 if (addr == NULL) {
4268 return false;
4269 }
4270 da = resolve_asi(dc, a->asi, MO_TEUQ);
4271 gen_ldda_asi(dc, &da, addr, a->rd);
4272 return advance_pc(dc);
4273 }
4274
4275 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4276 {
4277 TCGv addr;
4278 DisasASI da;
4279
4280 if (a->rd & 1) {
4281 return false;
4282 }
4283 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4284 if (addr == NULL) {
4285 return false;
4286 }
4287 da = resolve_asi(dc, a->asi, MO_TEUQ);
4288 gen_stda_asi(dc, &da, addr, a->rd);
4289 return advance_pc(dc);
4290 }
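
/*
 * Illustrative note (editorial): LDD/STD move a 64-bit quantity
 * into/out of an even/odd register pair, so both are implemented as
 * a single MO_TEUQ access, and odd rd encodings (reserved by the
 * architecture) are rejected above via the "rd & 1" check.
 */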
4291
4292 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4293 {
4294 TCGv addr, reg;
4295 DisasASI da;
4296
4297 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4298 if (addr == NULL) {
4299 return false;
4300 }
4301 da = resolve_asi(dc, a->asi, MO_UB);
4302
4303 reg = gen_dest_gpr(dc, a->rd);
4304 gen_ldstub_asi(dc, &da, reg, addr);
4305 gen_store_gpr(dc, a->rd, reg);
4306 return advance_pc(dc);
4307 }
4308
4309 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4310 {
4311 TCGv addr, dst, src;
4312 DisasASI da;
4313
4314 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4315 if (addr == NULL) {
4316 return false;
4317 }
4318 da = resolve_asi(dc, a->asi, MO_TEUL);
4319
4320 dst = gen_dest_gpr(dc, a->rd);
4321 src = gen_load_gpr(dc, a->rd);
4322 gen_swap_asi(dc, &da, dst, src, addr);
4323 gen_store_gpr(dc, a->rd, dst);
4324 return advance_pc(dc);
4325 }
4326
4327 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4328 {
4329 TCGv addr, o, n, c;
4330 DisasASI da;
4331
4332 addr = gen_ldst_addr(dc, a->rs1, true, 0);
4333 if (addr == NULL) {
4334 return false;
4335 }
4336 da = resolve_asi(dc, a->asi, mop);
4337
4338 o = gen_dest_gpr(dc, a->rd);
4339 n = gen_load_gpr(dc, a->rd);
4340 c = gen_load_gpr(dc, a->rs2_or_imm);
4341 gen_cas_asi(dc, &da, o, n, c, addr);
4342 gen_store_gpr(dc, a->rd, o);
4343 return advance_pc(dc);
4344 }
4345
4346 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4347 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
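
/*
 * Illustrative note (editorial): compare-and-swap semantics, in
 * plain C for the 32-bit CASA case:
 *
 *     uint32_t old = *(uint32_t *)ea;
 *     if (old == (uint32_t)R[rs2]) {
 *         *(uint32_t *)ea = (uint32_t)R[rd];
 *     }
 *     R[rd] = old;
 *
 * mapping onto the code above: c (from rs2) is the comparand,
 * n (the old rd value) is the replacement, and o receives the
 * loaded old value, which is written back to rd.
 */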
4348
4349 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4350 {
4351 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4352 DisasASI da;
4353
4354 if (addr == NULL) {
4355 return false;
4356 }
4357 if (gen_trap_ifnofpu(dc)) {
4358 return true;
4359 }
4360 if (sz == MO_128 && gen_trap_float128(dc)) {
4361 return true;
4362 }
4363 da = resolve_asi(dc, a->asi, MO_TE | sz);
4364 gen_ldf_asi(dc, &da, sz, addr, a->rd);
4365 gen_update_fprs_dirty(dc, a->rd);
4366 return advance_pc(dc);
4367 }
4368
4369 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4370 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4371 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4372
4373 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4374 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4375 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4376
4377 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4378 {
4379 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4380 DisasASI da;
4381
4382 if (addr == NULL) {
4383 return false;
4384 }
4385 if (gen_trap_ifnofpu(dc)) {
4386 return true;
4387 }
4388 if (sz == MO_128 && gen_trap_float128(dc)) {
4389 return true;
4390 }
4391 da = resolve_asi(dc, a->asi, MO_TE | sz);
4392 gen_stf_asi(dc, &da, sz, addr, a->rd);
4393 return advance_pc(dc);
4394 }
4395
4396 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4397 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4398 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4399
4400 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4401 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4402 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4403
4404 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4405 {
4406 if (!avail_32(dc)) {
4407 return false;
4408 }
4409 if (!supervisor(dc)) {
4410 return raise_priv(dc);
4411 }
4412 if (gen_trap_ifnofpu(dc)) {
4413 return true;
4414 }
4415 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4416 return true;
4417 }
4418
4419 static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
4420 target_ulong new_mask, target_ulong old_mask)
4421 {
4422 TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4423 if (addr == NULL) {
4424 return false;
4425 }
4426 if (gen_trap_ifnofpu(dc)) {
4427 return true;
4428 }
4429 tmp = tcg_temp_new();
4430 tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
4431 tcg_gen_andi_tl(tmp, tmp, new_mask);
4432 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
4433 tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
4434 gen_helper_set_fsr(tcg_env, cpu_fsr);
4435 return advance_pc(dc);
4436 }
4437
4438 TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
4439 TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
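
/*
 * Illustrative note (editorial): the FSR update above amounts to
 *
 *     fsr = (fsr & old_mask) | (loaded & new_mask);
 *
 * so a 32-bit LDFSR on sparc64 can only replace the bits covered by
 * FSR_LDFSR_MASK, while state outside it (e.g. the upper fcc
 * fields) is preserved through FSR_LDFSR_OLDMASK.
 */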
4440
4441 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4442 {
4443 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4444 if (addr == NULL) {
4445 return false;
4446 }
4447 if (gen_trap_ifnofpu(dc)) {
4448 return true;
4449 }
4450 tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4451 return advance_pc(dc);
4452 }
4453
4454 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4455 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4456
4457 static bool do_fc(DisasContext *dc, int rd, bool c)
4458 {
4459 uint64_t mask;
4460
4461 if (gen_trap_ifnofpu(dc)) {
4462 return true;
4463 }
4464
4465 if (rd & 1) {
4466 mask = MAKE_64BIT_MASK(0, 32);
4467 } else {
4468 mask = MAKE_64BIT_MASK(32, 32);
4469 }
4470 if (c) {
4471 tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4472 } else {
4473 tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4474 }
4475 gen_update_fprs_dirty(dc, rd);
4476 return advance_pc(dc);
4477 }
4478
4479 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4480 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
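
/*
 * Illustrative note (editorial): each cpu_fpr[] element packs a
 * pair of 32-bit float registers into one i64, with the
 * even-numbered single in the high half.  That is why do_fc above
 * selects the high 32-bit mask for even rd and the low 32-bit mask
 * for odd rd when writing all-zeros (FZEROs) or all-ones (FONEs).
 */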
4481
4482 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4483 {
4484 if (gen_trap_ifnofpu(dc)) {
4485 return true;
4486 }
4487
4488 tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4489 gen_update_fprs_dirty(dc, rd);
4490 return advance_pc(dc);
4491 }
4492
4493 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4494 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4495
4496 static bool do_ff(DisasContext *dc, arg_r_r *a,
4497 void (*func)(TCGv_i32, TCGv_i32))
4498 {
4499 TCGv_i32 tmp;
4500
4501 if (gen_trap_ifnofpu(dc)) {
4502 return true;
4503 }
4504
4505 tmp = gen_load_fpr_F(dc, a->rs);
4506 func(tmp, tmp);
4507 gen_store_fpr_F(dc, a->rd, tmp);
4508 return advance_pc(dc);
4509 }
4510
4511 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4512 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4513 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4514 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4515 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4516
4517 static bool do_fd(DisasContext *dc, arg_r_r *a,
4518 void (*func)(TCGv_i32, TCGv_i64))
4519 {
4520 TCGv_i32 dst;
4521 TCGv_i64 src;
4522
4523 if (gen_trap_ifnofpu(dc)) {
4524 return true;
4525 }
4526
4527 dst = tcg_temp_new_i32();
4528 src = gen_load_fpr_D(dc, a->rs);
4529 func(dst, src);
4530 gen_store_fpr_F(dc, a->rd, dst);
4531 return advance_pc(dc);
4532 }
4533
4534 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4535 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4536
4537 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4538 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4539 {
4540 TCGv_i32 tmp;
4541
4542 if (gen_trap_ifnofpu(dc)) {
4543 return true;
4544 }
4545
4546 gen_op_clear_ieee_excp_and_FTT();
4547 tmp = gen_load_fpr_F(dc, a->rs);
4548 func(tmp, tcg_env, tmp);
4549 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4550 gen_store_fpr_F(dc, a->rd, tmp);
4551 return advance_pc(dc);
4552 }
4553
4554 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4555 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4556 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4557
4558 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4559 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4560 {
4561 TCGv_i32 dst;
4562 TCGv_i64 src;
4563
4564 if (gen_trap_ifnofpu(dc)) {
4565 return true;
4566 }
4567
4568 gen_op_clear_ieee_excp_and_FTT();
4569 dst = tcg_temp_new_i32();
4570 src = gen_load_fpr_D(dc, a->rs);
4571 func(dst, tcg_env, src);
4572 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4573 gen_store_fpr_F(dc, a->rd, dst);
4574 return advance_pc(dc);
4575 }
4576
4577 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4578 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4579 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4580
4581 static bool do_dd(DisasContext *dc, arg_r_r *a,
4582 void (*func)(TCGv_i64, TCGv_i64))
4583 {
4584 TCGv_i64 dst, src;
4585
4586 if (gen_trap_ifnofpu(dc)) {
4587 return true;
4588 }
4589
4590 dst = gen_dest_fpr_D(dc, a->rd);
4591 src = gen_load_fpr_D(dc, a->rs);
4592 func(dst, src);
4593 gen_store_fpr_D(dc, a->rd, dst);
4594 return advance_pc(dc);
4595 }
4596
4597 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4598 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4599 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4600 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4601 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4602
4603 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4604 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4605 {
4606 TCGv_i64 dst, src;
4607
4608 if (gen_trap_ifnofpu(dc)) {
4609 return true;
4610 }
4611
4612 gen_op_clear_ieee_excp_and_FTT();
4613 dst = gen_dest_fpr_D(dc, a->rd);
4614 src = gen_load_fpr_D(dc, a->rs);
4615 func(dst, tcg_env, src);
4616 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4617 gen_store_fpr_D(dc, a->rd, dst);
4618 return advance_pc(dc);
4619 }
4620
4621 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4622 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4623 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4624
4625 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4626 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4627 {
4628 TCGv_i64 dst;
4629 TCGv_i32 src;
4630
4631 if (gen_trap_ifnofpu(dc)) {
4632 return true;
4633 }
4634
4635 gen_op_clear_ieee_excp_and_FTT();
4636 dst = gen_dest_fpr_D(dc, a->rd);
4637 src = gen_load_fpr_F(dc, a->rs);
4638 func(dst, tcg_env, src);
4639 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4640 gen_store_fpr_D(dc, a->rd, dst);
4641 return advance_pc(dc);
4642 }
4643
4644 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4645 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4646 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4647
4648 static bool do_qq(DisasContext *dc, arg_r_r *a,
4649 void (*func)(TCGv_i128, TCGv_i128))
4650 {
4651 TCGv_i128 t;
4652
4653 if (gen_trap_ifnofpu(dc)) {
4654 return true;
4655 }
4656 if (gen_trap_float128(dc)) {
4657 return true;
4658 }
4659
4660 gen_op_clear_ieee_excp_and_FTT();
4661 t = gen_load_fpr_Q(dc, a->rs);
4662 func(t, t);
4663 gen_store_fpr_Q(dc, a->rd, t);
4664 return advance_pc(dc);
4665 }
4666
4667 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4668 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4669 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4670
4671 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4672 void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4673 {
4674 TCGv_i128 t;
4675
4676 if (gen_trap_ifnofpu(dc)) {
4677 return true;
4678 }
4679 if (gen_trap_float128(dc)) {
4680 return true;
4681 }
4682
4683 gen_op_clear_ieee_excp_and_FTT();
4684
4685 t = gen_load_fpr_Q(dc, a->rs);
4686 func(t, tcg_env, t);
4687 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4688 gen_store_fpr_Q(dc, a->rd, t);
4689 return advance_pc(dc);
4690 }
4691
4692 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4693
4694 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4695 void (*func)(TCGv_i32, TCGv_env))
4696 {
4697 TCGv_i32 dst;
4698
4699 if (gen_trap_ifnofpu(dc)) {
4700 return true;
4701 }
4702 if (gen_trap_float128(dc)) {
4703 return true;
4704 }
4705
4706 gen_op_clear_ieee_excp_and_FTT();
4707 gen_op_load_fpr_QT1(QFPREG(a->rs));
4708 dst = tcg_temp_new_i32();
4709 func(dst, tcg_env);
4710 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4711 gen_store_fpr_F(dc, a->rd, dst);
4712 return advance_pc(dc);
4713 }
4714
4715 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4716 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4717
4718 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4719 void (*func)(TCGv_i64, TCGv_env))
4720 {
4721 TCGv_i64 dst;
4722
4723 if (gen_trap_ifnofpu(dc)) {
4724 return true;
4725 }
4726 if (gen_trap_float128(dc)) {
4727 return true;
4728 }
4729
4730 gen_op_clear_ieee_excp_and_FTT();
4731 gen_op_load_fpr_QT1(QFPREG(a->rs));
4732 dst = gen_dest_fpr_D(dc, a->rd);
4733 func(dst, tcg_env);
4734 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4735 gen_store_fpr_D(dc, a->rd, dst);
4736 return advance_pc(dc);
4737 }
4738
4739 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4740 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4741
4742 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4743 void (*func)(TCGv_env, TCGv_i32))
4744 {
4745 TCGv_i32 src;
4746
4747 if (gen_trap_ifnofpu(dc)) {
4748 return true;
4749 }
4750 if (gen_trap_float128(dc)) {
4751 return true;
4752 }
4753
4754 gen_op_clear_ieee_excp_and_FTT();
4755 src = gen_load_fpr_F(dc, a->rs);
4756 func(tcg_env, src);
4757 gen_op_store_QT0_fpr(QFPREG(a->rd));
4758 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4759 return advance_pc(dc);
4760 }
4761
4762 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4763 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4764
4765 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4766 void (*func)(TCGv_env, TCGv_i64))
4767 {
4768 TCGv_i64 src;
4769
4770 if (gen_trap_ifnofpu(dc)) {
4771 return true;
4772 }
4773 if (gen_trap_float128(dc)) {
4774 return true;
4775 }
4776
4777 gen_op_clear_ieee_excp_and_FTT();
4778 src = gen_load_fpr_D(dc, a->rs);
4779 func(tcg_env, src);
4780 gen_op_store_QT0_fpr(QFPREG(a->rd));
4781 gen_update_fprs_dirty(dc, QFPREG(a->rd));
4782 return advance_pc(dc);
4783 }
4784
4785 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4786 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4787
4788 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4789 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4790 {
4791 TCGv_i32 src1, src2;
4792
4793 if (gen_trap_ifnofpu(dc)) {
4794 return true;
4795 }
4796
4797 src1 = gen_load_fpr_F(dc, a->rs1);
4798 src2 = gen_load_fpr_F(dc, a->rs2);
4799 func(src1, src1, src2);
4800 gen_store_fpr_F(dc, a->rd, src1);
4801 return advance_pc(dc);
4802 }
4803
4804 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4805 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4806 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4807 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4808 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4809 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4810 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4811 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4812 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4813 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4814 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4815 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4816
4817 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4818 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4819 {
4820 TCGv_i32 src1, src2;
4821
4822 if (gen_trap_ifnofpu(dc)) {
4823 return true;
4824 }
4825
4826 gen_op_clear_ieee_excp_and_FTT();
4827 src1 = gen_load_fpr_F(dc, a->rs1);
4828 src2 = gen_load_fpr_F(dc, a->rs2);
4829 func(src1, tcg_env, src1, src2);
4830 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4831 gen_store_fpr_F(dc, a->rd, src1);
4832 return advance_pc(dc);
4833 }
4834
4835 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4836 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4837 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4838 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4839
4840 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4841 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4842 {
4843 TCGv_i64 dst, src1, src2;
4844
4845 if (gen_trap_ifnofpu(dc)) {
4846 return true;
4847 }
4848
4849 dst = gen_dest_fpr_D(dc, a->rd);
4850 src1 = gen_load_fpr_D(dc, a->rs1);
4851 src2 = gen_load_fpr_D(dc, a->rs2);
4852 func(dst, src1, src2);
4853 gen_store_fpr_D(dc, a->rd, dst);
4854 return advance_pc(dc);
4855 }
4856
4857 TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
4858 TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
4859 TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
4860 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4861 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4862 TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
4863 TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
4864 TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
4865 TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
4866
4867 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4868 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4869 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4870 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4871 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4872 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4873 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4874 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4875 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4876 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4877 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4878 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4879
4880 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4881 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4882 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4883
4884 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4885 void (*func)(TCGv, TCGv_i64, TCGv_i64))
4886 {
4887 TCGv_i64 src1, src2;
4888 TCGv dst;
4889
4890 if (gen_trap_ifnofpu(dc)) {
4891 return true;
4892 }
4893
4894 dst = gen_dest_gpr(dc, a->rd);
4895 src1 = gen_load_fpr_D(dc, a->rs1);
4896 src2 = gen_load_fpr_D(dc, a->rs2);
4897 func(dst, src1, src2);
4898 gen_store_gpr(dc, a->rd, dst);
4899 return advance_pc(dc);
4900 }
4901
4902 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4903 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4904 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4905 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4906
4907 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4908 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4909 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4910 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4911
4912 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4913 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4914 {
4915 TCGv_i64 dst, src1, src2;
4916
4917 if (gen_trap_ifnofpu(dc)) {
4918 return true;
4919 }
4920
4921 gen_op_clear_ieee_excp_and_FTT();
4922 dst = gen_dest_fpr_D(dc, a->rd);
4923 src1 = gen_load_fpr_D(dc, a->rs1);
4924 src2 = gen_load_fpr_D(dc, a->rs2);
4925 func(dst, tcg_env, src1, src2);
4926 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4927 gen_store_fpr_D(dc, a->rd, dst);
4928 return advance_pc(dc);
4929 }
4930
4931 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4932 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4933 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4934 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4935
4936 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4937 {
4938 TCGv_i64 dst;
4939 TCGv_i32 src1, src2;
4940
4941 if (gen_trap_ifnofpu(dc)) {
4942 return true;
4943 }
4944 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4945 return raise_unimpfpop(dc);
4946 }
4947
4948 gen_op_clear_ieee_excp_and_FTT();
4949 dst = gen_dest_fpr_D(dc, a->rd);
4950 src1 = gen_load_fpr_F(dc, a->rs1);
4951 src2 = gen_load_fpr_F(dc, a->rs2);
4952 gen_helper_fsmuld(dst, tcg_env, src1, src2);
4953 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4954 gen_store_fpr_D(dc, a->rd, dst);
4955 return advance_pc(dc);
4956 }
4957
4958 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4959 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4960 {
4961 TCGv_i64 dst, src0, src1, src2;
4962
4963 if (gen_trap_ifnofpu(dc)) {
4964 return true;
4965 }
4966
4967 dst = gen_dest_fpr_D(dc, a->rd);
4968 src0 = gen_load_fpr_D(dc, a->rd);
4969 src1 = gen_load_fpr_D(dc, a->rs1);
4970 src2 = gen_load_fpr_D(dc, a->rs2);
4971 func(dst, src0, src1, src2);
4972 gen_store_fpr_D(dc, a->rd, dst);
4973 return advance_pc(dc);
4974 }
4975
4976 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4977
4978 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4979 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4980 {
4981 TCGv_i128 src1, src2;
4982
4983 if (gen_trap_ifnofpu(dc)) {
4984 return true;
4985 }
4986 if (gen_trap_float128(dc)) {
4987 return true;
4988 }
4989
4990 gen_op_clear_ieee_excp_and_FTT();
4991 src1 = gen_load_fpr_Q(dc, a->rs1);
4992 src2 = gen_load_fpr_Q(dc, a->rs2);
4993 func(src1, tcg_env, src1, src2);
4994 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4995 gen_store_fpr_Q(dc, a->rd, src1);
4996 return advance_pc(dc);
4997 }
4998
4999 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5000 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5001 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5002 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
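
/*
 * Illustrative note (editorial): these four quad arithmetic ops
 * pass operands and results as TCGv_i128 values through
 * gen_load_fpr_Q/gen_store_fpr_Q, whereas FdMULq and the FCMPq
 * paths below still stage quad values through the QT0/QT1 slots
 * in env.
 */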
5003
5004 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5005 {
5006 TCGv_i64 src1, src2;
5007
5008 if (gen_trap_ifnofpu(dc)) {
5009 return true;
5010 }
5011 if (gen_trap_float128(dc)) {
5012 return true;
5013 }
5014
5015 gen_op_clear_ieee_excp_and_FTT();
5016 src1 = gen_load_fpr_D(dc, a->rs1);
5017 src2 = gen_load_fpr_D(dc, a->rs2);
5018 gen_helper_fdmulq(tcg_env, src1, src2);
5019 gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5020 gen_op_store_QT0_fpr(QFPREG(a->rd));
5021 gen_update_fprs_dirty(dc, QFPREG(a->rd));
5022 return advance_pc(dc);
5023 }
5024
5025 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5026 void (*func)(DisasContext *, DisasCompare *, int, int))
5027 {
5028 DisasCompare cmp;
5029
5030 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5031 return false;
5032 }
5033 if (gen_trap_ifnofpu(dc)) {
5034 return true;
5035 }
5036 if (is_128 && gen_trap_float128(dc)) {
5037 return true;
5038 }
5039
5040 gen_op_clear_ieee_excp_and_FTT();
5041 func(dc, &cmp, a->rd, a->rs2);
5042 return advance_pc(dc);
5043 }
5044
5045 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5046 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5047 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5048
5049 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5050 void (*func)(DisasContext *, DisasCompare *, int, int))
5051 {
5052 DisasCompare cmp;
5053
5054 if (gen_trap_ifnofpu(dc)) {
5055 return true;
5056 }
5057 if (is_128 && gen_trap_float128(dc)) {
5058 return true;
5059 }
5060
5061 gen_op_clear_ieee_excp_and_FTT();
5062 gen_compare(&cmp, a->cc, a->cond, dc);
5063 func(dc, &cmp, a->rd, a->rs2);
5064 return advance_pc(dc);
5065 }
5066
5067 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5068 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5069 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5070
5071 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5072 void (*func)(DisasContext *, DisasCompare *, int, int))
5073 {
5074 DisasCompare cmp;
5075
5076 if (gen_trap_ifnofpu(dc)) {
5077 return true;
5078 }
5079 if (is_128 && gen_trap_float128(dc)) {
5080 return true;
5081 }
5082
5083 gen_op_clear_ieee_excp_and_FTT();
5084 gen_fcompare(&cmp, a->cc, a->cond);
5085 func(dc, &cmp, a->rd, a->rs2);
5086 return advance_pc(dc);
5087 }
5088
5089 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5090 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5091 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5092
5093 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5094 {
5095 TCGv_i32 src1, src2;
5096
5097 if (avail_32(dc) && a->cc != 0) {
5098 return false;
5099 }
5100 if (gen_trap_ifnofpu(dc)) {
5101 return true;
5102 }
5103
5104 gen_op_clear_ieee_excp_and_FTT();
5105 src1 = gen_load_fpr_F(dc, a->rs1);
5106 src2 = gen_load_fpr_F(dc, a->rs2);
5107 if (e) {
5108 gen_op_fcmpes(a->cc, src1, src2);
5109 } else {
5110 gen_op_fcmps(a->cc, src1, src2);
5111 }
5112 return advance_pc(dc);
5113 }
5114
5115 TRANS(FCMPs, ALL, do_fcmps, a, false)
5116 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5117
5118 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5119 {
5120 TCGv_i64 src1, src2;
5121
5122 if (avail_32(dc) && a->cc != 0) {
5123 return false;
5124 }
5125 if (gen_trap_ifnofpu(dc)) {
5126 return true;
5127 }
5128
5129 gen_op_clear_ieee_excp_and_FTT();
5130 src1 = gen_load_fpr_D(dc, a->rs1);
5131 src2 = gen_load_fpr_D(dc, a->rs2);
5132 if (e) {
5133 gen_op_fcmped(a->cc, src1, src2);
5134 } else {
5135 gen_op_fcmpd(a->cc, src1, src2);
5136 }
5137 return advance_pc(dc);
5138 }
5139
5140 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5141 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5142
5143 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5144 {
5145 if (avail_32(dc) && a->cc != 0) {
5146 return false;
5147 }
5148 if (gen_trap_ifnofpu(dc)) {
5149 return true;
5150 }
5151 if (gen_trap_float128(dc)) {
5152 return true;
5153 }
5154
5155 gen_op_clear_ieee_excp_and_FTT();
5156 gen_op_load_fpr_QT0(QFPREG(a->rs1));
5157 gen_op_load_fpr_QT1(QFPREG(a->rs2));
5158 if (e) {
5159 gen_op_fcmpeq(a->cc);
5160 } else {
5161 gen_op_fcmpq(a->cc);
5162 }
5163 return advance_pc(dc);
5164 }
5165
5166 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5167 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5168
5169 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5170 {
5171 DisasContext *dc = container_of(dcbase, DisasContext, base);
5172 CPUSPARCState *env = cpu_env(cs);
5173 int bound;
5174
5175 dc->pc = dc->base.pc_first;
5176 dc->npc = (target_ulong)dc->base.tb->cs_base;
5177 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5178 dc->def = &env->def;
5179 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5180 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5181 #ifndef CONFIG_USER_ONLY
5182 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5183 #endif
5184 #ifdef TARGET_SPARC64
5185 dc->fprs_dirty = 0;
5186 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5187 #ifndef CONFIG_USER_ONLY
5188 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5189 #endif
5190 #endif
5191 /*
5192 * if we reach a page boundary, we stop generation so that the
5193 * PC of a TT_TFAULT exception is always in the right page
5194 */
5195 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5196 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5197 }
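
/*
 * Illustrative note (editorial): pc_first | TARGET_PAGE_MASK sets
 * every bit above the in-page offset, so its negation is the number
 * of bytes left on the page, and dividing by 4 converts that to
 * SPARC instruction slots.  E.g. (illustrative, assuming 4 KiB
 * pages) an offset of 0xff0 leaves (0x1000 - 0xff0) / 4 = 4 insns.
 */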
5198
5199 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5200 {
5201 }
5202
5203 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5204 {
5205 DisasContext *dc = container_of(dcbase, DisasContext, base);
5206 target_ulong npc = dc->npc;
5207
5208 if (npc & 3) {
5209 switch (npc) {
5210 case JUMP_PC:
5211 assert(dc->jump_pc[1] == dc->pc + 4);
5212 npc = dc->jump_pc[0] | JUMP_PC;
5213 break;
5214 case DYNAMIC_PC:
5215 case DYNAMIC_PC_LOOKUP:
5216 npc = DYNAMIC_PC;
5217 break;
5218 default:
5219 g_assert_not_reached();
5220 }
5221 }
5222 tcg_gen_insn_start(dc->pc, npc);
5223 }
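
/*
 * Illustrative note (editorial): valid SPARC PCs are 4-byte
 * aligned, so a nonzero value in the low two bits of dc->npc marks
 * one of the sentinels (JUMP_PC, DYNAMIC_PC, DYNAMIC_PC_LOOKUP).
 * The canonicalized pair recorded by tcg_gen_insn_start is what
 * sparc_restore_state_to_opc (below) decodes after an exception.
 */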
5224
5225 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5226 {
5227 DisasContext *dc = container_of(dcbase, DisasContext, base);
5228 CPUSPARCState *env = cpu_env(cs);
5229 unsigned int insn;
5230
5231 insn = translator_ldl(env, &dc->base, dc->pc);
5232 dc->base.pc_next += 4;
5233
5234 if (!decode(dc, insn)) {
5235 gen_exception(dc, TT_ILL_INSN);
5236 }
5237
5238 if (dc->base.is_jmp == DISAS_NORETURN) {
5239 return;
5240 }
5241 if (dc->pc != dc->base.pc_next) {
5242 dc->base.is_jmp = DISAS_TOO_MANY;
5243 }
5244 }
5245
5246 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5247 {
5248 DisasContext *dc = container_of(dcbase, DisasContext, base);
5249 DisasDelayException *e, *e_next;
5250 bool may_lookup;
5251
5252 finishing_insn(dc);
5253
5254 switch (dc->base.is_jmp) {
5255 case DISAS_NEXT:
5256 case DISAS_TOO_MANY:
5257 if (((dc->pc | dc->npc) & 3) == 0) {
5258 /* static PC and NPC: we can use direct chaining */
5259 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5260 break;
5261 }
5262
5263 may_lookup = true;
5264 if (dc->pc & 3) {
5265 switch (dc->pc) {
5266 case DYNAMIC_PC_LOOKUP:
5267 break;
5268 case DYNAMIC_PC:
5269 may_lookup = false;
5270 break;
5271 default:
5272 g_assert_not_reached();
5273 }
5274 } else {
5275 tcg_gen_movi_tl(cpu_pc, dc->pc);
5276 }
5277
5278 if (dc->npc & 3) {
5279 switch (dc->npc) {
5280 case JUMP_PC:
5281 gen_generic_branch(dc);
5282 break;
5283 case DYNAMIC_PC:
5284 may_lookup = false;
5285 break;
5286 case DYNAMIC_PC_LOOKUP:
5287 break;
5288 default:
5289 g_assert_not_reached();
5290 }
5291 } else {
5292 tcg_gen_movi_tl(cpu_npc, dc->npc);
5293 }
5294 if (may_lookup) {
5295 tcg_gen_lookup_and_goto_ptr();
5296 } else {
5297 tcg_gen_exit_tb(NULL, 0);
5298 }
5299 break;
5300
5301 case DISAS_NORETURN:
5302 break;
5303
5304 case DISAS_EXIT:
5305 /* Exit TB */
5306 save_state(dc);
5307 tcg_gen_exit_tb(NULL, 0);
5308 break;
5309
5310 default:
5311 g_assert_not_reached();
5312 }
5313
5314 for (e = dc->delay_excp_list; e ; e = e_next) {
5315 gen_set_label(e->lab);
5316
5317 tcg_gen_movi_tl(cpu_pc, e->pc);
5318 if (e->npc % 4 == 0) {
5319 tcg_gen_movi_tl(cpu_npc, e->npc);
5320 }
5321 gen_helper_raise_exception(tcg_env, e->excp);
5322
5323 e_next = e->next;
5324 g_free(e);
5325 }
5326 }
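
/*
 * Illustrative note (editorial): the loop above emits the
 * out-of-line landing pads collected in delay_excp_list: each label
 * materializes the pc (and the npc, when it is a real aligned
 * address rather than a sentinel) of the faulting instruction and
 * then raises the recorded exception.
 */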
5327
5328 static void sparc_tr_disas_log(const DisasContextBase *dcbase,
5329 CPUState *cpu, FILE *logfile)
5330 {
5331 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
5332 target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
5333 }
5334
5335 static const TranslatorOps sparc_tr_ops = {
5336 .init_disas_context = sparc_tr_init_disas_context,
5337 .tb_start = sparc_tr_tb_start,
5338 .insn_start = sparc_tr_insn_start,
5339 .translate_insn = sparc_tr_translate_insn,
5340 .tb_stop = sparc_tr_tb_stop,
5341 .disas_log = sparc_tr_disas_log,
5342 };
5343
5344 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5345 vaddr pc, void *host_pc)
5346 {
5347 DisasContext dc = {};
5348
5349 translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5350 }
5351
5352 void sparc_tcg_init(void)
5353 {
5354 static const char gregnames[32][4] = {
5355 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5356 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5357 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5358 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5359 };
5360 static const char fregnames[32][4] = {
5361 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5362 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5363 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5364 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5365 };
5366
5367 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5368 #ifdef TARGET_SPARC64
5369 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5370 { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5371 { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5372 #endif
5373 { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5374 { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5375 { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5376 { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5377 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5378 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5379 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5380 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5381 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5382 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5383 };
5384
5385 unsigned int i;
5386
5387 cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5388 offsetof(CPUSPARCState, regwptr),
5389 "regwptr");
5390
5391 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5392 *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5393 }
5394
5395 cpu_regs[0] = NULL;
5396 for (i = 1; i < 8; ++i) {
5397 cpu_regs[i] = tcg_global_mem_new(tcg_env,
5398 offsetof(CPUSPARCState, gregs[i]),
5399 gregnames[i]);
5400 }
5401
5402 for (i = 8; i < 32; ++i) {
5403 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5404 (i - 8) * sizeof(target_ulong),
5405 gregnames[i]);
5406 }
5407
5408 for (i = 0; i < TARGET_DPREGS; i++) {
5409 cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5410 offsetof(CPUSPARCState, fpr[i]),
5411 fregnames[i]);
5412 }
5413
5414 #ifdef TARGET_SPARC64
5415 cpu_fprs = tcg_global_mem_new_i32(tcg_env,
5416 offsetof(CPUSPARCState, fprs), "fprs");
5417 #endif
5418 }
5419
5420 void sparc_restore_state_to_opc(CPUState *cs,
5421 const TranslationBlock *tb,
5422 const uint64_t *data)
5423 {
5424 SPARCCPU *cpu = SPARC_CPU(cs);
5425 CPUSPARCState *env = &cpu->env;
5426 target_ulong pc = data[0];
5427 target_ulong npc = data[1];
5428
5429 env->pc = pc;
5430 if (npc == DYNAMIC_PC) {
5431 /* dynamic NPC: already stored */
5432 } else if (npc & JUMP_PC) {
5433 /* jump PC: use 'cond' and the jump targets of the translation */
5434 if (env->cond) {
5435 env->npc = npc & ~3;
5436 } else {
5437 env->npc = pc + 4;
5438 }
5439 } else {
5440 env->npc = npc;
5441 }
5442 }