]> git.proxmox.com Git - mirror_qemu.git/blob - target/sparc/translate.c
target/sparc: Discard cpu_cond at the end of each insn
[mirror_qemu.git] / target / sparc / translate.c
1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef HELPER_H
37
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
66 # define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
92 # define FSR_LDXFSR_MASK 0
93 # define FSR_LDXFSR_OLDMASK 0
94 # define MAXTL_MASK 0
95 #endif
96
97 /* Dynamic PC, must exit to main loop. */
98 #define DYNAMIC_PC 1
99 /* Dynamic PC, one of two values according to jump_pc[T2]. */
100 #define JUMP_PC 2
101 /* Dynamic PC, may lookup next TB. */
102 #define DYNAMIC_PC_LOOKUP 3
103
104 #define DISAS_EXIT DISAS_TARGET_0
105
106 /* global register indexes */
107 static TCGv_ptr cpu_regwptr;
108 static TCGv cpu_fsr, cpu_pc, cpu_npc;
109 static TCGv cpu_regs[32];
110 static TCGv cpu_y;
111 static TCGv cpu_tbr;
112 static TCGv cpu_cond;
113 static TCGv cpu_cc_N;
114 static TCGv cpu_cc_V;
115 static TCGv cpu_icc_Z;
116 static TCGv cpu_icc_C;
117 #ifdef TARGET_SPARC64
118 static TCGv cpu_xcc_Z;
119 static TCGv cpu_xcc_C;
120 static TCGv_i32 cpu_fprs;
121 static TCGv cpu_gsr;
122 #else
123 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
124 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
125 #endif
126
127 #ifdef TARGET_SPARC64
128 #define cpu_cc_Z cpu_xcc_Z
129 #define cpu_cc_C cpu_xcc_C
130 #else
131 #define cpu_cc_Z cpu_icc_Z
132 #define cpu_cc_C cpu_icc_C
133 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
134 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
135 #endif
136
137 /* Floating point registers */
138 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
139
140 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
141 #ifdef TARGET_SPARC64
142 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
143 # define env64_field_offsetof(X) env_field_offsetof(X)
144 #else
145 # define env32_field_offsetof(X) env_field_offsetof(X)
146 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
147 #endif
148
149 typedef struct DisasCompare {
150 TCGCond cond;
151 TCGv c1;
152 int c2;
153 } DisasCompare;
154
155 typedef struct DisasDelayException {
156 struct DisasDelayException *next;
157 TCGLabel *lab;
158 TCGv_i32 excp;
159 /* Saved state at parent insn. */
160 target_ulong pc;
161 target_ulong npc;
162 } DisasDelayException;
163
164 typedef struct DisasContext {
165 DisasContextBase base;
166 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
167 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
168
169 /* Used when JUMP_PC value is used. */
170 DisasCompare jump;
171 target_ulong jump_pc[2];
172
173 int mem_idx;
174 bool cpu_cond_live;
175 bool fpu_enabled;
176 bool address_mask_32bit;
177 #ifndef CONFIG_USER_ONLY
178 bool supervisor;
179 #ifdef TARGET_SPARC64
180 bool hypervisor;
181 #endif
182 #endif
183
184 sparc_def_t *def;
185 #ifdef TARGET_SPARC64
186 int fprs_dirty;
187 int asi;
188 #endif
189 DisasDelayException *delay_excp_list;
190 } DisasContext;
191
192 // This function uses non-native bit order
193 #define GET_FIELD(X, FROM, TO) \
194 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
195
196 // This function uses the order in the manuals, i.e. bit 0 is 2^0
197 #define GET_FIELD_SP(X, FROM, TO) \
198 GET_FIELD(X, 31 - (TO), 31 - (FROM))
199
200 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
201 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
202
203 #ifdef TARGET_SPARC64
204 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
205 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
206 #else
207 #define DFPREG(r) (r & 0x1e)
208 #define QFPREG(r) (r & 0x1c)
209 #endif
210
211 #define UA2005_HTRAP_MASK 0xff
212 #define V8_TRAP_MASK 0x7f
213
214 #define IS_IMM (insn & (1<<13))
215
216 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
217 {
218 #if defined(TARGET_SPARC64)
219 int bit = (rd < 32) ? 1 : 2;
220 /* If we know we've already set this bit within the TB,
221 we can avoid setting it again. */
222 if (!(dc->fprs_dirty & bit)) {
223 dc->fprs_dirty |= bit;
224 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
225 }
226 #endif
227 }
228
229 /* floating point registers moves */
230 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
231 {
232 TCGv_i32 ret = tcg_temp_new_i32();
233 if (src & 1) {
234 tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
235 } else {
236 tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
237 }
238 return ret;
239 }
240
241 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
242 {
243 TCGv_i64 t = tcg_temp_new_i64();
244
245 tcg_gen_extu_i32_i64(t, v);
246 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
247 (dst & 1 ? 0 : 32), 32);
248 gen_update_fprs_dirty(dc, dst);
249 }
250
251 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
252 {
253 return tcg_temp_new_i32();
254 }
255
256 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
257 {
258 src = DFPREG(src);
259 return cpu_fpr[src / 2];
260 }
261
262 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
263 {
264 dst = DFPREG(dst);
265 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
266 gen_update_fprs_dirty(dc, dst);
267 }
268
269 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
270 {
271 return cpu_fpr[DFPREG(dst) / 2];
272 }
273
274 static void gen_op_load_fpr_QT0(unsigned int src)
275 {
276 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
277 offsetof(CPU_QuadU, ll.upper));
278 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
279 offsetof(CPU_QuadU, ll.lower));
280 }
281
282 static void gen_op_load_fpr_QT1(unsigned int src)
283 {
284 tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
285 offsetof(CPU_QuadU, ll.upper));
286 tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
287 offsetof(CPU_QuadU, ll.lower));
288 }
289
290 static void gen_op_store_QT0_fpr(unsigned int dst)
291 {
292 tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
293 offsetof(CPU_QuadU, ll.upper));
294 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
295 offsetof(CPU_QuadU, ll.lower));
296 }
297
298 /* moves */
299 #ifdef CONFIG_USER_ONLY
300 #define supervisor(dc) 0
301 #define hypervisor(dc) 0
302 #else
303 #ifdef TARGET_SPARC64
304 #define hypervisor(dc) (dc->hypervisor)
305 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
306 #else
307 #define supervisor(dc) (dc->supervisor)
308 #define hypervisor(dc) 0
309 #endif
310 #endif
311
312 #if !defined(TARGET_SPARC64)
313 # define AM_CHECK(dc) false
314 #elif defined(TARGET_ABI32)
315 # define AM_CHECK(dc) true
316 #elif defined(CONFIG_USER_ONLY)
317 # define AM_CHECK(dc) false
318 #else
319 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
320 #endif
321
322 static void gen_address_mask(DisasContext *dc, TCGv addr)
323 {
324 if (AM_CHECK(dc)) {
325 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
326 }
327 }
328
329 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
330 {
331 return AM_CHECK(dc) ? (uint32_t)addr : addr;
332 }
333
334 static TCGv gen_load_gpr(DisasContext *dc, int reg)
335 {
336 if (reg > 0) {
337 assert(reg < 32);
338 return cpu_regs[reg];
339 } else {
340 TCGv t = tcg_temp_new();
341 tcg_gen_movi_tl(t, 0);
342 return t;
343 }
344 }
345
346 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
347 {
348 if (reg > 0) {
349 assert(reg < 32);
350 tcg_gen_mov_tl(cpu_regs[reg], v);
351 }
352 }
353
354 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
355 {
356 if (reg > 0) {
357 assert(reg < 32);
358 return cpu_regs[reg];
359 } else {
360 return tcg_temp_new();
361 }
362 }
363
364 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
365 {
366 return translator_use_goto_tb(&s->base, pc) &&
367 translator_use_goto_tb(&s->base, npc);
368 }
369
370 static void gen_goto_tb(DisasContext *s, int tb_num,
371 target_ulong pc, target_ulong npc)
372 {
373 if (use_goto_tb(s, pc, npc)) {
374 /* jump to same page: we can use a direct jump */
375 tcg_gen_goto_tb(tb_num);
376 tcg_gen_movi_tl(cpu_pc, pc);
377 tcg_gen_movi_tl(cpu_npc, npc);
378 tcg_gen_exit_tb(s->base.tb, tb_num);
379 } else {
380 /* jump to another page: we can use an indirect jump */
381 tcg_gen_movi_tl(cpu_pc, pc);
382 tcg_gen_movi_tl(cpu_npc, npc);
383 tcg_gen_lookup_and_goto_ptr();
384 }
385 }
386
387 static TCGv gen_carry32(void)
388 {
389 if (TARGET_LONG_BITS == 64) {
390 TCGv t = tcg_temp_new();
391 tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
392 return t;
393 }
394 return cpu_icc_C;
395 }
396
397 static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
398 {
399 TCGv z = tcg_constant_tl(0);
400
401 if (cin) {
402 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
403 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
404 } else {
405 tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
406 }
407 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
408 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
409 tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
410 if (TARGET_LONG_BITS == 64) {
411 /*
412 * Carry-in to bit 32 is result ^ src1 ^ src2.
413 * We already have the src xor term in Z, from computation of V.
414 */
415 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
416 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
417 }
418 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
419 tcg_gen_mov_tl(dst, cpu_cc_N);
420 }
421
422 static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
423 {
424 gen_op_addcc_int(dst, src1, src2, NULL);
425 }
426
427 static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
428 {
429 TCGv t = tcg_temp_new();
430
431 /* Save the tag bits around modification of dst. */
432 tcg_gen_or_tl(t, src1, src2);
433
434 gen_op_addcc(dst, src1, src2);
435
436 /* Incorprate tag bits into icc.V */
437 tcg_gen_andi_tl(t, t, 3);
438 tcg_gen_neg_tl(t, t);
439 tcg_gen_ext32u_tl(t, t);
440 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
441 }
442
443 static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
444 {
445 tcg_gen_add_tl(dst, src1, src2);
446 tcg_gen_add_tl(dst, dst, gen_carry32());
447 }
448
449 static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
450 {
451 gen_op_addcc_int(dst, src1, src2, gen_carry32());
452 }
453
454 static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
455 {
456 TCGv z = tcg_constant_tl(0);
457
458 if (cin) {
459 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
460 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
461 } else {
462 tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
463 }
464 tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
465 tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
466 tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
467 tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
468 #ifdef TARGET_SPARC64
469 tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
470 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
471 #endif
472 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
473 tcg_gen_mov_tl(dst, cpu_cc_N);
474 }
475
476 static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
477 {
478 gen_op_subcc_int(dst, src1, src2, NULL);
479 }
480
481 static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
482 {
483 TCGv t = tcg_temp_new();
484
485 /* Save the tag bits around modification of dst. */
486 tcg_gen_or_tl(t, src1, src2);
487
488 gen_op_subcc(dst, src1, src2);
489
490 /* Incorprate tag bits into icc.V */
491 tcg_gen_andi_tl(t, t, 3);
492 tcg_gen_neg_tl(t, t);
493 tcg_gen_ext32u_tl(t, t);
494 tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
495 }
496
497 static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
498 {
499 tcg_gen_sub_tl(dst, src1, src2);
500 tcg_gen_sub_tl(dst, dst, gen_carry32());
501 }
502
503 static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
504 {
505 gen_op_subcc_int(dst, src1, src2, gen_carry32());
506 }
507
508 static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
509 {
510 TCGv zero = tcg_constant_tl(0);
511 TCGv t_src1 = tcg_temp_new();
512 TCGv t_src2 = tcg_temp_new();
513 TCGv t0 = tcg_temp_new();
514
515 tcg_gen_ext32u_tl(t_src1, src1);
516 tcg_gen_ext32u_tl(t_src2, src2);
517
518 /*
519 * if (!(env->y & 1))
520 * src2 = 0;
521 */
522 tcg_gen_andi_tl(t0, cpu_y, 0x1);
523 tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
524
525 /*
526 * b2 = src1 & 1;
527 * y = (b2 << 31) | (y >> 1);
528 */
529 tcg_gen_extract_tl(t0, cpu_y, 1, 31);
530 tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);
531
532 // b1 = N ^ V;
533 tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);
534
535 /*
536 * src1 = (b1 << 31) | (src1 >> 1)
537 */
538 tcg_gen_andi_tl(t0, t0, 1u << 31);
539 tcg_gen_shri_tl(t_src1, t_src1, 1);
540 tcg_gen_or_tl(t_src1, t_src1, t0);
541
542 gen_op_addcc(dst, t_src1, t_src2);
543 }
544
545 static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
546 {
547 #if TARGET_LONG_BITS == 32
548 if (sign_ext) {
549 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
550 } else {
551 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
552 }
553 #else
554 TCGv t0 = tcg_temp_new_i64();
555 TCGv t1 = tcg_temp_new_i64();
556
557 if (sign_ext) {
558 tcg_gen_ext32s_i64(t0, src1);
559 tcg_gen_ext32s_i64(t1, src2);
560 } else {
561 tcg_gen_ext32u_i64(t0, src1);
562 tcg_gen_ext32u_i64(t1, src2);
563 }
564
565 tcg_gen_mul_i64(dst, t0, t1);
566 tcg_gen_shri_i64(cpu_y, dst, 32);
567 #endif
568 }
569
570 static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
571 {
572 /* zero-extend truncated operands before multiplication */
573 gen_op_multiply(dst, src1, src2, 0);
574 }
575
576 static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
577 {
578 /* sign-extend truncated operands before multiplication */
579 gen_op_multiply(dst, src1, src2, 1);
580 }
581
582 static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
583 {
584 gen_helper_udivx(dst, tcg_env, src1, src2);
585 }
586
587 static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
588 {
589 gen_helper_sdivx(dst, tcg_env, src1, src2);
590 }
591
592 static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
593 {
594 #ifdef TARGET_SPARC64
595 gen_helper_udiv(dst, tcg_env, src1, src2);
596 tcg_gen_ext32u_tl(dst, dst);
597 #else
598 TCGv_i64 t64 = tcg_temp_new_i64();
599 gen_helper_udiv(t64, tcg_env, src1, src2);
600 tcg_gen_trunc_i64_tl(dst, t64);
601 #endif
602 }
603
604 static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
605 {
606 #ifdef TARGET_SPARC64
607 gen_helper_sdiv(dst, tcg_env, src1, src2);
608 tcg_gen_ext32s_tl(dst, dst);
609 #else
610 TCGv_i64 t64 = tcg_temp_new_i64();
611 gen_helper_sdiv(t64, tcg_env, src1, src2);
612 tcg_gen_trunc_i64_tl(dst, t64);
613 #endif
614 }
615
616 static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
617 {
618 TCGv_i64 t64;
619
620 #ifdef TARGET_SPARC64
621 t64 = cpu_cc_V;
622 #else
623 t64 = tcg_temp_new_i64();
624 #endif
625
626 gen_helper_udiv(t64, tcg_env, src1, src2);
627
628 #ifdef TARGET_SPARC64
629 tcg_gen_ext32u_tl(cpu_cc_N, t64);
630 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
631 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
632 tcg_gen_movi_tl(cpu_icc_C, 0);
633 #else
634 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
635 #endif
636 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
637 tcg_gen_movi_tl(cpu_cc_C, 0);
638 tcg_gen_mov_tl(dst, cpu_cc_N);
639 }
640
641 static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
642 {
643 TCGv_i64 t64;
644
645 #ifdef TARGET_SPARC64
646 t64 = cpu_cc_V;
647 #else
648 t64 = tcg_temp_new_i64();
649 #endif
650
651 gen_helper_sdiv(t64, tcg_env, src1, src2);
652
653 #ifdef TARGET_SPARC64
654 tcg_gen_ext32s_tl(cpu_cc_N, t64);
655 tcg_gen_shri_tl(cpu_cc_V, t64, 32);
656 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
657 tcg_gen_movi_tl(cpu_icc_C, 0);
658 #else
659 tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
660 #endif
661 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
662 tcg_gen_movi_tl(cpu_cc_C, 0);
663 tcg_gen_mov_tl(dst, cpu_cc_N);
664 }
665
666 static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
667 {
668 gen_helper_taddcctv(dst, tcg_env, src1, src2);
669 }
670
671 static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
672 {
673 gen_helper_tsubcctv(dst, tcg_env, src1, src2);
674 }
675
676 static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
677 {
678 tcg_gen_ctpop_tl(dst, src2);
679 }
680
681 #ifndef TARGET_SPARC64
682 static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
683 {
684 g_assert_not_reached();
685 }
686 #endif
687
688 static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
689 {
690 gen_helper_array8(dst, src1, src2);
691 tcg_gen_shli_tl(dst, dst, 1);
692 }
693
694 static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
695 {
696 gen_helper_array8(dst, src1, src2);
697 tcg_gen_shli_tl(dst, dst, 2);
698 }
699
700 static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
701 {
702 #ifdef TARGET_SPARC64
703 gen_helper_fpack16(dst, cpu_gsr, src);
704 #else
705 g_assert_not_reached();
706 #endif
707 }
708
709 static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
710 {
711 #ifdef TARGET_SPARC64
712 gen_helper_fpackfix(dst, cpu_gsr, src);
713 #else
714 g_assert_not_reached();
715 #endif
716 }
717
718 static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
719 {
720 #ifdef TARGET_SPARC64
721 gen_helper_fpack32(dst, cpu_gsr, src1, src2);
722 #else
723 g_assert_not_reached();
724 #endif
725 }
726
727 static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
728 {
729 #ifdef TARGET_SPARC64
730 TCGv t1, t2, shift;
731
732 t1 = tcg_temp_new();
733 t2 = tcg_temp_new();
734 shift = tcg_temp_new();
735
736 tcg_gen_andi_tl(shift, cpu_gsr, 7);
737 tcg_gen_shli_tl(shift, shift, 3);
738 tcg_gen_shl_tl(t1, s1, shift);
739
740 /*
741 * A shift of 64 does not produce 0 in TCG. Divide this into a
742 * shift of (up to 63) followed by a constant shift of 1.
743 */
744 tcg_gen_xori_tl(shift, shift, 63);
745 tcg_gen_shr_tl(t2, s2, shift);
746 tcg_gen_shri_tl(t2, t2, 1);
747
748 tcg_gen_or_tl(dst, t1, t2);
749 #else
750 g_assert_not_reached();
751 #endif
752 }
753
754 static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
755 {
756 #ifdef TARGET_SPARC64
757 gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
758 #else
759 g_assert_not_reached();
760 #endif
761 }
762
763 // 1
764 static void gen_op_eval_ba(TCGv dst)
765 {
766 tcg_gen_movi_tl(dst, 1);
767 }
768
769 // 0
770 static void gen_op_eval_bn(TCGv dst)
771 {
772 tcg_gen_movi_tl(dst, 0);
773 }
774
775 /*
776 FPSR bit field FCC1 | FCC0:
777 0 =
778 1 <
779 2 >
780 3 unordered
781 */
782 static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
783 unsigned int fcc_offset)
784 {
785 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
786 tcg_gen_andi_tl(reg, reg, 0x1);
787 }
788
789 static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
790 {
791 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
792 tcg_gen_andi_tl(reg, reg, 0x1);
793 }
794
795 // !0: FCC0 | FCC1
796 static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
797 {
798 TCGv t0 = tcg_temp_new();
799 gen_mov_reg_FCC0(dst, src, fcc_offset);
800 gen_mov_reg_FCC1(t0, src, fcc_offset);
801 tcg_gen_or_tl(dst, dst, t0);
802 }
803
804 // 1 or 2: FCC0 ^ FCC1
805 static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
806 {
807 TCGv t0 = tcg_temp_new();
808 gen_mov_reg_FCC0(dst, src, fcc_offset);
809 gen_mov_reg_FCC1(t0, src, fcc_offset);
810 tcg_gen_xor_tl(dst, dst, t0);
811 }
812
813 // 1 or 3: FCC0
814 static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
815 {
816 gen_mov_reg_FCC0(dst, src, fcc_offset);
817 }
818
819 // 1: FCC0 & !FCC1
820 static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
821 {
822 TCGv t0 = tcg_temp_new();
823 gen_mov_reg_FCC0(dst, src, fcc_offset);
824 gen_mov_reg_FCC1(t0, src, fcc_offset);
825 tcg_gen_andc_tl(dst, dst, t0);
826 }
827
828 // 2 or 3: FCC1
829 static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
830 {
831 gen_mov_reg_FCC1(dst, src, fcc_offset);
832 }
833
834 // 2: !FCC0 & FCC1
835 static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
836 {
837 TCGv t0 = tcg_temp_new();
838 gen_mov_reg_FCC0(dst, src, fcc_offset);
839 gen_mov_reg_FCC1(t0, src, fcc_offset);
840 tcg_gen_andc_tl(dst, t0, dst);
841 }
842
843 // 3: FCC0 & FCC1
844 static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
845 {
846 TCGv t0 = tcg_temp_new();
847 gen_mov_reg_FCC0(dst, src, fcc_offset);
848 gen_mov_reg_FCC1(t0, src, fcc_offset);
849 tcg_gen_and_tl(dst, dst, t0);
850 }
851
852 // 0: !(FCC0 | FCC1)
853 static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
854 {
855 TCGv t0 = tcg_temp_new();
856 gen_mov_reg_FCC0(dst, src, fcc_offset);
857 gen_mov_reg_FCC1(t0, src, fcc_offset);
858 tcg_gen_or_tl(dst, dst, t0);
859 tcg_gen_xori_tl(dst, dst, 0x1);
860 }
861
862 // 0 or 3: !(FCC0 ^ FCC1)
863 static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
864 {
865 TCGv t0 = tcg_temp_new();
866 gen_mov_reg_FCC0(dst, src, fcc_offset);
867 gen_mov_reg_FCC1(t0, src, fcc_offset);
868 tcg_gen_xor_tl(dst, dst, t0);
869 tcg_gen_xori_tl(dst, dst, 0x1);
870 }
871
872 // 0 or 2: !FCC0
873 static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
874 {
875 gen_mov_reg_FCC0(dst, src, fcc_offset);
876 tcg_gen_xori_tl(dst, dst, 0x1);
877 }
878
879 // !1: !(FCC0 & !FCC1)
880 static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
881 {
882 TCGv t0 = tcg_temp_new();
883 gen_mov_reg_FCC0(dst, src, fcc_offset);
884 gen_mov_reg_FCC1(t0, src, fcc_offset);
885 tcg_gen_andc_tl(dst, dst, t0);
886 tcg_gen_xori_tl(dst, dst, 0x1);
887 }
888
889 // 0 or 1: !FCC1
890 static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
891 {
892 gen_mov_reg_FCC1(dst, src, fcc_offset);
893 tcg_gen_xori_tl(dst, dst, 0x1);
894 }
895
896 // !2: !(!FCC0 & FCC1)
897 static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
898 {
899 TCGv t0 = tcg_temp_new();
900 gen_mov_reg_FCC0(dst, src, fcc_offset);
901 gen_mov_reg_FCC1(t0, src, fcc_offset);
902 tcg_gen_andc_tl(dst, t0, dst);
903 tcg_gen_xori_tl(dst, dst, 0x1);
904 }
905
906 // !3: !(FCC0 & FCC1)
907 static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
908 {
909 TCGv t0 = tcg_temp_new();
910 gen_mov_reg_FCC0(dst, src, fcc_offset);
911 gen_mov_reg_FCC1(t0, src, fcc_offset);
912 tcg_gen_and_tl(dst, dst, t0);
913 tcg_gen_xori_tl(dst, dst, 0x1);
914 }
915
916 static void finishing_insn(DisasContext *dc)
917 {
918 /*
919 * From here, there is no future path through an unwinding exception.
920 * If the current insn cannot raise an exception, the computation of
921 * cpu_cond may be able to be elided.
922 */
923 if (dc->cpu_cond_live) {
924 tcg_gen_discard_tl(cpu_cond);
925 dc->cpu_cond_live = false;
926 }
927 }
928
929 static void gen_generic_branch(DisasContext *dc)
930 {
931 TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
932 TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
933 TCGv c2 = tcg_constant_tl(dc->jump.c2);
934
935 tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
936 }
937
938 /* call this function before using the condition register as it may
939 have been set for a jump */
940 static void flush_cond(DisasContext *dc)
941 {
942 if (dc->npc == JUMP_PC) {
943 gen_generic_branch(dc);
944 dc->npc = DYNAMIC_PC_LOOKUP;
945 }
946 }
947
948 static void save_npc(DisasContext *dc)
949 {
950 if (dc->npc & 3) {
951 switch (dc->npc) {
952 case JUMP_PC:
953 gen_generic_branch(dc);
954 dc->npc = DYNAMIC_PC_LOOKUP;
955 break;
956 case DYNAMIC_PC:
957 case DYNAMIC_PC_LOOKUP:
958 break;
959 default:
960 g_assert_not_reached();
961 }
962 } else {
963 tcg_gen_movi_tl(cpu_npc, dc->npc);
964 }
965 }
966
967 static void save_state(DisasContext *dc)
968 {
969 tcg_gen_movi_tl(cpu_pc, dc->pc);
970 save_npc(dc);
971 }
972
973 static void gen_exception(DisasContext *dc, int which)
974 {
975 finishing_insn(dc);
976 save_state(dc);
977 gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
978 dc->base.is_jmp = DISAS_NORETURN;
979 }
980
981 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
982 {
983 DisasDelayException *e = g_new0(DisasDelayException, 1);
984
985 e->next = dc->delay_excp_list;
986 dc->delay_excp_list = e;
987
988 e->lab = gen_new_label();
989 e->excp = excp;
990 e->pc = dc->pc;
991 /* Caller must have used flush_cond before branch. */
992 assert(e->npc != JUMP_PC);
993 e->npc = dc->npc;
994
995 return e->lab;
996 }
997
998 static TCGLabel *delay_exception(DisasContext *dc, int excp)
999 {
1000 return delay_exceptionv(dc, tcg_constant_i32(excp));
1001 }
1002
1003 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1004 {
1005 TCGv t = tcg_temp_new();
1006 TCGLabel *lab;
1007
1008 tcg_gen_andi_tl(t, addr, mask);
1009
1010 flush_cond(dc);
1011 lab = delay_exception(dc, TT_UNALIGNED);
1012 tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1013 }
1014
1015 static void gen_mov_pc_npc(DisasContext *dc)
1016 {
1017 finishing_insn(dc);
1018
1019 if (dc->npc & 3) {
1020 switch (dc->npc) {
1021 case JUMP_PC:
1022 gen_generic_branch(dc);
1023 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1024 dc->pc = DYNAMIC_PC_LOOKUP;
1025 break;
1026 case DYNAMIC_PC:
1027 case DYNAMIC_PC_LOOKUP:
1028 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1029 dc->pc = dc->npc;
1030 break;
1031 default:
1032 g_assert_not_reached();
1033 }
1034 } else {
1035 dc->pc = dc->npc;
1036 }
1037 }
1038
1039 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1040 DisasContext *dc)
1041 {
1042 TCGv t1;
1043
1044 cmp->c1 = t1 = tcg_temp_new();
1045 cmp->c2 = 0;
1046
1047 switch (cond & 7) {
1048 case 0x0: /* never */
1049 cmp->cond = TCG_COND_NEVER;
1050 cmp->c1 = tcg_constant_tl(0);
1051 break;
1052
1053 case 0x1: /* eq: Z */
1054 cmp->cond = TCG_COND_EQ;
1055 if (TARGET_LONG_BITS == 32 || xcc) {
1056 tcg_gen_mov_tl(t1, cpu_cc_Z);
1057 } else {
1058 tcg_gen_ext32u_tl(t1, cpu_icc_Z);
1059 }
1060 break;
1061
1062 case 0x2: /* le: Z | (N ^ V) */
1063 /*
1064 * Simplify:
1065 * cc_Z || (N ^ V) < 0 NE
1066 * cc_Z && !((N ^ V) < 0) EQ
1067 * cc_Z & ~((N ^ V) >> TLB) EQ
1068 */
1069 cmp->cond = TCG_COND_EQ;
1070 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1071 tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
1072 tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
1073 if (TARGET_LONG_BITS == 64 && !xcc) {
1074 tcg_gen_ext32u_tl(t1, t1);
1075 }
1076 break;
1077
1078 case 0x3: /* lt: N ^ V */
1079 cmp->cond = TCG_COND_LT;
1080 tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
1081 if (TARGET_LONG_BITS == 64 && !xcc) {
1082 tcg_gen_ext32s_tl(t1, t1);
1083 }
1084 break;
1085
1086 case 0x4: /* leu: Z | C */
1087 /*
1088 * Simplify:
1089 * cc_Z == 0 || cc_C != 0 NE
1090 * cc_Z != 0 && cc_C == 0 EQ
1091 * cc_Z & (cc_C ? 0 : -1) EQ
1092 * cc_Z & (cc_C - 1) EQ
1093 */
1094 cmp->cond = TCG_COND_EQ;
1095 if (TARGET_LONG_BITS == 32 || xcc) {
1096 tcg_gen_subi_tl(t1, cpu_cc_C, 1);
1097 tcg_gen_and_tl(t1, t1, cpu_cc_Z);
1098 } else {
1099 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1100 tcg_gen_subi_tl(t1, t1, 1);
1101 tcg_gen_and_tl(t1, t1, cpu_icc_Z);
1102 tcg_gen_ext32u_tl(t1, t1);
1103 }
1104 break;
1105
1106 case 0x5: /* ltu: C */
1107 cmp->cond = TCG_COND_NE;
1108 if (TARGET_LONG_BITS == 32 || xcc) {
1109 tcg_gen_mov_tl(t1, cpu_cc_C);
1110 } else {
1111 tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
1112 }
1113 break;
1114
1115 case 0x6: /* neg: N */
1116 cmp->cond = TCG_COND_LT;
1117 if (TARGET_LONG_BITS == 32 || xcc) {
1118 tcg_gen_mov_tl(t1, cpu_cc_N);
1119 } else {
1120 tcg_gen_ext32s_tl(t1, cpu_cc_N);
1121 }
1122 break;
1123
1124 case 0x7: /* vs: V */
1125 cmp->cond = TCG_COND_LT;
1126 if (TARGET_LONG_BITS == 32 || xcc) {
1127 tcg_gen_mov_tl(t1, cpu_cc_V);
1128 } else {
1129 tcg_gen_ext32s_tl(t1, cpu_cc_V);
1130 }
1131 break;
1132 }
1133 if (cond & 8) {
1134 cmp->cond = tcg_invert_cond(cmp->cond);
1135 }
1136 }
1137
1138 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1139 {
1140 unsigned int offset;
1141 TCGv r_dst;
1142
1143 /* For now we still generate a straight boolean result. */
1144 cmp->cond = TCG_COND_NE;
1145 cmp->c1 = r_dst = tcg_temp_new();
1146 cmp->c2 = 0;
1147
1148 switch (cc) {
1149 default:
1150 case 0x0:
1151 offset = 0;
1152 break;
1153 case 0x1:
1154 offset = 32 - 10;
1155 break;
1156 case 0x2:
1157 offset = 34 - 10;
1158 break;
1159 case 0x3:
1160 offset = 36 - 10;
1161 break;
1162 }
1163
1164 switch (cond) {
1165 case 0x0:
1166 gen_op_eval_bn(r_dst);
1167 break;
1168 case 0x1:
1169 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1170 break;
1171 case 0x2:
1172 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1173 break;
1174 case 0x3:
1175 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1176 break;
1177 case 0x4:
1178 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1179 break;
1180 case 0x5:
1181 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1182 break;
1183 case 0x6:
1184 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1185 break;
1186 case 0x7:
1187 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1188 break;
1189 case 0x8:
1190 gen_op_eval_ba(r_dst);
1191 break;
1192 case 0x9:
1193 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1194 break;
1195 case 0xa:
1196 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1197 break;
1198 case 0xb:
1199 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1200 break;
1201 case 0xc:
1202 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1203 break;
1204 case 0xd:
1205 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1206 break;
1207 case 0xe:
1208 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1209 break;
1210 case 0xf:
1211 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1212 break;
1213 }
1214 }
1215
1216 // Inverted logic
1217 static const TCGCond gen_tcg_cond_reg[8] = {
1218 TCG_COND_NEVER, /* reserved */
1219 TCG_COND_NE,
1220 TCG_COND_GT,
1221 TCG_COND_GE,
1222 TCG_COND_NEVER, /* reserved */
1223 TCG_COND_EQ,
1224 TCG_COND_LE,
1225 TCG_COND_LT,
1226 };
1227
1228 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1229 {
1230 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1231 cmp->c1 = tcg_temp_new();
1232 cmp->c2 = 0;
1233 tcg_gen_mov_tl(cmp->c1, r_src);
1234 }
1235
1236 static void gen_op_clear_ieee_excp_and_FTT(void)
1237 {
1238 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1239 }
1240
1241 static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
1242 {
1243 gen_op_clear_ieee_excp_and_FTT();
1244 tcg_gen_mov_i32(dst, src);
1245 }
1246
1247 static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
1248 {
1249 gen_op_clear_ieee_excp_and_FTT();
1250 gen_helper_fnegs(dst, src);
1251 }
1252
1253 static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
1254 {
1255 gen_op_clear_ieee_excp_and_FTT();
1256 gen_helper_fabss(dst, src);
1257 }
1258
1259 static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
1260 {
1261 gen_op_clear_ieee_excp_and_FTT();
1262 tcg_gen_mov_i64(dst, src);
1263 }
1264
1265 static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
1266 {
1267 gen_op_clear_ieee_excp_and_FTT();
1268 gen_helper_fnegd(dst, src);
1269 }
1270
1271 static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
1272 {
1273 gen_op_clear_ieee_excp_and_FTT();
1274 gen_helper_fabsd(dst, src);
1275 }
1276
1277 #ifdef TARGET_SPARC64
1278 static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1279 {
1280 switch (fccno) {
1281 case 0:
1282 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1283 break;
1284 case 1:
1285 gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1286 break;
1287 case 2:
1288 gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1289 break;
1290 case 3:
1291 gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1292 break;
1293 }
1294 }
1295
1296 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1297 {
1298 switch (fccno) {
1299 case 0:
1300 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1301 break;
1302 case 1:
1303 gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1304 break;
1305 case 2:
1306 gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1307 break;
1308 case 3:
1309 gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1310 break;
1311 }
1312 }
1313
1314 static void gen_op_fcmpq(int fccno)
1315 {
1316 switch (fccno) {
1317 case 0:
1318 gen_helper_fcmpq(cpu_fsr, tcg_env);
1319 break;
1320 case 1:
1321 gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
1322 break;
1323 case 2:
1324 gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
1325 break;
1326 case 3:
1327 gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
1328 break;
1329 }
1330 }
1331
1332 static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1333 {
1334 switch (fccno) {
1335 case 0:
1336 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1337 break;
1338 case 1:
1339 gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1340 break;
1341 case 2:
1342 gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1343 break;
1344 case 3:
1345 gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1346 break;
1347 }
1348 }
1349
1350 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1351 {
1352 switch (fccno) {
1353 case 0:
1354 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1355 break;
1356 case 1:
1357 gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
1358 break;
1359 case 2:
1360 gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
1361 break;
1362 case 3:
1363 gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
1364 break;
1365 }
1366 }
1367
1368 static void gen_op_fcmpeq(int fccno)
1369 {
1370 switch (fccno) {
1371 case 0:
1372 gen_helper_fcmpeq(cpu_fsr, tcg_env);
1373 break;
1374 case 1:
1375 gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
1376 break;
1377 case 2:
1378 gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
1379 break;
1380 case 3:
1381 gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
1382 break;
1383 }
1384 }
1385
1386 #else
1387
1388 static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1389 {
1390 gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
1391 }
1392
1393 static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1394 {
1395 gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
1396 }
1397
1398 static void gen_op_fcmpq(int fccno)
1399 {
1400 gen_helper_fcmpq(cpu_fsr, tcg_env);
1401 }
1402
1403 static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1404 {
1405 gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
1406 }
1407
1408 static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1409 {
1410 gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
1411 }
1412
1413 static void gen_op_fcmpeq(int fccno)
1414 {
1415 gen_helper_fcmpeq(cpu_fsr, tcg_env);
1416 }
1417 #endif
1418
1419 static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1420 {
1421 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1422 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1423 gen_exception(dc, TT_FP_EXCP);
1424 }
1425
1426 static int gen_trap_ifnofpu(DisasContext *dc)
1427 {
1428 #if !defined(CONFIG_USER_ONLY)
1429 if (!dc->fpu_enabled) {
1430 gen_exception(dc, TT_NFPU_INSN);
1431 return 1;
1432 }
1433 #endif
1434 return 0;
1435 }
1436
1437 /* asi moves */
1438 typedef enum {
1439 GET_ASI_HELPER,
1440 GET_ASI_EXCP,
1441 GET_ASI_DIRECT,
1442 GET_ASI_DTWINX,
1443 GET_ASI_BLOCK,
1444 GET_ASI_SHORT,
1445 GET_ASI_BCOPY,
1446 GET_ASI_BFILL,
1447 } ASIType;
1448
1449 typedef struct {
1450 ASIType type;
1451 int asi;
1452 int mem_idx;
1453 MemOp memop;
1454 } DisasASI;
1455
1456 /*
1457 * Build DisasASI.
1458 * For asi == -1, treat as non-asi.
1459 * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1460 */
1461 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1462 {
1463 ASIType type = GET_ASI_HELPER;
1464 int mem_idx = dc->mem_idx;
1465
1466 if (asi == -1) {
1467 /* Artificial "non-asi" case. */
1468 type = GET_ASI_DIRECT;
1469 goto done;
1470 }
1471
1472 #ifndef TARGET_SPARC64
1473 /* Before v9, all asis are immediate and privileged. */
1474 if (asi < 0) {
1475 gen_exception(dc, TT_ILL_INSN);
1476 type = GET_ASI_EXCP;
1477 } else if (supervisor(dc)
1478 /* Note that LEON accepts ASI_USERDATA in user mode, for
1479 use with CASA. Also note that previous versions of
1480 QEMU allowed (and old versions of gcc emitted) ASI_P
1481 for LEON, which is incorrect. */
1482 || (asi == ASI_USERDATA
1483 && (dc->def->features & CPU_FEATURE_CASA))) {
1484 switch (asi) {
1485 case ASI_USERDATA: /* User data access */
1486 mem_idx = MMU_USER_IDX;
1487 type = GET_ASI_DIRECT;
1488 break;
1489 case ASI_KERNELDATA: /* Supervisor data access */
1490 mem_idx = MMU_KERNEL_IDX;
1491 type = GET_ASI_DIRECT;
1492 break;
1493 case ASI_M_BYPASS: /* MMU passthrough */
1494 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1495 mem_idx = MMU_PHYS_IDX;
1496 type = GET_ASI_DIRECT;
1497 break;
1498 case ASI_M_BCOPY: /* Block copy, sta access */
1499 mem_idx = MMU_KERNEL_IDX;
1500 type = GET_ASI_BCOPY;
1501 break;
1502 case ASI_M_BFILL: /* Block fill, stda access */
1503 mem_idx = MMU_KERNEL_IDX;
1504 type = GET_ASI_BFILL;
1505 break;
1506 }
1507
1508 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1509 * permissions check in get_physical_address(..).
1510 */
1511 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1512 } else {
1513 gen_exception(dc, TT_PRIV_INSN);
1514 type = GET_ASI_EXCP;
1515 }
1516 #else
1517 if (asi < 0) {
1518 asi = dc->asi;
1519 }
1520 /* With v9, all asis below 0x80 are privileged. */
1521 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1522 down that bit into DisasContext. For the moment that's ok,
1523 since the direct implementations below doesn't have any ASIs
1524 in the restricted [0x30, 0x7f] range, and the check will be
1525 done properly in the helper. */
1526 if (!supervisor(dc) && asi < 0x80) {
1527 gen_exception(dc, TT_PRIV_ACT);
1528 type = GET_ASI_EXCP;
1529 } else {
1530 switch (asi) {
1531 case ASI_REAL: /* Bypass */
1532 case ASI_REAL_IO: /* Bypass, non-cacheable */
1533 case ASI_REAL_L: /* Bypass LE */
1534 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1535 case ASI_TWINX_REAL: /* Real address, twinx */
1536 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1537 case ASI_QUAD_LDD_PHYS:
1538 case ASI_QUAD_LDD_PHYS_L:
1539 mem_idx = MMU_PHYS_IDX;
1540 break;
1541 case ASI_N: /* Nucleus */
1542 case ASI_NL: /* Nucleus LE */
1543 case ASI_TWINX_N:
1544 case ASI_TWINX_NL:
1545 case ASI_NUCLEUS_QUAD_LDD:
1546 case ASI_NUCLEUS_QUAD_LDD_L:
1547 if (hypervisor(dc)) {
1548 mem_idx = MMU_PHYS_IDX;
1549 } else {
1550 mem_idx = MMU_NUCLEUS_IDX;
1551 }
1552 break;
1553 case ASI_AIUP: /* As if user primary */
1554 case ASI_AIUPL: /* As if user primary LE */
1555 case ASI_TWINX_AIUP:
1556 case ASI_TWINX_AIUP_L:
1557 case ASI_BLK_AIUP_4V:
1558 case ASI_BLK_AIUP_L_4V:
1559 case ASI_BLK_AIUP:
1560 case ASI_BLK_AIUPL:
1561 mem_idx = MMU_USER_IDX;
1562 break;
1563 case ASI_AIUS: /* As if user secondary */
1564 case ASI_AIUSL: /* As if user secondary LE */
1565 case ASI_TWINX_AIUS:
1566 case ASI_TWINX_AIUS_L:
1567 case ASI_BLK_AIUS_4V:
1568 case ASI_BLK_AIUS_L_4V:
1569 case ASI_BLK_AIUS:
1570 case ASI_BLK_AIUSL:
1571 mem_idx = MMU_USER_SECONDARY_IDX;
1572 break;
1573 case ASI_S: /* Secondary */
1574 case ASI_SL: /* Secondary LE */
1575 case ASI_TWINX_S:
1576 case ASI_TWINX_SL:
1577 case ASI_BLK_COMMIT_S:
1578 case ASI_BLK_S:
1579 case ASI_BLK_SL:
1580 case ASI_FL8_S:
1581 case ASI_FL8_SL:
1582 case ASI_FL16_S:
1583 case ASI_FL16_SL:
1584 if (mem_idx == MMU_USER_IDX) {
1585 mem_idx = MMU_USER_SECONDARY_IDX;
1586 } else if (mem_idx == MMU_KERNEL_IDX) {
1587 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1588 }
1589 break;
1590 case ASI_P: /* Primary */
1591 case ASI_PL: /* Primary LE */
1592 case ASI_TWINX_P:
1593 case ASI_TWINX_PL:
1594 case ASI_BLK_COMMIT_P:
1595 case ASI_BLK_P:
1596 case ASI_BLK_PL:
1597 case ASI_FL8_P:
1598 case ASI_FL8_PL:
1599 case ASI_FL16_P:
1600 case ASI_FL16_PL:
1601 break;
1602 }
1603 switch (asi) {
1604 case ASI_REAL:
1605 case ASI_REAL_IO:
1606 case ASI_REAL_L:
1607 case ASI_REAL_IO_L:
1608 case ASI_N:
1609 case ASI_NL:
1610 case ASI_AIUP:
1611 case ASI_AIUPL:
1612 case ASI_AIUS:
1613 case ASI_AIUSL:
1614 case ASI_S:
1615 case ASI_SL:
1616 case ASI_P:
1617 case ASI_PL:
1618 type = GET_ASI_DIRECT;
1619 break;
1620 case ASI_TWINX_REAL:
1621 case ASI_TWINX_REAL_L:
1622 case ASI_TWINX_N:
1623 case ASI_TWINX_NL:
1624 case ASI_TWINX_AIUP:
1625 case ASI_TWINX_AIUP_L:
1626 case ASI_TWINX_AIUS:
1627 case ASI_TWINX_AIUS_L:
1628 case ASI_TWINX_P:
1629 case ASI_TWINX_PL:
1630 case ASI_TWINX_S:
1631 case ASI_TWINX_SL:
1632 case ASI_QUAD_LDD_PHYS:
1633 case ASI_QUAD_LDD_PHYS_L:
1634 case ASI_NUCLEUS_QUAD_LDD:
1635 case ASI_NUCLEUS_QUAD_LDD_L:
1636 type = GET_ASI_DTWINX;
1637 break;
1638 case ASI_BLK_COMMIT_P:
1639 case ASI_BLK_COMMIT_S:
1640 case ASI_BLK_AIUP_4V:
1641 case ASI_BLK_AIUP_L_4V:
1642 case ASI_BLK_AIUP:
1643 case ASI_BLK_AIUPL:
1644 case ASI_BLK_AIUS_4V:
1645 case ASI_BLK_AIUS_L_4V:
1646 case ASI_BLK_AIUS:
1647 case ASI_BLK_AIUSL:
1648 case ASI_BLK_S:
1649 case ASI_BLK_SL:
1650 case ASI_BLK_P:
1651 case ASI_BLK_PL:
1652 type = GET_ASI_BLOCK;
1653 break;
1654 case ASI_FL8_S:
1655 case ASI_FL8_SL:
1656 case ASI_FL8_P:
1657 case ASI_FL8_PL:
1658 memop = MO_UB;
1659 type = GET_ASI_SHORT;
1660 break;
1661 case ASI_FL16_S:
1662 case ASI_FL16_SL:
1663 case ASI_FL16_P:
1664 case ASI_FL16_PL:
1665 memop = MO_TEUW;
1666 type = GET_ASI_SHORT;
1667 break;
1668 }
1669 /* The little-endian asis all have bit 3 set. */
1670 if (asi & 8) {
1671 memop ^= MO_BSWAP;
1672 }
1673 }
1674 #endif
1675
1676 done:
1677 return (DisasASI){ type, asi, mem_idx, memop };
1678 }
1679
1680 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1681 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1682 TCGv_i32 asi, TCGv_i32 mop)
1683 {
1684 g_assert_not_reached();
1685 }
1686
1687 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1688 TCGv_i32 asi, TCGv_i32 mop)
1689 {
1690 g_assert_not_reached();
1691 }
1692 #endif
1693
1694 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1695 {
1696 switch (da->type) {
1697 case GET_ASI_EXCP:
1698 break;
1699 case GET_ASI_DTWINX: /* Reserved for ldda. */
1700 gen_exception(dc, TT_ILL_INSN);
1701 break;
1702 case GET_ASI_DIRECT:
1703 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1704 break;
1705 default:
1706 {
1707 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1708 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1709
1710 save_state(dc);
1711 #ifdef TARGET_SPARC64
1712 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1713 #else
1714 {
1715 TCGv_i64 t64 = tcg_temp_new_i64();
1716 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1717 tcg_gen_trunc_i64_tl(dst, t64);
1718 }
1719 #endif
1720 }
1721 break;
1722 }
1723 }
1724
1725 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1726 {
1727 switch (da->type) {
1728 case GET_ASI_EXCP:
1729 break;
1730
1731 case GET_ASI_DTWINX: /* Reserved for stda. */
1732 if (TARGET_LONG_BITS == 32) {
1733 gen_exception(dc, TT_ILL_INSN);
1734 break;
1735 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1736 /* Pre OpenSPARC CPUs don't have these */
1737 gen_exception(dc, TT_ILL_INSN);
1738 break;
1739 }
1740 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1741 /* fall through */
1742
1743 case GET_ASI_DIRECT:
1744 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1745 break;
1746
1747 case GET_ASI_BCOPY:
1748 assert(TARGET_LONG_BITS == 32);
1749 /* Copy 32 bytes from the address in SRC to ADDR. */
1750 /* ??? The original qemu code suggests 4-byte alignment, dropping
1751 the low bits, but the only place I can see this used is in the
1752 Linux kernel with 32 byte alignment, which would make more sense
1753 as a cacheline-style operation. */
1754 {
1755 TCGv saddr = tcg_temp_new();
1756 TCGv daddr = tcg_temp_new();
1757 TCGv four = tcg_constant_tl(4);
1758 TCGv_i32 tmp = tcg_temp_new_i32();
1759 int i;
1760
1761 tcg_gen_andi_tl(saddr, src, -4);
1762 tcg_gen_andi_tl(daddr, addr, -4);
1763 for (i = 0; i < 32; i += 4) {
1764 /* Since the loads and stores are paired, allow the
1765 copy to happen in the host endianness. */
1766 tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
1767 tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
1768 tcg_gen_add_tl(saddr, saddr, four);
1769 tcg_gen_add_tl(daddr, daddr, four);
1770 }
1771 }
1772 break;
1773
1774 default:
1775 {
1776 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1777 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1778
1779 save_state(dc);
1780 #ifdef TARGET_SPARC64
1781 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1782 #else
1783 {
1784 TCGv_i64 t64 = tcg_temp_new_i64();
1785 tcg_gen_extu_tl_i64(t64, src);
1786 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1787 }
1788 #endif
1789
1790 /* A write to a TLB register may alter page maps. End the TB. */
1791 dc->npc = DYNAMIC_PC;
1792 }
1793 break;
1794 }
1795 }
1796
1797 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1798 TCGv dst, TCGv src, TCGv addr)
1799 {
1800 switch (da->type) {
1801 case GET_ASI_EXCP:
1802 break;
1803 case GET_ASI_DIRECT:
1804 tcg_gen_atomic_xchg_tl(dst, addr, src,
1805 da->mem_idx, da->memop | MO_ALIGN);
1806 break;
1807 default:
1808 /* ??? Should be DAE_invalid_asi. */
1809 gen_exception(dc, TT_DATA_ACCESS);
1810 break;
1811 }
1812 }
1813
1814 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1815 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1816 {
1817 switch (da->type) {
1818 case GET_ASI_EXCP:
1819 return;
1820 case GET_ASI_DIRECT:
1821 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1822 da->mem_idx, da->memop | MO_ALIGN);
1823 break;
1824 default:
1825 /* ??? Should be DAE_invalid_asi. */
1826 gen_exception(dc, TT_DATA_ACCESS);
1827 break;
1828 }
1829 }
1830
1831 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1832 {
1833 switch (da->type) {
1834 case GET_ASI_EXCP:
1835 break;
1836 case GET_ASI_DIRECT:
1837 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1838 da->mem_idx, MO_UB);
1839 break;
1840 default:
1841 /* ??? In theory, this should be raise DAE_invalid_asi.
1842 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1843 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1844 gen_helper_exit_atomic(tcg_env);
1845 } else {
1846 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1847 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1848 TCGv_i64 s64, t64;
1849
1850 save_state(dc);
1851 t64 = tcg_temp_new_i64();
1852 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1853
1854 s64 = tcg_constant_i64(0xff);
1855 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1856
1857 tcg_gen_trunc_i64_tl(dst, t64);
1858
1859 /* End the TB. */
1860 dc->npc = DYNAMIC_PC;
1861 }
1862 break;
1863 }
1864 }
1865
1866 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1867 TCGv addr, int rd)
1868 {
1869 MemOp memop = da->memop;
1870 MemOp size = memop & MO_SIZE;
1871 TCGv_i32 d32;
1872 TCGv_i64 d64;
1873 TCGv addr_tmp;
1874
1875 /* TODO: Use 128-bit load/store below. */
1876 if (size == MO_128) {
1877 memop = (memop & ~MO_SIZE) | MO_64;
1878 }
1879
1880 switch (da->type) {
1881 case GET_ASI_EXCP:
1882 break;
1883
1884 case GET_ASI_DIRECT:
1885 memop |= MO_ALIGN_4;
1886 switch (size) {
1887 case MO_32:
1888 d32 = gen_dest_fpr_F(dc);
1889 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1890 gen_store_fpr_F(dc, rd, d32);
1891 break;
1892
1893 case MO_64:
1894 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1895 break;
1896
1897 case MO_128:
1898 d64 = tcg_temp_new_i64();
1899 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1900 addr_tmp = tcg_temp_new();
1901 tcg_gen_addi_tl(addr_tmp, addr, 8);
1902 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1903 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1904 break;
1905 default:
1906 g_assert_not_reached();
1907 }
1908 break;
1909
1910 case GET_ASI_BLOCK:
1911 /* Valid for lddfa on aligned registers only. */
1912 if (orig_size == MO_64 && (rd & 7) == 0) {
1913 /* The first operation checks required alignment. */
1914 addr_tmp = tcg_temp_new();
1915 for (int i = 0; ; ++i) {
1916 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1917 memop | (i == 0 ? MO_ALIGN_64 : 0));
1918 if (i == 7) {
1919 break;
1920 }
1921 tcg_gen_addi_tl(addr_tmp, addr, 8);
1922 addr = addr_tmp;
1923 }
1924 } else {
1925 gen_exception(dc, TT_ILL_INSN);
1926 }
1927 break;
1928
1929 case GET_ASI_SHORT:
1930 /* Valid for lddfa only. */
1931 if (orig_size == MO_64) {
1932 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1933 memop | MO_ALIGN);
1934 } else {
1935 gen_exception(dc, TT_ILL_INSN);
1936 }
1937 break;
1938
1939 default:
1940 {
1941 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1942 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1943
1944 save_state(dc);
1945 /* According to the table in the UA2011 manual, the only
1946 other asis that are valid for ldfa/lddfa/ldqfa are
1947 the NO_FAULT asis. We still need a helper for these,
1948 but we can just use the integer asi helper for them. */
1949 switch (size) {
1950 case MO_32:
1951 d64 = tcg_temp_new_i64();
1952 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1953 d32 = gen_dest_fpr_F(dc);
1954 tcg_gen_extrl_i64_i32(d32, d64);
1955 gen_store_fpr_F(dc, rd, d32);
1956 break;
1957 case MO_64:
1958 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1959 r_asi, r_mop);
1960 break;
1961 case MO_128:
1962 d64 = tcg_temp_new_i64();
1963 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1964 addr_tmp = tcg_temp_new();
1965 tcg_gen_addi_tl(addr_tmp, addr, 8);
1966 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1967 r_asi, r_mop);
1968 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1969 break;
1970 default:
1971 g_assert_not_reached();
1972 }
1973 }
1974 break;
1975 }
1976 }
1977
1978 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1979 TCGv addr, int rd)
1980 {
1981 MemOp memop = da->memop;
1982 MemOp size = memop & MO_SIZE;
1983 TCGv_i32 d32;
1984 TCGv addr_tmp;
1985
1986 /* TODO: Use 128-bit load/store below. */
1987 if (size == MO_128) {
1988 memop = (memop & ~MO_SIZE) | MO_64;
1989 }
1990
1991 switch (da->type) {
1992 case GET_ASI_EXCP:
1993 break;
1994
1995 case GET_ASI_DIRECT:
1996 memop |= MO_ALIGN_4;
1997 switch (size) {
1998 case MO_32:
1999 d32 = gen_load_fpr_F(dc, rd);
2000 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2001 break;
2002 case MO_64:
2003 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2004 memop | MO_ALIGN_4);
2005 break;
2006 case MO_128:
2007 /* Only 4-byte alignment required. However, it is legal for the
2008 cpu to signal the alignment fault, and the OS trap handler is
2009 required to fix it up. Requiring 16-byte alignment here avoids
2010 having to probe the second page before performing the first
2011 write. */
2012 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2013 memop | MO_ALIGN_16);
2014 addr_tmp = tcg_temp_new();
2015 tcg_gen_addi_tl(addr_tmp, addr, 8);
2016 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2017 break;
2018 default:
2019 g_assert_not_reached();
2020 }
2021 break;
2022
2023 case GET_ASI_BLOCK:
2024 /* Valid for stdfa on aligned registers only. */
2025 if (orig_size == MO_64 && (rd & 7) == 0) {
2026 /* The first operation checks required alignment. */
2027 addr_tmp = tcg_temp_new();
2028 for (int i = 0; ; ++i) {
2029 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2030 memop | (i == 0 ? MO_ALIGN_64 : 0));
2031 if (i == 7) {
2032 break;
2033 }
2034 tcg_gen_addi_tl(addr_tmp, addr, 8);
2035 addr = addr_tmp;
2036 }
2037 } else {
2038 gen_exception(dc, TT_ILL_INSN);
2039 }
2040 break;
2041
2042 case GET_ASI_SHORT:
2043 /* Valid for stdfa only. */
2044 if (orig_size == MO_64) {
2045 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2046 memop | MO_ALIGN);
2047 } else {
2048 gen_exception(dc, TT_ILL_INSN);
2049 }
2050 break;
2051
2052 default:
2053 /* According to the table in the UA2011 manual, the only
2054 other asis that are valid for ldfa/lddfa/ldqfa are
2055 the PST* asis, which aren't currently handled. */
2056 gen_exception(dc, TT_ILL_INSN);
2057 break;
2058 }
2059 }
2060
2061 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2062 {
2063 TCGv hi = gen_dest_gpr(dc, rd);
2064 TCGv lo = gen_dest_gpr(dc, rd + 1);
2065
2066 switch (da->type) {
2067 case GET_ASI_EXCP:
2068 return;
2069
2070 case GET_ASI_DTWINX:
2071 #ifdef TARGET_SPARC64
2072 {
2073 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2074 TCGv_i128 t = tcg_temp_new_i128();
2075
2076 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2077 /*
2078 * Note that LE twinx acts as if each 64-bit register result is
2079 * byte swapped. We perform one 128-bit LE load, so must swap
2080 * the order of the writebacks.
2081 */
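/*
 * Worked example (editor's illustration): if the 16 bytes at ADDR are
 * 00 01 ... 0f, the big-endian asi form yields
 *   rd = 0x0001020304050607, rd+1 = 0x08090a0b0c0d0e0f
 * while the little-endian asi form yields
 *   rd = 0x0706050403020100, rd+1 = 0x0f0e0d0c0b0a0908,
 * i.e. the same quads with each byte swapped -- hence hi and lo trade
 * places in the extract below.
 */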
2082 if ((mop & MO_BSWAP) == MO_TE) {
2083 tcg_gen_extr_i128_i64(lo, hi, t);
2084 } else {
2085 tcg_gen_extr_i128_i64(hi, lo, t);
2086 }
2087 }
2088 break;
2089 #else
2090 g_assert_not_reached();
2091 #endif
2092
2093 case GET_ASI_DIRECT:
2094 {
2095 TCGv_i64 tmp = tcg_temp_new_i64();
2096
2097 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2098
2099 /* Note that LE ldda acts as if each 32-bit register
2100 result is byte swapped. We perform one 64-bit load,
2101 so we must now swap the order of the writebacks. */
2102 if ((da->memop & MO_BSWAP) == MO_TE) {
2103 tcg_gen_extr_i64_tl(lo, hi, tmp);
2104 } else {
2105 tcg_gen_extr_i64_tl(hi, lo, tmp);
2106 }
2107 }
2108 break;
2109
2110 default:
2111 /* ??? In theory we've handled all of the ASIs that are valid
2112 for ldda, and this should raise DAE_invalid_asi. However,
2113 real hardware allows others. This can be seen with e.g.
2114 FreeBSD 10.3 wrt ASI_IC_TAG. */
2115 {
2116 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2117 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2118 TCGv_i64 tmp = tcg_temp_new_i64();
2119
2120 save_state(dc);
2121 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2122
2123 /* See above. */
2124 if ((da->memop & MO_BSWAP) == MO_TE) {
2125 tcg_gen_extr_i64_tl(lo, hi, tmp);
2126 } else {
2127 tcg_gen_extr_i64_tl(hi, lo, tmp);
2128 }
2129 }
2130 break;
2131 }
2132
2133 gen_store_gpr(dc, rd, hi);
2134 gen_store_gpr(dc, rd + 1, lo);
2135 }
2136
2137 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2138 {
2139 TCGv hi = gen_load_gpr(dc, rd);
2140 TCGv lo = gen_load_gpr(dc, rd + 1);
2141
2142 switch (da->type) {
2143 case GET_ASI_EXCP:
2144 break;
2145
2146 case GET_ASI_DTWINX:
2147 #ifdef TARGET_SPARC64
2148 {
2149 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2150 TCGv_i128 t = tcg_temp_new_i128();
2151
2152 /*
2153 * Note that LE twinx acts as if each 64-bit register result is
2154 * byte swapped. We perform one 128-bit LE store, so must swap
2155 * the order of the construction.
2156 */
2157 if ((mop & MO_BSWAP) == MO_TE) {
2158 tcg_gen_concat_i64_i128(t, lo, hi);
2159 } else {
2160 tcg_gen_concat_i64_i128(t, hi, lo);
2161 }
2162 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2163 }
2164 break;
2165 #else
2166 g_assert_not_reached();
2167 #endif
2168
2169 case GET_ASI_DIRECT:
2170 {
2171 TCGv_i64 t64 = tcg_temp_new_i64();
2172
2173 /* Note that LE stda acts as if each 32-bit register result is
2174 byte swapped. We will perform one 64-bit LE store, so now
2175 we must swap the order of the construction. */
2176 if ((da->memop & MO_BSWAP) == MO_TE) {
2177 tcg_gen_concat_tl_i64(t64, lo, hi);
2178 } else {
2179 tcg_gen_concat_tl_i64(t64, hi, lo);
2180 }
2181 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2182 }
2183 break;
2184
2185 case GET_ASI_BFILL:
2186 assert(TARGET_LONG_BITS == 32);
2187 /* Store 32 bytes of T64 to ADDR. */
2188 /* ??? The original qemu code suggests 8-byte alignment, dropping
2189 the low bits, but the only place I can see this used is in the
2190 Linux kernel with 32-byte alignment, which would make more sense
2191 as a cacheline-style operation. */
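/* In effect (editor's sketch): d_addr = addr & ~7, then the same 8-byte
   {hi,lo} pair is stored at d_addr, d_addr + 8, + 16 and + 24. */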
2192 {
2193 TCGv_i64 t64 = tcg_temp_new_i64();
2194 TCGv d_addr = tcg_temp_new();
2195 TCGv eight = tcg_constant_tl(8);
2196 int i;
2197
2198 tcg_gen_concat_tl_i64(t64, lo, hi);
2199 tcg_gen_andi_tl(d_addr, addr, -8);
2200 for (i = 0; i < 32; i += 8) {
2201 tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
2202 tcg_gen_add_tl(d_addr, d_addr, eight);
2203 }
2204 }
2205 break;
2206
2207 default:
2208 /* ??? In theory we've handled all of the ASIs that are valid
2209 for stda, and this should raise DAE_invalid_asi. */
2210 {
2211 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2212 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2213 TCGv_i64 t64 = tcg_temp_new_i64();
2214
2215 /* See above. */
2216 if ((da->memop & MO_BSWAP) == MO_TE) {
2217 tcg_gen_concat_tl_i64(t64, lo, hi);
2218 } else {
2219 tcg_gen_concat_tl_i64(t64, hi, lo);
2220 }
2221
2222 save_state(dc);
2223 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2224 }
2225 break;
2226 }
2227 }
2228
2229 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2230 {
2231 #ifdef TARGET_SPARC64
2232 TCGv_i32 c32, zero, dst, s1, s2;
2233 TCGv_i64 c64 = tcg_temp_new_i64();
2234
2235 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2236 or fold the comparison down to 32 bits and use movcond_i32. Choose
2237 the latter. */
2238 c32 = tcg_temp_new_i32();
2239 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2240 tcg_gen_extrl_i64_i32(c32, c64);
2241
2242 s1 = gen_load_fpr_F(dc, rs);
2243 s2 = gen_load_fpr_F(dc, rd);
2244 dst = gen_dest_fpr_F(dc);
2245 zero = tcg_constant_i32(0);
2246
2247 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2248
2249 gen_store_fpr_F(dc, rd, dst);
2250 #else
2251 qemu_build_not_reached();
2252 #endif
2253 }
2254
2255 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2256 {
2257 #ifdef TARGET_SPARC64
2258 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2259 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2260 gen_load_fpr_D(dc, rs),
2261 gen_load_fpr_D(dc, rd));
2262 gen_store_fpr_D(dc, rd, dst);
2263 #else
2264 qemu_build_not_reached();
2265 #endif
2266 }
2267
2268 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2269 {
2270 #ifdef TARGET_SPARC64
2271 int qd = QFPREG(rd);
2272 int qs = QFPREG(rs);
2273 TCGv c2 = tcg_constant_tl(cmp->c2);
2274
2275 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2276 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2277 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2278 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2279
2280 gen_update_fprs_dirty(dc, qd);
2281 #else
2282 qemu_build_not_reached();
2283 #endif
2284 }
2285
2286 #ifdef TARGET_SPARC64
2287 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2288 {
2289 TCGv_i32 r_tl = tcg_temp_new_i32();
2290
2291 /* load env->tl into r_tl */
2292 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2293
2294 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2295 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2296
2297 /* calculate offset to current trap state from env->ts, reuse r_tl */
2298 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2299 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2300
2301 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2302 {
2303 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2304 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2305 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2306 }
2307 }
2308 #endif
2309
2310 static int extract_dfpreg(DisasContext *dc, int x)
2311 {
2312 return DFPREG(x);
2313 }
2314
2315 static int extract_qfpreg(DisasContext *dc, int x)
2316 {
2317 return QFPREG(x);
2318 }
2319
2320 /* Include the auto-generated decoder. */
2321 #include "decode-insns.c.inc"
2322
2323 #define TRANS(NAME, AVAIL, FUNC, ...) \
2324 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2325 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
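/*
 * For example (editor's illustration), TRANS(RDCCR, 64, do_rd_special,
 * true, a->rd, do_rdccr) below expands to
 *
 *   static bool trans_RDCCR(DisasContext *dc, arg_RDCCR *a)
 *   { return avail_64(dc) && do_rd_special(dc, true, a->rd, do_rdccr); }
 *
 * so every decoder hook first gates on cpu availability, then forwards
 * to a shared implementation.
 */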
2326
2327 #define avail_ALL(C) true
2328 #ifdef TARGET_SPARC64
2329 # define avail_32(C) false
2330 # define avail_ASR17(C) false
2331 # define avail_CASA(C) true
2332 # define avail_DIV(C) true
2333 # define avail_MUL(C) true
2334 # define avail_POWERDOWN(C) false
2335 # define avail_64(C) true
2336 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2337 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2338 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2339 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2340 #else
2341 # define avail_32(C) true
2342 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2343 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2344 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2345 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2346 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2347 # define avail_64(C) false
2348 # define avail_GL(C) false
2349 # define avail_HYPV(C) false
2350 # define avail_VIS1(C) false
2351 # define avail_VIS2(C) false
2352 #endif
2353
2354 /* Default case for non-jump instructions. */
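/*
 * pc/npc recap (editor's note): SPARC architecturally exposes a (pc, npc)
 * pair because of delayed branches. Straight-line code has npc == pc + 4;
 * a taken branch leaves pc at the delay slot while npc already holds the
 * target. advance_pc() steps this pair across one non-branching insn.
 */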
2355 static bool advance_pc(DisasContext *dc)
2356 {
2357 TCGLabel *l1;
2358
2359 finishing_insn(dc);
2360
2361 if (dc->npc & 3) {
2362 switch (dc->npc) {
2363 case DYNAMIC_PC:
2364 case DYNAMIC_PC_LOOKUP:
2365 dc->pc = dc->npc;
2366 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2367 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2368 break;
2369
2370 case JUMP_PC:
2371 /* we can do a static jump */
2372 l1 = gen_new_label();
2373 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2374
2375 /* jump not taken */
2376 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2377
2378 /* jump taken */
2379 gen_set_label(l1);
2380 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2381
2382 dc->base.is_jmp = DISAS_NORETURN;
2383 break;
2384
2385 default:
2386 g_assert_not_reached();
2387 }
2388 } else {
2389 dc->pc = dc->npc;
2390 dc->npc = dc->npc + 4;
2391 }
2392 return true;
2393 }
2394
2395 /*
2396 * Major opcodes 00 and 01 -- branches, call, and sethi
2397 */
2398
2399 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2400 bool annul, int disp)
2401 {
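/*
 * Annul-bit recap (editor's note): for a conditional branch the delay
 * slot is annulled only when the branch is NOT taken; for the
 * unconditional ba/bn forms the annul bit suppresses the delay slot
 * outright. That is why the ALWAYS and NEVER cases below only adjust
 * pc/npc instead of emitting a comparison.
 */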
2402 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2403 target_ulong npc;
2404
2405 finishing_insn(dc);
2406
2407 if (cmp->cond == TCG_COND_ALWAYS) {
2408 if (annul) {
2409 dc->pc = dest;
2410 dc->npc = dest + 4;
2411 } else {
2412 gen_mov_pc_npc(dc);
2413 dc->npc = dest;
2414 }
2415 return true;
2416 }
2417
2418 if (cmp->cond == TCG_COND_NEVER) {
2419 npc = dc->npc;
2420 if (npc & 3) {
2421 gen_mov_pc_npc(dc);
2422 if (annul) {
2423 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2424 }
2425 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2426 } else {
2427 dc->pc = npc + (annul ? 4 : 0);
2428 dc->npc = dc->pc + 4;
2429 }
2430 return true;
2431 }
2432
2433 flush_cond(dc);
2434 npc = dc->npc;
2435
2436 if (annul) {
2437 TCGLabel *l1 = gen_new_label();
2438
2439 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2440 gen_goto_tb(dc, 0, npc, dest);
2441 gen_set_label(l1);
2442 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2443
2444 dc->base.is_jmp = DISAS_NORETURN;
2445 } else {
2446 if (npc & 3) {
2447 switch (npc) {
2448 case DYNAMIC_PC:
2449 case DYNAMIC_PC_LOOKUP:
2450 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2451 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2452 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2453 cmp->c1, tcg_constant_tl(cmp->c2),
2454 tcg_constant_tl(dest), cpu_npc);
2455 dc->pc = npc;
2456 break;
2457 default:
2458 g_assert_not_reached();
2459 }
2460 } else {
2461 dc->pc = npc;
2462 dc->npc = JUMP_PC;
2463 dc->jump = *cmp;
2464 dc->jump_pc[0] = dest;
2465 dc->jump_pc[1] = npc + 4;
2466
2467 /* The condition for cpu_cond is always NE -- normalize. */
2468 if (cmp->cond == TCG_COND_NE) {
2469 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2470 } else {
2471 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2472 }
2473 dc->cpu_cond_live = true;
2474 }
2475 }
2476 return true;
2477 }
2478
2479 static bool raise_priv(DisasContext *dc)
2480 {
2481 gen_exception(dc, TT_PRIV_INSN);
2482 return true;
2483 }
2484
2485 static bool raise_unimpfpop(DisasContext *dc)
2486 {
2487 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2488 return true;
2489 }
2490
2491 static bool gen_trap_float128(DisasContext *dc)
2492 {
2493 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2494 return false;
2495 }
2496 return raise_unimpfpop(dc);
2497 }
2498
2499 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2500 {
2501 DisasCompare cmp;
2502
2503 gen_compare(&cmp, a->cc, a->cond, dc);
2504 return advance_jump_cond(dc, &cmp, a->a, a->i);
2505 }
2506
2507 TRANS(Bicc, ALL, do_bpcc, a)
2508 TRANS(BPcc, 64, do_bpcc, a)
2509
2510 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2511 {
2512 DisasCompare cmp;
2513
2514 if (gen_trap_ifnofpu(dc)) {
2515 return true;
2516 }
2517 gen_fcompare(&cmp, a->cc, a->cond);
2518 return advance_jump_cond(dc, &cmp, a->a, a->i);
2519 }
2520
2521 TRANS(FBPfcc, 64, do_fbpfcc, a)
2522 TRANS(FBfcc, ALL, do_fbpfcc, a)
2523
2524 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2525 {
2526 DisasCompare cmp;
2527
2528 if (!avail_64(dc)) {
2529 return false;
2530 }
2531 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
2532 return false;
2533 }
2534
2535 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
2536 return advance_jump_cond(dc, &cmp, a->a, a->i);
2537 }
2538
2539 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2540 {
2541 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2542
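/* CALL deposits its own address in %o7 (r15); the matching leaf return
   "retl" is "jmpl %o7 + 8, %g0", resuming after the call's delay slot.
   (Editor's note.) */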
2543 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2544 gen_mov_pc_npc(dc);
2545 dc->npc = target;
2546 return true;
2547 }
2548
2549 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2550 {
2551 /*
2552 * For sparc32, always generate the no-coprocessor exception.
2553 * For sparc64, always generate the illegal-instruction exception.
2554 */
2555 #ifdef TARGET_SPARC64
2556 return false;
2557 #else
2558 gen_exception(dc, TT_NCP_INSN);
2559 return true;
2560 #endif
2561 }
2562
2563 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2564 {
2565 /* Special-case %g0 because that's the canonical nop. */
2566 if (a->rd) {
2567 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2568 }
2569 return advance_pc(dc);
2570 }
2571
2572 /*
2573 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2574 */
2575
2576 static bool do_tcc(DisasContext *dc, int cond, int cc,
2577 int rs1, bool imm, int rs2_or_imm)
2578 {
2579 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2580 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2581 DisasCompare cmp;
2582 TCGLabel *lab;
2583 TCGv_i32 trap;
2584
2585 /* Trap never. */
2586 if (cond == 0) {
2587 return advance_pc(dc);
2588 }
2589
2590 /*
2591 * Immediate traps are the most common case. Since this value is
2592 * live across the branch, it really pays to evaluate the constant.
2593 */
2594 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2595 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2596 } else {
2597 trap = tcg_temp_new_i32();
2598 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2599 if (imm) {
2600 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2601 } else {
2602 TCGv_i32 t2 = tcg_temp_new_i32();
2603 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2604 tcg_gen_add_i32(trap, trap, t2);
2605 }
2606 tcg_gen_andi_i32(trap, trap, mask);
2607 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2608 }
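/*
 * Example (editor's note): the common Linux syscall idiom "ta 0x10" has
 * rs1 == %g0 and an immediate operand, so it takes the constant path
 * above and folds to tcg_constant_i32((0x10 & mask) + TT_TRAP).
 */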
2609
2610 finishing_insn(dc);
2611
2612 /* Trap always. */
2613 if (cond == 8) {
2614 save_state(dc);
2615 gen_helper_raise_exception(tcg_env, trap);
2616 dc->base.is_jmp = DISAS_NORETURN;
2617 return true;
2618 }
2619
2620 /* Conditional trap. */
2621 flush_cond(dc);
2622 lab = delay_exceptionv(dc, trap);
2623 gen_compare(&cmp, cc, cond, dc);
2624 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2625
2626 return advance_pc(dc);
2627 }
2628
2629 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2630 {
2631 if (avail_32(dc) && a->cc) {
2632 return false;
2633 }
2634 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2635 }
2636
2637 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2638 {
2639 if (avail_64(dc)) {
2640 return false;
2641 }
2642 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2643 }
2644
2645 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2646 {
2647 if (avail_32(dc)) {
2648 return false;
2649 }
2650 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2651 }
2652
2653 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2654 {
2655 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2656 return advance_pc(dc);
2657 }
2658
2659 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2660 {
2661 if (avail_32(dc)) {
2662 return false;
2663 }
2664 if (a->mmask) {
2665 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2666 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2667 }
2668 if (a->cmask) {
2669 /* For #Sync, etc, end the TB to recognize interrupts. */
2670 dc->base.is_jmp = DISAS_EXIT;
2671 }
2672 return advance_pc(dc);
2673 }
2674
2675 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2676 TCGv (*func)(DisasContext *, TCGv))
2677 {
2678 if (!priv) {
2679 return raise_priv(dc);
2680 }
2681 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2682 return advance_pc(dc);
2683 }
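/*
 * Editor's note: FUNC may either fill DST and return it, or ignore DST
 * and return another live TCGv (do_rdy below simply returns cpu_y);
 * gen_store_gpr copies whichever value comes back into %rd.
 */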
2684
2685 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2686 {
2687 return cpu_y;
2688 }
2689
2690 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2691 {
2692 /*
2693 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2694 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2695 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2696 */
2697 if (avail_64(dc) && a->rs1 != 0) {
2698 return false;
2699 }
2700 return do_rd_special(dc, true, a->rd, do_rdy);
2701 }
2702
2703 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2704 {
2705 uint32_t val;
2706
2707 /*
2708 * TODO: There are many more fields to be filled,
2709 * some of which are writable.
2710 */
2711 val = dc->def->nwindows - 1; /* [4:0] NWIN */
2712 val |= 1 << 8; /* [8] V8 */
2713
2714 return tcg_constant_tl(val);
2715 }
2716
2717 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2718
2719 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2720 {
2721 gen_helper_rdccr(dst, tcg_env);
2722 return dst;
2723 }
2724
2725 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2726
2727 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2728 {
2729 #ifdef TARGET_SPARC64
2730 return tcg_constant_tl(dc->asi);
2731 #else
2732 qemu_build_not_reached();
2733 #endif
2734 }
2735
2736 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2737
2738 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2739 {
2740 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2741
2742 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2743 if (translator_io_start(&dc->base)) {
2744 dc->base.is_jmp = DISAS_EXIT;
2745 }
2746 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2747 tcg_constant_i32(dc->mem_idx));
2748 return dst;
2749 }
2750
2751 /* TODO: non-priv access only allowed when enabled. */
2752 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2753
2754 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2755 {
2756 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2757 }
2758
2759 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2760
2761 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2762 {
2763 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2764 return dst;
2765 }
2766
2767 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2768
2769 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2770 {
2771 gen_trap_ifnofpu(dc);
2772 return cpu_gsr;
2773 }
2774
2775 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2776
2777 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2778 {
2779 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2780 return dst;
2781 }
2782
2783 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2784
2785 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2786 {
2787 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2788 return dst;
2789 }
2790
2791 /* TODO: non-priv access only allowed when enabled. */
2792 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2793
2794 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2795 {
2796 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2797
2798 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2799 if (translator_io_start(&dc->base)) {
2800 dc->base.is_jmp = DISAS_EXIT;
2801 }
2802 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2803 tcg_constant_i32(dc->mem_idx));
2804 return dst;
2805 }
2806
2807 /* TODO: non-priv access only allowed when enabled. */
2808 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2809
2810 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2811 {
2812 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2813 return dst;
2814 }
2815
2816 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2817 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2818
2819 /*
2820 * UltraSPARC-T1 Strand status.
2821 * The HYPV check may not be enough: UA2005 & UA2007 describe
2822 * this ASR as implementation-dependent.
2823 */
2824 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2825 {
2826 return tcg_constant_tl(1);
2827 }
2828
2829 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2830
2831 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2832 {
2833 gen_helper_rdpsr(dst, tcg_env);
2834 return dst;
2835 }
2836
2837 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2838
2839 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2840 {
2841 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2842 return dst;
2843 }
2844
2845 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2846
2847 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2848 {
2849 TCGv_i32 tl = tcg_temp_new_i32();
2850 TCGv_ptr tp = tcg_temp_new_ptr();
2851
2852 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2853 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2854 tcg_gen_shli_i32(tl, tl, 3);
2855 tcg_gen_ext_i32_ptr(tp, tl);
2856 tcg_gen_add_ptr(tp, tp, tcg_env);
2857
2858 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2859 return dst;
2860 }
2861
2862 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2863
2864 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2865 {
2866 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2867 return dst;
2868 }
2869
2870 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2871
2872 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2873 {
2874 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2875 return dst;
2876 }
2877
2878 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2879
2880 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2881 {
2882 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2883 return dst;
2884 }
2885
2886 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2887
2888 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2889 {
2890 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2891 return dst;
2892 }
2893
2894 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2895 do_rdhstick_cmpr)
2896
2897 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2898 {
2899 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2900 return dst;
2901 }
2902
2903 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2904
2905 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2906 {
2907 #ifdef TARGET_SPARC64
2908 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2909
2910 gen_load_trap_state_at_tl(r_tsptr);
2911 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2912 return dst;
2913 #else
2914 qemu_build_not_reached();
2915 #endif
2916 }
2917
2918 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2919
2920 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2921 {
2922 #ifdef TARGET_SPARC64
2923 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2924
2925 gen_load_trap_state_at_tl(r_tsptr);
2926 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2927 return dst;
2928 #else
2929 qemu_build_not_reached();
2930 #endif
2931 }
2932
2933 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2934
2935 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2936 {
2937 #ifdef TARGET_SPARC64
2938 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2939
2940 gen_load_trap_state_at_tl(r_tsptr);
2941 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2942 return dst;
2943 #else
2944 qemu_build_not_reached();
2945 #endif
2946 }
2947
2948 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2949
2950 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2951 {
2952 #ifdef TARGET_SPARC64
2953 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2954
2955 gen_load_trap_state_at_tl(r_tsptr);
2956 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2957 return dst;
2958 #else
2959 qemu_build_not_reached();
2960 #endif
2961 }
2962
2963 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2964 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2965
2966 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2967 {
2968 return cpu_tbr;
2969 }
2970
2971 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2972 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2973
2974 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2975 {
2976 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2977 return dst;
2978 }
2979
2980 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2981
2982 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2983 {
2984 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2985 return dst;
2986 }
2987
2988 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2989
2990 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2991 {
2992 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2993 return dst;
2994 }
2995
2996 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2997
2998 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2999 {
3000 gen_helper_rdcwp(dst, tcg_env);
3001 return dst;
3002 }
3003
3004 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3005
3006 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3007 {
3008 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3009 return dst;
3010 }
3011
3012 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3013
3014 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3015 {
3016 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3017 return dst;
3018 }
3019
3020 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3021 do_rdcanrestore)
3022
3023 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3024 {
3025 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3026 return dst;
3027 }
3028
3029 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3030
3031 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3032 {
3033 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3034 return dst;
3035 }
3036
3037 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3038
3039 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3040 {
3041 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3042 return dst;
3043 }
3044
3045 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3046
3047 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3048 {
3049 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3050 return dst;
3051 }
3052
3053 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3054
3055 /* UA2005 strand status */
3056 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3057 {
3058 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3059 return dst;
3060 }
3061
3062 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3063
3064 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3065 {
3066 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3067 return dst;
3068 }
3069
3070 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3071
3072 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3073 {
3074 if (avail_64(dc)) {
3075 gen_helper_flushw(tcg_env);
3076 return advance_pc(dc);
3077 }
3078 return false;
3079 }
3080
3081 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3082 void (*func)(DisasContext *, TCGv))
3083 {
3084 TCGv src;
3085
3086 /* For simplicity, we under-decoded the rs2 form. */
3087 if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3088 return false;
3089 }
3090 if (!priv) {
3091 return raise_priv(dc);
3092 }
3093
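/*
 * Editor's note: the architectural WR* forms write "r[rs1] xor operand2",
 * not the sum -- hence the xor/xori below rather than add.
 */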
3094 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3095 src = tcg_constant_tl(a->rs2_or_imm);
3096 } else {
3097 TCGv src1 = gen_load_gpr(dc, a->rs1);
3098 if (a->rs2_or_imm == 0) {
3099 src = src1;
3100 } else {
3101 src = tcg_temp_new();
3102 if (a->imm) {
3103 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3104 } else {
3105 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3106 }
3107 }
3108 }
3109 func(dc, src);
3110 return advance_pc(dc);
3111 }
3112
3113 static void do_wry(DisasContext *dc, TCGv src)
3114 {
3115 tcg_gen_ext32u_tl(cpu_y, src);
3116 }
3117
3118 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3119
3120 static void do_wrccr(DisasContext *dc, TCGv src)
3121 {
3122 gen_helper_wrccr(tcg_env, src);
3123 }
3124
3125 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3126
3127 static void do_wrasi(DisasContext *dc, TCGv src)
3128 {
3129 TCGv tmp = tcg_temp_new();
3130
3131 tcg_gen_ext8u_tl(tmp, src);
3132 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3133 /* End TB to notice changed ASI. */
3134 dc->base.is_jmp = DISAS_EXIT;
3135 }
3136
3137 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3138
3139 static void do_wrfprs(DisasContext *dc, TCGv src)
3140 {
3141 #ifdef TARGET_SPARC64
3142 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3143 dc->fprs_dirty = 0;
3144 dc->base.is_jmp = DISAS_EXIT;
3145 #else
3146 qemu_build_not_reached();
3147 #endif
3148 }
3149
3150 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3151
3152 static void do_wrgsr(DisasContext *dc, TCGv src)
3153 {
3154 gen_trap_ifnofpu(dc);
3155 tcg_gen_mov_tl(cpu_gsr, src);
3156 }
3157
3158 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3159
3160 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3161 {
3162 gen_helper_set_softint(tcg_env, src);
3163 }
3164
3165 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3166
3167 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3168 {
3169 gen_helper_clear_softint(tcg_env, src);
3170 }
3171
3172 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3173
3174 static void do_wrsoftint(DisasContext *dc, TCGv src)
3175 {
3176 gen_helper_write_softint(tcg_env, src);
3177 }
3178
3179 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3180
3181 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3182 {
3183 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3184
3185 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3186 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3187 translator_io_start(&dc->base);
3188 gen_helper_tick_set_limit(r_tickptr, src);
3189 /* End TB to handle timer interrupt */
3190 dc->base.is_jmp = DISAS_EXIT;
3191 }
3192
3193 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3194
3195 static void do_wrstick(DisasContext *dc, TCGv src)
3196 {
3197 #ifdef TARGET_SPARC64
3198 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3199
3200 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3201 translator_io_start(&dc->base);
3202 gen_helper_tick_set_count(r_tickptr, src);
3203 /* End TB to handle timer interrupt */
3204 dc->base.is_jmp = DISAS_EXIT;
3205 #else
3206 qemu_build_not_reached();
3207 #endif
3208 }
3209
3210 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3211
3212 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3213 {
3214 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3215
3216 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3217 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3218 translator_io_start(&dc->base);
3219 gen_helper_tick_set_limit(r_tickptr, src);
3220 /* End TB to handle timer interrupt */
3221 dc->base.is_jmp = DISAS_EXIT;
3222 }
3223
3224 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3225
3226 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3227 {
3228 finishing_insn(dc);
3229 save_state(dc);
3230 gen_helper_power_down(tcg_env);
3231 }
3232
3233 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3234
3235 static void do_wrpsr(DisasContext *dc, TCGv src)
3236 {
3237 gen_helper_wrpsr(tcg_env, src);
3238 dc->base.is_jmp = DISAS_EXIT;
3239 }
3240
3241 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3242
3243 static void do_wrwim(DisasContext *dc, TCGv src)
3244 {
3245 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3246 TCGv tmp = tcg_temp_new();
3247
3248 tcg_gen_andi_tl(tmp, src, mask);
3249 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3250 }
3251
3252 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3253
3254 static void do_wrtpc(DisasContext *dc, TCGv src)
3255 {
3256 #ifdef TARGET_SPARC64
3257 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3258
3259 gen_load_trap_state_at_tl(r_tsptr);
3260 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3261 #else
3262 qemu_build_not_reached();
3263 #endif
3264 }
3265
3266 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3267
3268 static void do_wrtnpc(DisasContext *dc, TCGv src)
3269 {
3270 #ifdef TARGET_SPARC64
3271 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3272
3273 gen_load_trap_state_at_tl(r_tsptr);
3274 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3275 #else
3276 qemu_build_not_reached();
3277 #endif
3278 }
3279
3280 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3281
3282 static void do_wrtstate(DisasContext *dc, TCGv src)
3283 {
3284 #ifdef TARGET_SPARC64
3285 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3286
3287 gen_load_trap_state_at_tl(r_tsptr);
3288 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3289 #else
3290 qemu_build_not_reached();
3291 #endif
3292 }
3293
3294 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3295
3296 static void do_wrtt(DisasContext *dc, TCGv src)
3297 {
3298 #ifdef TARGET_SPARC64
3299 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3300
3301 gen_load_trap_state_at_tl(r_tsptr);
3302 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3303 #else
3304 qemu_build_not_reached();
3305 #endif
3306 }
3307
3308 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3309
3310 static void do_wrtick(DisasContext *dc, TCGv src)
3311 {
3312 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3313
3314 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3315 translator_io_start(&dc->base);
3316 gen_helper_tick_set_count(r_tickptr, src);
3317 /* End TB to handle timer interrupt */
3318 dc->base.is_jmp = DISAS_EXIT;
3319 }
3320
3321 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3322
3323 static void do_wrtba(DisasContext *dc, TCGv src)
3324 {
3325 tcg_gen_mov_tl(cpu_tbr, src);
3326 }
3327
3328 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3329
3330 static void do_wrpstate(DisasContext *dc, TCGv src)
3331 {
3332 save_state(dc);
3333 if (translator_io_start(&dc->base)) {
3334 dc->base.is_jmp = DISAS_EXIT;
3335 }
3336 gen_helper_wrpstate(tcg_env, src);
3337 dc->npc = DYNAMIC_PC;
3338 }
3339
3340 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3341
3342 static void do_wrtl(DisasContext *dc, TCGv src)
3343 {
3344 save_state(dc);
3345 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3346 dc->npc = DYNAMIC_PC;
3347 }
3348
3349 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3350
3351 static void do_wrpil(DisasContext *dc, TCGv src)
3352 {
3353 if (translator_io_start(&dc->base)) {
3354 dc->base.is_jmp = DISAS_EXIT;
3355 }
3356 gen_helper_wrpil(tcg_env, src);
3357 }
3358
3359 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3360
3361 static void do_wrcwp(DisasContext *dc, TCGv src)
3362 {
3363 gen_helper_wrcwp(tcg_env, src);
3364 }
3365
3366 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3367
3368 static void do_wrcansave(DisasContext *dc, TCGv src)
3369 {
3370 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3371 }
3372
3373 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3374
3375 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3376 {
3377 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3378 }
3379
3380 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3381
3382 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3383 {
3384 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3385 }
3386
3387 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3388
3389 static void do_wrotherwin(DisasContext *dc, TCGv src)
3390 {
3391 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3392 }
3393
3394 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3395
3396 static void do_wrwstate(DisasContext *dc, TCGv src)
3397 {
3398 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3399 }
3400
3401 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3402
3403 static void do_wrgl(DisasContext *dc, TCGv src)
3404 {
3405 gen_helper_wrgl(tcg_env, src);
3406 }
3407
3408 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3409
3410 /* UA2005 strand status */
3411 static void do_wrssr(DisasContext *dc, TCGv src)
3412 {
3413 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3414 }
3415
3416 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3417
3418 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3419
3420 static void do_wrhpstate(DisasContext *dc, TCGv src)
3421 {
3422 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3423 dc->base.is_jmp = DISAS_EXIT;
3424 }
3425
3426 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3427
3428 static void do_wrhtstate(DisasContext *dc, TCGv src)
3429 {
3430 TCGv_i32 tl = tcg_temp_new_i32();
3431 TCGv_ptr tp = tcg_temp_new_ptr();
3432
3433 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3434 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3435 tcg_gen_shli_i32(tl, tl, 3);
3436 tcg_gen_ext_i32_ptr(tp, tl);
3437 tcg_gen_add_ptr(tp, tp, tcg_env);
3438
3439 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3440 }
3441
3442 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3443
3444 static void do_wrhintp(DisasContext *dc, TCGv src)
3445 {
3446 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3447 }
3448
3449 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3450
3451 static void do_wrhtba(DisasContext *dc, TCGv src)
3452 {
3453 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3454 }
3455
3456 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3457
3458 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3459 {
3460 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3461
3462 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3463 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3464 translator_io_start(&dc->base);
3465 gen_helper_tick_set_limit(r_tickptr, src);
3466 /* End TB to handle timer interrupt */
3467 dc->base.is_jmp = DISAS_EXIT;
3468 }
3469
3470 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3471 do_wrhstick_cmpr)
3472
3473 static bool do_saved_restored(DisasContext *dc, bool saved)
3474 {
3475 if (!supervisor(dc)) {
3476 return raise_priv(dc);
3477 }
3478 if (saved) {
3479 gen_helper_saved(tcg_env);
3480 } else {
3481 gen_helper_restored(tcg_env);
3482 }
3483 return advance_pc(dc);
3484 }
3485
3486 TRANS(SAVED, 64, do_saved_restored, true)
3487 TRANS(RESTORED, 64, do_saved_restored, false)
3488
3489 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3490 {
3491 return advance_pc(dc);
3492 }
3493
3494 /*
3495 * TODO: Need a feature bit for sparcv8.
3496 * In the meantime, treat all 32-bit cpus like sparcv7.
3497 */
3498 TRANS(NOP_v7, 32, trans_NOP, a)
3499 TRANS(NOP_v9, 64, trans_NOP, a)
3500
3501 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3502 void (*func)(TCGv, TCGv, TCGv),
3503 void (*funci)(TCGv, TCGv, target_long),
3504 bool logic_cc)
3505 {
3506 TCGv dst, src1;
3507
3508 /* For simplicity, we under-decoded the rs2 form. */
3509 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3510 return false;
3511 }
3512
3513 if (logic_cc) {
3514 dst = cpu_cc_N;
3515 } else {
3516 dst = gen_dest_gpr(dc, a->rd);
3517 }
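/*
 * Editor's note: for the logic-cc forms the result itself encodes the
 * flags, so it is computed directly into cpu_cc_N; Z becomes a copy of
 * the result and C/V are zeroed below.
 */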
3518 src1 = gen_load_gpr(dc, a->rs1);
3519
3520 if (a->imm || a->rs2_or_imm == 0) {
3521 if (funci) {
3522 funci(dst, src1, a->rs2_or_imm);
3523 } else {
3524 func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3525 }
3526 } else {
3527 func(dst, src1, cpu_regs[a->rs2_or_imm]);
3528 }
3529
3530 if (logic_cc) {
3531 if (TARGET_LONG_BITS == 64) {
3532 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3533 tcg_gen_movi_tl(cpu_icc_C, 0);
3534 }
3535 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3536 tcg_gen_movi_tl(cpu_cc_C, 0);
3537 tcg_gen_movi_tl(cpu_cc_V, 0);
3538 }
3539
3540 gen_store_gpr(dc, a->rd, dst);
3541 return advance_pc(dc);
3542 }
3543
3544 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3545 void (*func)(TCGv, TCGv, TCGv),
3546 void (*funci)(TCGv, TCGv, target_long),
3547 void (*func_cc)(TCGv, TCGv, TCGv))
3548 {
3549 if (a->cc) {
3550 return do_arith_int(dc, a, func_cc, NULL, false);
3551 }
3552 return do_arith_int(dc, a, func, funci, false);
3553 }
3554
3555 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3556 void (*func)(TCGv, TCGv, TCGv),
3557 void (*funci)(TCGv, TCGv, target_long))
3558 {
3559 return do_arith_int(dc, a, func, funci, a->cc);
3560 }
3561
3562 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3563 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3564 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3565 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3566
3567 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3568 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3569 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3570 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3571
3572 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3573 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3574 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3575 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3576 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3577
3578 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3579 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3580 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3581 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3582
3583 TRANS(UDIVX, 64, do_arith, a, gen_op_udivx, NULL, NULL)
3584 TRANS(SDIVX, 64, do_arith, a, gen_op_sdivx, NULL, NULL)
3585 TRANS(UDIV, DIV, do_arith, a, gen_op_udiv, NULL, gen_op_udivcc)
3586 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3587
3588 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3589 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3590
3591 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3592 {
3593 /* OR with %g0 is the canonical alias for MOV. */
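/* E.g. "mov 5, %o0" assembles to "or %g0, 5, %o0" and is handled here
   without going through do_logic. (Editor's illustration.) */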
3594 if (!a->cc && a->rs1 == 0) {
3595 if (a->imm || a->rs2_or_imm == 0) {
3596 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3597 } else if (a->rs2_or_imm & ~0x1f) {
3598 /* For simplicity, we under-decoded the rs2 form. */
3599 return false;
3600 } else {
3601 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3602 }
3603 return advance_pc(dc);
3604 }
3605 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3606 }
3607
3608 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3609 int width, bool cc, bool left)
3610 {
3611 TCGv dst, s1, s2, lo1, lo2;
3612 uint64_t amask, tabl, tabr;
3613 int shift, imask, omask;
3614
3615 dst = gen_dest_gpr(dc, a->rd);
3616 s1 = gen_load_gpr(dc, a->rs1);
3617 s2 = gen_load_gpr(dc, a->rs2);
3618
3619 if (cc) {
3620 gen_op_subcc(cpu_cc_N, s1, s2);
3621 }
3622
3623 /*
3624 * Theory of operation: there are two tables, left and right (not to
3625 * be confused with the left and right versions of the opcode). These
3626 * are indexed by the low 3 bits of the inputs. To make things "easy",
3627 * these tables are loaded into two constants, TABL and TABR below.
3628 * The operation index = (input & imask) << shift calculates the index
3629 * into the constant, while val = (table >> index) & omask calculates
3630 * the value we're looking for.
3631 */
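/*
 * Worked instance (editor's note): for EDGE8 (width 8, left), an rs1
 * with low bits 2 gives index = (2 & 0x7) << 3 = 16 and
 * val = (0x80c0e0f0f8fcfeffULL >> 16) & 0xff = 0xfc.
 */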
3632 switch (width) {
3633 case 8:
3634 imask = 0x7;
3635 shift = 3;
3636 omask = 0xff;
3637 if (left) {
3638 tabl = 0x80c0e0f0f8fcfeffULL;
3639 tabr = 0xff7f3f1f0f070301ULL;
3640 } else {
3641 tabl = 0x0103070f1f3f7fffULL;
3642 tabr = 0xfffefcf8f0e0c080ULL;
3643 }
3644 break;
3645 case 16:
3646 imask = 0x6;
3647 shift = 1;
3648 omask = 0xf;
3649 if (left) {
3650 tabl = 0x8cef;
3651 tabr = 0xf731;
3652 } else {
3653 tabl = 0x137f;
3654 tabr = 0xfec8;
3655 }
3656 break;
3657 case 32:
3658 imask = 0x4;
3659 shift = 0;
3660 omask = 0x3;
3661 if (left) {
3662 tabl = (2 << 2) | 3;
3663 tabr = (3 << 2) | 1;
3664 } else {
3665 tabl = (1 << 2) | 3;
3666 tabr = (3 << 2) | 2;
3667 }
3668 break;
3669 default:
3670 g_assert_not_reached();
3671 }
3672
3673 lo1 = tcg_temp_new();
3674 lo2 = tcg_temp_new();
3675 tcg_gen_andi_tl(lo1, s1, imask);
3676 tcg_gen_andi_tl(lo2, s2, imask);
3677 tcg_gen_shli_tl(lo1, lo1, shift);
3678 tcg_gen_shli_tl(lo2, lo2, shift);
3679
3680 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3681 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3682 tcg_gen_andi_tl(lo1, lo1, omask);
3683 tcg_gen_andi_tl(lo2, lo2, omask);
3684
3685 amask = address_mask_i(dc, -8);
3686 tcg_gen_andi_tl(s1, s1, amask);
3687 tcg_gen_andi_tl(s2, s2, amask);
3688
3689 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3690 tcg_gen_and_tl(lo2, lo2, lo1);
3691 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3692
3693 gen_store_gpr(dc, a->rd, dst);
3694 return advance_pc(dc);
3695 }
3696
3697 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3698 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3699 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3700 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3701 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3702 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3703
3704 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3705 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3706 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3707 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3708 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3709 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3710
3711 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3712 void (*func)(TCGv, TCGv, TCGv))
3713 {
3714 TCGv dst = gen_dest_gpr(dc, a->rd);
3715 TCGv src1 = gen_load_gpr(dc, a->rs1);
3716 TCGv src2 = gen_load_gpr(dc, a->rs2);
3717
3718 func(dst, src1, src2);
3719 gen_store_gpr(dc, a->rd, dst);
3720 return advance_pc(dc);
3721 }
3722
3723 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3724 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3725 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3726
3727 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3728 {
3729 #ifdef TARGET_SPARC64
3730 TCGv tmp = tcg_temp_new();
3731
3732 tcg_gen_add_tl(tmp, s1, s2);
3733 tcg_gen_andi_tl(dst, tmp, -8);
3734 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3735 #else
3736 g_assert_not_reached();
3737 #endif
3738 }
3739
3740 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3741 {
3742 #ifdef TARGET_SPARC64
3743 TCGv tmp = tcg_temp_new();
3744
3745 tcg_gen_add_tl(tmp, s1, s2);
3746 tcg_gen_andi_tl(dst, tmp, -8);
3747 tcg_gen_neg_tl(tmp, tmp);
3748 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3749 #else
3750 g_assert_not_reached();
3751 #endif
3752 }
3753
3754 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3755 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3756
3757 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3758 {
3759 #ifdef TARGET_SPARC64
3760 tcg_gen_add_tl(dst, s1, s2);
3761 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3762 #else
3763 g_assert_not_reached();
3764 #endif
3765 }
3766
3767 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3768
3769 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3770 {
3771 TCGv dst, src1, src2;
3772
3773 /* Reject 64-bit shifts for sparc32. */
3774 if (avail_32(dc) && a->x) {
3775 return false;
3776 }
3777
3778 src2 = tcg_temp_new();
3779 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3780 src1 = gen_load_gpr(dc, a->rs1);
3781 dst = gen_dest_gpr(dc, a->rd);
3782
3783 if (l) {
3784 tcg_gen_shl_tl(dst, src1, src2);
3785 if (!a->x) {
3786 tcg_gen_ext32u_tl(dst, dst);
3787 }
3788 } else if (u) {
3789 if (!a->x) {
3790 tcg_gen_ext32u_tl(dst, src1);
3791 src1 = dst;
3792 }
3793 tcg_gen_shr_tl(dst, src1, src2);
3794 } else {
3795 if (!a->x) {
3796 tcg_gen_ext32s_tl(dst, src1);
3797 src1 = dst;
3798 }
3799 tcg_gen_sar_tl(dst, src1, src2);
3800 }
3801 gen_store_gpr(dc, a->rd, dst);
3802 return advance_pc(dc);
3803 }
3804
3805 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3806 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3807 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3808
3809 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3810 {
3811 TCGv dst, src1;
3812
3813 /* Reject 64-bit shifts for sparc32. */
3814 if (avail_32(dc) && (a->x || a->i >= 32)) {
3815 return false;
3816 }
3817
3818 src1 = gen_load_gpr(dc, a->rs1);
3819 dst = gen_dest_gpr(dc, a->rd);
3820
3821 if (avail_32(dc) || a->x) {
3822 if (l) {
3823 tcg_gen_shli_tl(dst, src1, a->i);
3824 } else if (u) {
3825 tcg_gen_shri_tl(dst, src1, a->i);
3826 } else {
3827 tcg_gen_sari_tl(dst, src1, a->i);
3828 }
3829 } else {
3830 if (l) {
3831 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3832 } else if (u) {
3833 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3834 } else {
3835 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3836 }
3837 }
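/*
 * Editor's note: the deposit/extract forms above fold the 32-bit
 * truncation and the shift into one TCG op, e.g.
 *   tcg_gen_extract_tl(dst, src1, i, 32 - i)
 * computes (src1 >> i) & MAKE_64BIT_MASK(0, 32 - i), the zero-extended
 * 32-bit logical right shift, in a single operation.
 */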
3838 gen_store_gpr(dc, a->rd, dst);
3839 return advance_pc(dc);
3840 }
3841
3842 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3843 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3844 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3845
3846 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3847 {
3848 /* For simplicity, we under-decoded the rs2 form. */
3849 if (!imm && rs2_or_imm & ~0x1f) {
3850 return NULL;
3851 }
3852 if (imm || rs2_or_imm == 0) {
3853 return tcg_constant_tl(rs2_or_imm);
3854 } else {
3855 return cpu_regs[rs2_or_imm];
3856 }
3857 }
3858
3859 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
3860 {
3861 TCGv dst = gen_load_gpr(dc, rd);
3862 TCGv c2 = tcg_constant_tl(cmp->c2);
3863
3864 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
3865 gen_store_gpr(dc, rd, dst);
3866 return advance_pc(dc);
3867 }
3868
3869 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
3870 {
3871 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3872 DisasCompare cmp;
3873
3874 if (src2 == NULL) {
3875 return false;
3876 }
3877 gen_compare(&cmp, a->cc, a->cond, dc);
3878 return do_mov_cond(dc, &cmp, a->rd, src2);
3879 }
3880
3881 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
3882 {
3883 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3884 DisasCompare cmp;
3885
3886 if (src2 == NULL) {
3887 return false;
3888 }
3889 gen_fcompare(&cmp, a->cc, a->cond);
3890 return do_mov_cond(dc, &cmp, a->rd, src2);
3891 }
3892
3893 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
3894 {
3895 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3896 DisasCompare cmp;
3897
3898 if (src2 == NULL) {
3899 return false;
3900 }
3901 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
3902 return do_mov_cond(dc, &cmp, a->rd, src2);
3903 }
3904
3905 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
3906 bool (*func)(DisasContext *dc, int rd, TCGv src))
3907 {
3908 TCGv src1, sum;
3909
3910 /* For simplicity, we under-decoded the rs2 form. */
3911 if (!a->imm && a->rs2_or_imm & ~0x1f) {
3912 return false;
3913 }
3914
3915 /*
3916 * Always load the sum into a new temporary.
3917 * This is required to capture the value across a window change,
3918 * e.g. SAVE and RESTORE; otherwise the copy may simply be optimized away.
3919 */
3920 sum = tcg_temp_new();
3921 src1 = gen_load_gpr(dc, a->rs1);
3922 if (a->imm || a->rs2_or_imm == 0) {
3923 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
3924 } else {
3925 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
3926 }
3927 return func(dc, a->rd, sum);
3928 }
3929
3930 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
3931 {
3932 /*
3933 * Preserve pc across advance, so that we can delay
3934 * the writeback to rd until after src is consumed.
3935 */
3936 target_ulong cur_pc = dc->pc;
3937
3938 gen_check_align(dc, src, 3);
3939
3940 gen_mov_pc_npc(dc);
3941 tcg_gen_mov_tl(cpu_npc, src);
3942 gen_address_mask(dc, cpu_npc);
3943 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
3944
3945 dc->npc = DYNAMIC_PC_LOOKUP;
3946 return true;
3947 }
3948
3949 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
3950
3951 static bool do_rett(DisasContext *dc, int rd, TCGv src)
3952 {
3953 if (!supervisor(dc)) {
3954 return raise_priv(dc);
3955 }
3956
3957 gen_check_align(dc, src, 3);
3958
3959 gen_mov_pc_npc(dc);
3960 tcg_gen_mov_tl(cpu_npc, src);
3961 gen_helper_rett(tcg_env);
3962
3963 dc->npc = DYNAMIC_PC;
3964 return true;
3965 }
3966
3967 TRANS(RETT, 32, do_add_special, a, do_rett)
3968
3969 static bool do_return(DisasContext *dc, int rd, TCGv src)
3970 {
3971 gen_check_align(dc, src, 3);
3972
3973 gen_mov_pc_npc(dc);
3974 tcg_gen_mov_tl(cpu_npc, src);
3975 gen_address_mask(dc, cpu_npc);
3976
3977 gen_helper_restore(tcg_env);
3978 dc->npc = DYNAMIC_PC_LOOKUP;
3979 return true;
3980 }
3981
3982 TRANS(RETURN, 64, do_add_special, a, do_return)
3983
3984 static bool do_save(DisasContext *dc, int rd, TCGv src)
3985 {
3986 gen_helper_save(tcg_env);
3987 gen_store_gpr(dc, rd, src);
3988 return advance_pc(dc);
3989 }
3990
3991 TRANS(SAVE, ALL, do_add_special, a, do_save)
3992
3993 static bool do_restore(DisasContext *dc, int rd, TCGv src)
3994 {
3995 gen_helper_restore(tcg_env);
3996 gen_store_gpr(dc, rd, src);
3997 return advance_pc(dc);
3998 }
3999
4000 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4001
4002 static bool do_done_retry(DisasContext *dc, bool done)
4003 {
4004 if (!supervisor(dc)) {
4005 return raise_priv(dc);
4006 }
4007 dc->npc = DYNAMIC_PC;
4008 dc->pc = DYNAMIC_PC;
4009 translator_io_start(&dc->base);
4010 if (done) {
4011 gen_helper_done(tcg_env);
4012 } else {
4013 gen_helper_retry(tcg_env);
4014 }
4015 return true;
4016 }
4017
4018 TRANS(DONE, 64, do_done_retry, true)
4019 TRANS(RETRY, 64, do_done_retry, false)
4020
4021 /*
4022 * Major opcode 11 -- load and store instructions
4023 */
4024
4025 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4026 {
4027 TCGv addr, tmp = NULL;
4028
4029 /* For simplicity, we under-decoded the rs2 form. */
4030 if (!imm && rs2_or_imm & ~0x1f) {
4031 return NULL;
4032 }
4033
4034 addr = gen_load_gpr(dc, rs1);
4035 if (rs2_or_imm) {
4036 tmp = tcg_temp_new();
4037 if (imm) {
4038 tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4039 } else {
4040 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4041 }
4042 addr = tmp;
4043 }
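/*
 * Editor's note: on sparc64 the PSTATE.AM bit forces 32-bit effective
 * addresses (this is what AM_CHECK reports there), so the address
 * computed above may still need zero-extension.
 */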
4044 if (AM_CHECK(dc)) {
4045 if (!tmp) {
4046 tmp = tcg_temp_new();
4047 }
4048 tcg_gen_ext32u_tl(tmp, addr);
4049 addr = tmp;
4050 }
4051 return addr;
4052 }
4053
4054 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4055 {
4056 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4057 DisasASI da;
4058
4059 if (addr == NULL) {
4060 return false;
4061 }
4062 da = resolve_asi(dc, a->asi, mop);
4063
4064 reg = gen_dest_gpr(dc, a->rd);
4065 gen_ld_asi(dc, &da, reg, addr);
4066 gen_store_gpr(dc, a->rd, reg);
4067 return advance_pc(dc);
4068 }
4069
4070 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4071 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4072 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4073 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4074 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4075 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4076 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4077
4078 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4079 {
4080 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4081 DisasASI da;
4082
4083 if (addr == NULL) {
4084 return false;
4085 }
4086 da = resolve_asi(dc, a->asi, mop);
4087
4088 reg = gen_load_gpr(dc, a->rd);
4089 gen_st_asi(dc, &da, reg, addr);
4090 return advance_pc(dc);
4091 }
4092
4093 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4094 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4095 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4096 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4097
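/*
 * LDD/STD operate on an even/odd register pair; an odd rd field is an
 * illegal instruction, hence the (a->rd & 1) rejection below.
 */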
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

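/*
 * CASA/CASXA compare-and-swap: the word at [rs1] is compared with rs2
 * and, on match, replaced with rd; the old memory value lands in rd.
 * The address is rs1 alone, hence gen_ldst_addr with imm=true, offset 0.
 */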
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)

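/*
 * FP loads and stores first check that the FPU is enabled and, for
 * 128-bit accesses, that the float128 extension is implemented,
 * raising the appropriate trap otherwise.
 */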
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

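/*
 * LDFSR/LDXFSR merge the loaded word into %fsr: bits in new_mask come
 * from memory, bits in old_mask are preserved, and the set_fsr helper
 * is then called so that any state derived from %fsr (e.g. the
 * softfloat rounding mode) is refreshed.
 */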
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

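/*
 * FZEROs/FONEs write one 32-bit half of an i64 register pair: even rd
 * selects the high half, odd rd the low half, matching how two
 * single-precision registers are packed into each cpu_fpr[] element.
 */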
static bool do_fc(DisasContext *dc, int rd, bool c)
{
    uint64_t mask;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    if (rd & 1) {
        mask = MAKE_64BIT_MASK(0, 32);
    } else {
        mask = MAKE_64BIT_MASK(32, 32);
    }
    if (c) {
        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
    } else {
        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
    }
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

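/*
 * The helpers below are named for their operand shapes: 'f' is a
 * 32-bit float register, 'd' a 64-bit, 'q' a 128-bit, 'r' a GPR; a
 * leading "env_" means the operation can raise IEEE exceptions, so it
 * takes tcg_env and checks for pending exceptions afterward.
 */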
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
{
    int rd, rs;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    rd = QFPREG(a->rd);
    rs = QFPREG(a->rs);
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)

static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)

static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

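/*
 * The VIS partitioned compares produce a bitmask of per-element
 * results, written to an integer register rather than a float register.
 */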
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)

static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)

static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

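/*
 * PDIST also reads rd (src0) as a source: the sum of absolute byte
 * differences is accumulated into the destination register.
 */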
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)

static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_fdmulq(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

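/*
 * The three conditional-move flavors differ only in where the condition
 * comes from: FMOVR tests an integer register against zero, FMOVcc an
 * integer condition code, and FMOVfcc a floating-point condition code.
 */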
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

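/*
 * FCMP/FCMPE set a floating-point condition code; sparc32 only has
 * %fcc0, so a non-zero cc field is rejected there.  The 'E' variants
 * additionally signal an exception on unordered operands.
 */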
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_op_fcmpes(a->cc, src1, src2);
    } else {
        gen_op_fcmps(a->cc, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_op_fcmped(a->cc, src1, src2);
    } else {
        gen_op_fcmpd(a->cc, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    if (e) {
        gen_op_fcmpeq(a->cc);
    } else {
        gen_op_fcmpq(a->cc);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

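/*
 * Translator callbacks, invoked by the generic translator_loop()
 * through the sparc_tr_ops table below.
 */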
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

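/*
 * Record pc/npc for each insn_start.  When npc is not a valid address
 * (low bits set), it is folded to one of the symbolic DYNAMIC_PC /
 * JUMP_PC markers that sparc_restore_state_to_opc() understands.
 */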
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

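/*
 * At the end of a TB, choose how to continue: direct chaining when
 * both pc and npc are known constants, a TB lookup when the target is
 * only known at run time (DYNAMIC_PC_LOOKUP), or a full exit otherwise.
 * Any delayed exceptions queued during translation are emitted after.
 */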
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start = sparc_tr_tb_start,
    .insn_start = sparc_tr_insn_start,
    .translate_insn = sparc_tr_translate_insn,
    .tb_stop = sparc_tr_tb_stop,
    .disas_log = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }

#ifdef TARGET_SPARC64
    cpu_fprs = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSPARCState, fprs), "fprs");
#endif
}

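/*
 * Rebuild pc/npc from the insn_start data recorded above; the JUMP_PC
 * encoding resolves a not-yet-taken conditional branch using the
 * run-time value of 'cond'.
 */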
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}