2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "tricore-opcodes.h"
40 static TCGv cpu_gpr_a
[16];
41 static TCGv cpu_gpr_d
[16];
43 static TCGv cpu_PSW_C
;
44 static TCGv cpu_PSW_V
;
45 static TCGv cpu_PSW_SV
;
46 static TCGv cpu_PSW_AV
;
47 static TCGv cpu_PSW_SAV
;
49 static TCGv_ptr cpu_env
;
51 #include "exec/gen-icount.h"
/* Names for the address register file TCG globals; a10 is the stack
   pointer and is therefore displayed as "sp". */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Names for the data register file TCG globals (d0..d15). */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
65 typedef struct DisasContext
{
66 struct TranslationBlock
*tb
;
67 target_ulong pc
, saved_pc
, next_pc
;
69 int singlestep_enabled
;
70 /* Routine used to access memory */
72 uint32_t hflags
, saved_hflags
;
91 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
92 fprintf_function cpu_fprintf
, int flags
)
94 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
95 CPUTriCoreState
*env
= &cpu
->env
;
101 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
102 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
103 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
104 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
105 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
106 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
108 for (i
= 0; i
< 16; ++i
) {
110 cpu_fprintf(f
, "\nGPR A%02d:", i
);
112 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
114 for (i
= 0; i
< 16; ++i
) {
116 cpu_fprintf(f
, "\nGPR D%02d:", i
);
118 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
120 cpu_fprintf(f
, "\n");
124 * Functions to generate micro-ops
127 /* Makros for generating helpers */
/* Call helper "name" with a single immediate argument, materialized as a
   throw-away TCG constant. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* Call a packed-halfword helper with the LL selection: high/low halves of
   arg0 are split; the low half of arg1 is used for both multiplier slots. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* LU selection: arg0 split into high/low halves; arg1's low half feeds the
   first multiplier slot, its high half the second. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* UL selection: arg0 split into high/low halves; arg1's high half feeds the
   first multiplier slot, its low half the second. */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* UU selection: note arg00 gets the LOW half and arg01 the HIGH half of
   arg0 (swapped relative to LL); arg1's high half is used for both
   multiplier slots. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a helper taking a 64-bit register pair (al1:ah1) and a 32-bit value,
   splitting the 64-bit result back into rl/rh. */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
/* Call an env-taking helper with two 32-bit operands that produces a
   64-bit result, split back into rl/rh. */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
                                                            \
    tcg_temp_free_i64(ret);                                 \
} while (0)
/* Effective-address assembly for absolute addressing: the 18-bit ABS
   constant is split into a 4-bit segment (bits 17:14 -> EA bits 31:28)
   plus a 14-bit offset. */
214 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* NOTE(review): this macro deliberately references the variable `offset`
   from its expansion site rather than its own parameter `con` — callers
   must have `offset` in scope. Verify intent against upstream. */
215 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
216 ((offset & 0x0fffff) << 1))
218 /* Functions for load/save to/from memory */
220 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
221 int16_t con
, TCGMemOp mop
)
223 TCGv temp
= tcg_temp_new();
224 tcg_gen_addi_tl(temp
, r2
, con
);
225 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
229 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
230 int16_t con
, TCGMemOp mop
)
232 TCGv temp
= tcg_temp_new();
233 tcg_gen_addi_tl(temp
, r2
, con
);
234 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
238 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
240 TCGv_i64 temp
= tcg_temp_new_i64();
242 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
243 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
245 tcg_temp_free_i64(temp
);
248 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
251 TCGv temp
= tcg_temp_new();
252 tcg_gen_addi_tl(temp
, base
, con
);
253 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
257 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
259 TCGv_i64 temp
= tcg_temp_new_i64();
261 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
262 /* write back to two 32 bit regs */
263 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
265 tcg_temp_free_i64(temp
);
268 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
271 TCGv temp
= tcg_temp_new();
272 tcg_gen_addi_tl(temp
, base
, con
);
273 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
277 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
280 TCGv temp
= tcg_temp_new();
281 tcg_gen_addi_tl(temp
, r2
, off
);
282 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
283 tcg_gen_mov_tl(r2
, temp
);
287 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
290 TCGv temp
= tcg_temp_new();
291 tcg_gen_addi_tl(temp
, r2
, off
);
292 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
293 tcg_gen_mov_tl(r2
, temp
);
297 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
298 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
300 TCGv temp
= tcg_temp_new();
301 TCGv temp2
= tcg_temp_new();
303 /* temp = (M(EA, word) */
304 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
305 /* temp = temp & ~E[a][63:32]) */
306 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
307 /* temp2 = (E[a][31:0] & E[a][63:32]); */
308 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
309 /* temp = temp | temp2; */
310 tcg_gen_or_tl(temp
, temp
, temp2
);
311 /* M(EA, word) = temp; */
312 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
315 tcg_temp_free(temp2
);
318 /* tmp = M(EA, word);
321 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
323 TCGv temp
= tcg_temp_new();
325 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
326 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
327 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
332 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
334 TCGv temp
= tcg_temp_new();
335 TCGv temp2
= tcg_temp_new();
336 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
337 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
338 cpu_gpr_d
[reg
], temp
);
339 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
340 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
343 tcg_temp_free(temp2
);
346 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
348 TCGv temp
= tcg_temp_new();
349 TCGv temp2
= tcg_temp_new();
350 TCGv temp3
= tcg_temp_new();
352 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
353 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
354 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
355 tcg_gen_or_tl(temp2
, temp2
, temp3
);
356 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
357 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
360 tcg_temp_free(temp2
);
361 tcg_temp_free(temp3
);
365 /* We generate loads and store to core special function register (csfr) through
366 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
367 makros R, A and E, which allow read-only, all and endinit protected access.
368 These makros also specify in which ISA version the csfr was introduced. */
369 #define R(ADDRESS, REG, FEATURE) \
371 if (tricore_feature(env, FEATURE)) { \
372 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
375 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
376 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
377 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
379 /* since we're caching PSW make this a special case */
380 if (offset
== 0xfe04) {
381 gen_helper_psw_read(ret
, cpu_env
);
392 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
393 since no execption occurs */
394 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
396 if (tricore_feature(env, FEATURE)) { \
397 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
400 /* Endinit protected registers
401 TODO: Since the endinit bit is in a register of a not yet implemented
402 watchdog device, we handle endinit protected registers like
403 all-access registers for now. */
404 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
405 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
408 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
409 /* since we're caching PSW make this a special case */
410 if (offset
== 0xfe04) {
411 gen_helper_psw_write(cpu_env
, r1
);
418 /* generate privilege trap */
422 /* Functions for arithmetic instructions */
424 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
426 TCGv t0
= tcg_temp_new_i32();
427 TCGv result
= tcg_temp_new_i32();
428 /* Addition and set V/SV bits */
429 tcg_gen_add_tl(result
, r1
, r2
);
431 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
432 tcg_gen_xor_tl(t0
, r1
, r2
);
433 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
435 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
436 /* Calc AV/SAV bits */
437 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
438 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
440 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
441 /* write back result */
442 tcg_gen_mov_tl(ret
, result
);
444 tcg_temp_free(result
);
449 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
451 TCGv temp
= tcg_temp_new();
452 TCGv_i64 t0
= tcg_temp_new_i64();
453 TCGv_i64 t1
= tcg_temp_new_i64();
454 TCGv_i64 result
= tcg_temp_new_i64();
456 tcg_gen_add_i64(result
, r1
, r2
);
458 tcg_gen_xor_i64(t1
, result
, r1
);
459 tcg_gen_xor_i64(t0
, r1
, r2
);
460 tcg_gen_andc_i64(t1
, t1
, t0
);
461 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
463 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
464 /* calc AV/SAV bits */
465 tcg_gen_extrh_i64_i32(temp
, result
);
466 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
467 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
469 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
470 /* write back result */
471 tcg_gen_mov_i64(ret
, result
);
474 tcg_temp_free_i64(result
);
475 tcg_temp_free_i64(t0
);
476 tcg_temp_free_i64(t1
);
480 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
481 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
482 void(*op2
)(TCGv
, TCGv
, TCGv
))
484 TCGv temp
= tcg_temp_new();
485 TCGv temp2
= tcg_temp_new();
486 TCGv temp3
= tcg_temp_new();
487 TCGv temp4
= tcg_temp_new();
489 (*op1
)(temp
, r1_low
, r2
);
491 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
492 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
493 if (op1
== tcg_gen_add_tl
) {
494 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
496 tcg_gen_and_tl(temp2
, temp2
, temp3
);
499 (*op2
)(temp3
, r1_high
, r3
);
501 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
502 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
503 if (op2
== tcg_gen_add_tl
) {
504 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
506 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
508 /* combine V0/V1 bits */
509 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
513 tcg_gen_mov_tl(ret_low
, temp
);
514 tcg_gen_mov_tl(ret_high
, temp3
);
516 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
517 tcg_gen_xor_tl(temp
, temp
, ret_low
);
518 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
519 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
520 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
522 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
525 tcg_temp_free(temp2
);
526 tcg_temp_free(temp3
);
527 tcg_temp_free(temp4
);
530 /* ret = r2 + (r1 * r3); */
531 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
533 TCGv_i64 t1
= tcg_temp_new_i64();
534 TCGv_i64 t2
= tcg_temp_new_i64();
535 TCGv_i64 t3
= tcg_temp_new_i64();
537 tcg_gen_ext_i32_i64(t1
, r1
);
538 tcg_gen_ext_i32_i64(t2
, r2
);
539 tcg_gen_ext_i32_i64(t3
, r3
);
541 tcg_gen_mul_i64(t1
, t1
, t3
);
542 tcg_gen_add_i64(t1
, t2
, t1
);
544 tcg_gen_extrl_i64_i32(ret
, t1
);
547 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
548 /* t1 < -0x80000000 */
549 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
550 tcg_gen_or_i64(t2
, t2
, t3
);
551 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
552 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
554 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
555 /* Calc AV/SAV bits */
556 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
557 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
559 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
561 tcg_temp_free_i64(t1
);
562 tcg_temp_free_i64(t2
);
563 tcg_temp_free_i64(t3
);
566 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
568 TCGv temp
= tcg_const_i32(con
);
569 gen_madd32_d(ret
, r1
, r2
, temp
);
574 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
577 TCGv t1
= tcg_temp_new();
578 TCGv t2
= tcg_temp_new();
579 TCGv t3
= tcg_temp_new();
580 TCGv t4
= tcg_temp_new();
582 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
583 /* only the add can overflow */
584 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
586 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
587 tcg_gen_xor_tl(t1
, r2_high
, t2
);
588 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
590 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
591 /* Calc AV/SAV bits */
592 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
593 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
595 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
596 /* write back the result */
597 tcg_gen_mov_tl(ret_low
, t3
);
598 tcg_gen_mov_tl(ret_high
, t4
);
607 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
610 TCGv_i64 t1
= tcg_temp_new_i64();
611 TCGv_i64 t2
= tcg_temp_new_i64();
612 TCGv_i64 t3
= tcg_temp_new_i64();
614 tcg_gen_extu_i32_i64(t1
, r1
);
615 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
616 tcg_gen_extu_i32_i64(t3
, r3
);
618 tcg_gen_mul_i64(t1
, t1
, t3
);
619 tcg_gen_add_i64(t2
, t2
, t1
);
620 /* write back result */
621 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
622 /* only the add overflows, if t2 < t1
624 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
625 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
626 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
628 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
629 /* Calc AV/SAV bits */
630 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
631 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
633 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
635 tcg_temp_free_i64(t1
);
636 tcg_temp_free_i64(t2
);
637 tcg_temp_free_i64(t3
);
641 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
644 TCGv temp
= tcg_const_i32(con
);
645 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
650 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
653 TCGv temp
= tcg_const_i32(con
);
654 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
659 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
660 TCGv r3
, uint32_t n
, uint32_t mode
)
662 TCGv temp
= tcg_const_i32(n
);
663 TCGv temp2
= tcg_temp_new();
664 TCGv_i64 temp64
= tcg_temp_new_i64();
667 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
670 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
673 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
676 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
679 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
680 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
681 tcg_gen_add_tl
, tcg_gen_add_tl
);
683 tcg_temp_free(temp2
);
684 tcg_temp_free_i64(temp64
);
688 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
689 TCGv r3
, uint32_t n
, uint32_t mode
)
691 TCGv temp
= tcg_const_i32(n
);
692 TCGv temp2
= tcg_temp_new();
693 TCGv_i64 temp64
= tcg_temp_new_i64();
696 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
699 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
702 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
705 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
708 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
709 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
710 tcg_gen_sub_tl
, tcg_gen_add_tl
);
712 tcg_temp_free(temp2
);
713 tcg_temp_free_i64(temp64
);
717 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
718 TCGv r3
, uint32_t n
, uint32_t mode
)
720 TCGv temp
= tcg_const_i32(n
);
721 TCGv_i64 temp64
= tcg_temp_new_i64();
722 TCGv_i64 temp64_2
= tcg_temp_new_i64();
723 TCGv_i64 temp64_3
= tcg_temp_new_i64();
726 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
729 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
732 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
735 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
738 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
739 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
740 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
741 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
742 tcg_gen_shli_i64(temp64
, temp64
, 16);
744 gen_add64_d(temp64_2
, temp64_3
, temp64
);
745 /* write back result */
746 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
749 tcg_temp_free_i64(temp64
);
750 tcg_temp_free_i64(temp64_2
);
751 tcg_temp_free_i64(temp64_3
);
754 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
757 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
758 TCGv r3
, uint32_t n
, uint32_t mode
)
760 TCGv temp
= tcg_const_i32(n
);
761 TCGv temp2
= tcg_temp_new();
762 TCGv temp3
= tcg_temp_new();
763 TCGv_i64 temp64
= tcg_temp_new_i64();
767 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
770 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
773 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
776 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
779 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
780 gen_adds(ret_low
, r1_low
, temp
);
781 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
782 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
783 gen_adds(ret_high
, r1_high
, temp2
);
785 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
786 /* combine av bits */
787 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
790 tcg_temp_free(temp2
);
791 tcg_temp_free(temp3
);
792 tcg_temp_free_i64(temp64
);
796 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
799 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
800 TCGv r3
, uint32_t n
, uint32_t mode
)
802 TCGv temp
= tcg_const_i32(n
);
803 TCGv temp2
= tcg_temp_new();
804 TCGv temp3
= tcg_temp_new();
805 TCGv_i64 temp64
= tcg_temp_new_i64();
809 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
812 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
815 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
818 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
821 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
822 gen_subs(ret_low
, r1_low
, temp
);
823 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
824 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
825 gen_adds(ret_high
, r1_high
, temp2
);
827 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
828 /* combine av bits */
829 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
832 tcg_temp_free(temp2
);
833 tcg_temp_free(temp3
);
834 tcg_temp_free_i64(temp64
);
839 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
840 TCGv r3
, uint32_t n
, uint32_t mode
)
842 TCGv temp
= tcg_const_i32(n
);
843 TCGv_i64 temp64
= tcg_temp_new_i64();
844 TCGv_i64 temp64_2
= tcg_temp_new_i64();
848 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
851 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
854 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
857 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
860 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
861 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
862 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
863 tcg_gen_shli_i64(temp64
, temp64
, 16);
864 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
866 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
867 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
870 tcg_temp_free_i64(temp64
);
871 tcg_temp_free_i64(temp64_2
);
876 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
877 TCGv r3
, uint32_t n
, uint32_t mode
)
879 TCGv temp
= tcg_const_i32(n
);
880 TCGv_i64 temp64
= tcg_temp_new_i64();
881 TCGv_i64 temp64_2
= tcg_temp_new_i64();
882 TCGv_i64 temp64_3
= tcg_temp_new_i64();
885 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
888 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
891 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
894 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
897 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
898 gen_add64_d(temp64_3
, temp64_2
, temp64
);
899 /* write back result */
900 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
903 tcg_temp_free_i64(temp64
);
904 tcg_temp_free_i64(temp64_2
);
905 tcg_temp_free_i64(temp64_3
);
909 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
910 TCGv r3
, uint32_t n
, uint32_t mode
)
912 TCGv temp
= tcg_const_i32(n
);
913 TCGv_i64 temp64
= tcg_temp_new_i64();
914 TCGv_i64 temp64_2
= tcg_temp_new_i64();
917 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
920 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
923 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
926 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
929 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
930 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
931 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
934 tcg_temp_free_i64(temp64
);
935 tcg_temp_free_i64(temp64_2
);
939 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
942 TCGv temp
= tcg_const_i32(n
);
943 TCGv_i64 temp64
= tcg_temp_new_i64();
946 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
949 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
952 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
955 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
958 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
961 tcg_temp_free_i64(temp64
);
965 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
967 TCGv temp
= tcg_temp_new();
968 TCGv temp2
= tcg_temp_new();
970 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
971 tcg_gen_shli_tl(temp
, r1
, 16);
972 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
975 tcg_temp_free(temp2
);
979 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
981 TCGv temp
= tcg_const_i32(n
);
982 TCGv temp2
= tcg_temp_new();
983 TCGv_i64 temp64
= tcg_temp_new_i64();
986 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
989 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
992 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
995 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
998 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
999 tcg_gen_shli_tl(temp
, r1
, 16);
1000 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
1002 tcg_temp_free(temp
);
1003 tcg_temp_free(temp2
);
1004 tcg_temp_free_i64(temp64
);
1009 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1010 uint32_t n
, uint32_t mode
)
1012 TCGv temp
= tcg_const_i32(n
);
1013 TCGv_i64 temp64
= tcg_temp_new_i64();
1016 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1019 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1022 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1025 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1028 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1030 tcg_temp_free(temp
);
1031 tcg_temp_free_i64(temp64
);
1035 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1037 TCGv temp
= tcg_temp_new();
1038 TCGv temp2
= tcg_temp_new();
1040 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1041 tcg_gen_shli_tl(temp
, r1
, 16);
1042 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1044 tcg_temp_free(temp
);
1045 tcg_temp_free(temp2
);
1049 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1051 TCGv temp
= tcg_const_i32(n
);
1052 TCGv temp2
= tcg_temp_new();
1053 TCGv_i64 temp64
= tcg_temp_new_i64();
1056 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1059 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1062 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1065 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1068 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1069 tcg_gen_shli_tl(temp
, r1
, 16);
1070 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
1072 tcg_temp_free(temp
);
1073 tcg_temp_free(temp2
);
1074 tcg_temp_free_i64(temp64
);
1078 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1080 TCGv temp
= tcg_const_i32(n
);
1081 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1082 tcg_temp_free(temp
);
1086 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1088 TCGv temp
= tcg_const_i32(n
);
1089 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1090 tcg_temp_free(temp
);
1094 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1095 uint32_t up_shift
, CPUTriCoreState
*env
)
1097 TCGv temp
= tcg_temp_new();
1098 TCGv temp2
= tcg_temp_new();
1099 TCGv temp3
= tcg_temp_new();
1100 TCGv_i64 t1
= tcg_temp_new_i64();
1101 TCGv_i64 t2
= tcg_temp_new_i64();
1102 TCGv_i64 t3
= tcg_temp_new_i64();
1104 tcg_gen_ext_i32_i64(t2
, arg2
);
1105 tcg_gen_ext_i32_i64(t3
, arg3
);
1107 tcg_gen_mul_i64(t2
, t2
, t3
);
1108 tcg_gen_shli_i64(t2
, t2
, n
);
1110 tcg_gen_ext_i32_i64(t1
, arg1
);
1111 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1113 tcg_gen_add_i64(t3
, t1
, t2
);
1114 tcg_gen_extrl_i64_i32(temp3
, t3
);
1116 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1117 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1118 tcg_gen_or_i64(t1
, t1
, t2
);
1119 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1120 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1121 /* We produce an overflow on the host if the mul before was
1122 (0x80000000 * 0x80000000) << 1). If this is the
1123 case, we negate the ovf. */
1125 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1126 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1127 tcg_gen_and_tl(temp
, temp
, temp2
);
1128 tcg_gen_shli_tl(temp
, temp
, 31);
1129 /* negate v bit, if special condition */
1130 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1133 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1134 /* Calc AV/SAV bits */
1135 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1136 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1138 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1139 /* write back result */
1140 tcg_gen_mov_tl(ret
, temp3
);
1142 tcg_temp_free(temp
);
1143 tcg_temp_free(temp2
);
1144 tcg_temp_free(temp3
);
1145 tcg_temp_free_i64(t1
);
1146 tcg_temp_free_i64(t2
);
1147 tcg_temp_free_i64(t3
);
1151 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1153 TCGv temp
= tcg_temp_new();
1154 TCGv temp2
= tcg_temp_new();
1156 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1157 } else { /* n is expected to be 1 */
1158 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1159 tcg_gen_shli_tl(temp
, temp
, 1);
1160 /* catch special case r1 = r2 = 0x8000 */
1161 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1162 tcg_gen_sub_tl(temp
, temp
, temp2
);
1164 gen_add_d(ret
, arg1
, temp
);
1166 tcg_temp_free(temp
);
1167 tcg_temp_free(temp2
);
1171 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1173 TCGv temp
= tcg_temp_new();
1174 TCGv temp2
= tcg_temp_new();
1176 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1177 } else { /* n is expected to be 1 */
1178 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1179 tcg_gen_shli_tl(temp
, temp
, 1);
1180 /* catch special case r1 = r2 = 0x8000 */
1181 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1182 tcg_gen_sub_tl(temp
, temp
, temp2
);
1184 gen_adds(ret
, arg1
, temp
);
1186 tcg_temp_free(temp
);
1187 tcg_temp_free(temp2
);
1191 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1192 TCGv arg3
, uint32_t n
)
1194 TCGv temp
= tcg_temp_new();
1195 TCGv temp2
= tcg_temp_new();
1196 TCGv_i64 t1
= tcg_temp_new_i64();
1197 TCGv_i64 t2
= tcg_temp_new_i64();
1198 TCGv_i64 t3
= tcg_temp_new_i64();
1201 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1202 } else { /* n is expected to be 1 */
1203 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1204 tcg_gen_shli_tl(temp
, temp
, 1);
1205 /* catch special case r1 = r2 = 0x8000 */
1206 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1207 tcg_gen_sub_tl(temp
, temp
, temp2
);
1209 tcg_gen_ext_i32_i64(t2
, temp
);
1210 tcg_gen_shli_i64(t2
, t2
, 16);
1211 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1212 gen_add64_d(t3
, t1
, t2
);
1213 /* write back result */
1214 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1216 tcg_temp_free_i64(t1
);
1217 tcg_temp_free_i64(t2
);
1218 tcg_temp_free_i64(t3
);
1219 tcg_temp_free(temp
);
1220 tcg_temp_free(temp2
);
1224 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1225 TCGv arg3
, uint32_t n
)
1227 TCGv temp
= tcg_temp_new();
1228 TCGv temp2
= tcg_temp_new();
1229 TCGv_i64 t1
= tcg_temp_new_i64();
1230 TCGv_i64 t2
= tcg_temp_new_i64();
1233 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1234 } else { /* n is expected to be 1 */
1235 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1236 tcg_gen_shli_tl(temp
, temp
, 1);
1237 /* catch special case r1 = r2 = 0x8000 */
1238 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1239 tcg_gen_sub_tl(temp
, temp
, temp2
);
1241 tcg_gen_ext_i32_i64(t2
, temp
);
1242 tcg_gen_shli_i64(t2
, t2
, 16);
1243 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1245 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1246 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1248 tcg_temp_free(temp
);
1249 tcg_temp_free(temp2
);
1250 tcg_temp_free_i64(t1
);
1251 tcg_temp_free_i64(t2
);
1255 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1256 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1258 TCGv_i64 t1
= tcg_temp_new_i64();
1259 TCGv_i64 t2
= tcg_temp_new_i64();
1260 TCGv_i64 t3
= tcg_temp_new_i64();
1261 TCGv_i64 t4
= tcg_temp_new_i64();
1264 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1265 tcg_gen_ext_i32_i64(t2
, arg2
);
1266 tcg_gen_ext_i32_i64(t3
, arg3
);
1268 tcg_gen_mul_i64(t2
, t2
, t3
);
1270 tcg_gen_shli_i64(t2
, t2
, 1);
1272 tcg_gen_add_i64(t4
, t1
, t2
);
1274 tcg_gen_xor_i64(t3
, t4
, t1
);
1275 tcg_gen_xor_i64(t2
, t1
, t2
);
1276 tcg_gen_andc_i64(t3
, t3
, t2
);
1277 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1278 /* We produce an overflow on the host if the mul before was
1279 (0x80000000 * 0x80000000) << 1). If this is the
1280 case, we negate the ovf. */
1282 temp
= tcg_temp_new();
1283 temp2
= tcg_temp_new();
1284 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1285 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1286 tcg_gen_and_tl(temp
, temp
, temp2
);
1287 tcg_gen_shli_tl(temp
, temp
, 31);
1288 /* negate v bit, if special condition */
1289 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1291 tcg_temp_free(temp
);
1292 tcg_temp_free(temp2
);
1294 /* write back result */
1295 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1297 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1298 /* Calc AV/SAV bits */
1299 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1300 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1302 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1304 tcg_temp_free_i64(t1
);
1305 tcg_temp_free_i64(t2
);
1306 tcg_temp_free_i64(t3
);
1307 tcg_temp_free_i64(t4
);
1311 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1314 TCGv_i64 t1
= tcg_temp_new_i64();
1315 TCGv_i64 t2
= tcg_temp_new_i64();
1316 TCGv_i64 t3
= tcg_temp_new_i64();
1318 tcg_gen_ext_i32_i64(t1
, arg1
);
1319 tcg_gen_ext_i32_i64(t2
, arg2
);
1320 tcg_gen_ext_i32_i64(t3
, arg3
);
1322 tcg_gen_mul_i64(t2
, t2
, t3
);
1323 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1325 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1327 tcg_temp_free_i64(t1
);
1328 tcg_temp_free_i64(t2
);
1329 tcg_temp_free_i64(t3
);
1333 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1334 TCGv arg3
, uint32_t n
)
1336 TCGv_i64 r1
= tcg_temp_new_i64();
1337 TCGv temp
= tcg_const_i32(n
);
1339 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1340 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1341 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1343 tcg_temp_free_i64(r1
);
1344 tcg_temp_free(temp
);
1346 /* ret = r2 - (r1 * r3); */
1347 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1349 TCGv_i64 t1
= tcg_temp_new_i64();
1350 TCGv_i64 t2
= tcg_temp_new_i64();
1351 TCGv_i64 t3
= tcg_temp_new_i64();
1353 tcg_gen_ext_i32_i64(t1
, r1
);
1354 tcg_gen_ext_i32_i64(t2
, r2
);
1355 tcg_gen_ext_i32_i64(t3
, r3
);
1357 tcg_gen_mul_i64(t1
, t1
, t3
);
1358 tcg_gen_sub_i64(t1
, t2
, t1
);
1360 tcg_gen_extrl_i64_i32(ret
, t1
);
1363 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1364 /* result < -0x80000000 */
1365 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1366 tcg_gen_or_i64(t2
, t2
, t3
);
1367 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1368 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1371 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1372 /* Calc AV/SAV bits */
1373 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1374 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1376 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1378 tcg_temp_free_i64(t1
);
1379 tcg_temp_free_i64(t2
);
1380 tcg_temp_free_i64(t3
);
1383 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1385 TCGv temp
= tcg_const_i32(con
);
1386 gen_msub32_d(ret
, r1
, r2
, temp
);
1387 tcg_temp_free(temp
);
1391 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1394 TCGv t1
= tcg_temp_new();
1395 TCGv t2
= tcg_temp_new();
1396 TCGv t3
= tcg_temp_new();
1397 TCGv t4
= tcg_temp_new();
1399 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1400 /* only the sub can overflow */
1401 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1403 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1404 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1405 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1407 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1408 /* Calc AV/SAV bits */
1409 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1410 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1412 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1413 /* write back the result */
1414 tcg_gen_mov_tl(ret_low
, t3
);
1415 tcg_gen_mov_tl(ret_high
, t4
);
1424 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1427 TCGv temp
= tcg_const_i32(con
);
1428 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1429 tcg_temp_free(temp
);
1433 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1436 TCGv_i64 t1
= tcg_temp_new_i64();
1437 TCGv_i64 t2
= tcg_temp_new_i64();
1438 TCGv_i64 t3
= tcg_temp_new_i64();
1440 tcg_gen_extu_i32_i64(t1
, r1
);
1441 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1442 tcg_gen_extu_i32_i64(t3
, r3
);
1444 tcg_gen_mul_i64(t1
, t1
, t3
);
1445 tcg_gen_sub_i64(t3
, t2
, t1
);
1446 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1447 /* calc V bit, only the sub can overflow, if t1 > t2 */
1448 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1449 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1450 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1452 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1453 /* Calc AV/SAV bits */
1454 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1455 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1457 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1459 tcg_temp_free_i64(t1
);
1460 tcg_temp_free_i64(t2
);
1461 tcg_temp_free_i64(t3
);
1465 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1468 TCGv temp
= tcg_const_i32(con
);
1469 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1470 tcg_temp_free(temp
);
1473 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1475 TCGv temp
= tcg_const_i32(r2
);
1476 gen_add_d(ret
, r1
, temp
);
1477 tcg_temp_free(temp
);
1479 /* calculate the carry bit too */
1480 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1482 TCGv t0
= tcg_temp_new_i32();
1483 TCGv result
= tcg_temp_new_i32();
1485 tcg_gen_movi_tl(t0
, 0);
1486 /* Addition and set C/V/SV bits */
1487 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1489 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1490 tcg_gen_xor_tl(t0
, r1
, r2
);
1491 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1493 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1494 /* Calc AV/SAV bits */
1495 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1496 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1498 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1499 /* write back result */
1500 tcg_gen_mov_tl(ret
, result
);
1502 tcg_temp_free(result
);
1506 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1508 TCGv temp
= tcg_const_i32(con
);
1509 gen_add_CC(ret
, r1
, temp
);
1510 tcg_temp_free(temp
);
1513 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1515 TCGv carry
= tcg_temp_new_i32();
1516 TCGv t0
= tcg_temp_new_i32();
1517 TCGv result
= tcg_temp_new_i32();
1519 tcg_gen_movi_tl(t0
, 0);
1520 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1521 /* Addition, carry and set C/V/SV bits */
1522 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1523 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1525 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1526 tcg_gen_xor_tl(t0
, r1
, r2
);
1527 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1529 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1530 /* Calc AV/SAV bits */
1531 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1532 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1534 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1535 /* write back result */
1536 tcg_gen_mov_tl(ret
, result
);
1538 tcg_temp_free(result
);
1540 tcg_temp_free(carry
);
1543 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1545 TCGv temp
= tcg_const_i32(con
);
1546 gen_addc_CC(ret
, r1
, temp
);
1547 tcg_temp_free(temp
);
1550 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1553 TCGv temp
= tcg_temp_new();
1554 TCGv temp2
= tcg_temp_new();
1555 TCGv result
= tcg_temp_new();
1556 TCGv mask
= tcg_temp_new();
1557 TCGv t0
= tcg_const_i32(0);
1559 /* create mask for sticky bits */
1560 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1561 tcg_gen_shli_tl(mask
, mask
, 31);
1563 tcg_gen_add_tl(result
, r1
, r2
);
1565 tcg_gen_xor_tl(temp
, result
, r1
);
1566 tcg_gen_xor_tl(temp2
, r1
, r2
);
1567 tcg_gen_andc_tl(temp
, temp
, temp2
);
1568 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1570 tcg_gen_and_tl(temp
, temp
, mask
);
1571 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1573 tcg_gen_add_tl(temp
, result
, result
);
1574 tcg_gen_xor_tl(temp
, temp
, result
);
1575 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1577 tcg_gen_and_tl(temp
, temp
, mask
);
1578 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1579 /* write back result */
1580 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1583 tcg_temp_free(temp
);
1584 tcg_temp_free(temp2
);
1585 tcg_temp_free(result
);
1586 tcg_temp_free(mask
);
1589 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1592 TCGv temp
= tcg_const_i32(r2
);
1593 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1594 tcg_temp_free(temp
);
1597 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1599 TCGv temp
= tcg_temp_new_i32();
1600 TCGv result
= tcg_temp_new_i32();
1602 tcg_gen_sub_tl(result
, r1
, r2
);
1604 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1605 tcg_gen_xor_tl(temp
, r1
, r2
);
1606 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1608 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1610 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1611 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1613 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1614 /* write back result */
1615 tcg_gen_mov_tl(ret
, result
);
1617 tcg_temp_free(temp
);
1618 tcg_temp_free(result
);
1622 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1624 TCGv temp
= tcg_temp_new();
1625 TCGv_i64 t0
= tcg_temp_new_i64();
1626 TCGv_i64 t1
= tcg_temp_new_i64();
1627 TCGv_i64 result
= tcg_temp_new_i64();
1629 tcg_gen_sub_i64(result
, r1
, r2
);
1631 tcg_gen_xor_i64(t1
, result
, r1
);
1632 tcg_gen_xor_i64(t0
, r1
, r2
);
1633 tcg_gen_and_i64(t1
, t1
, t0
);
1634 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1636 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1637 /* calc AV/SAV bits */
1638 tcg_gen_extrh_i64_i32(temp
, result
);
1639 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1640 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1642 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1643 /* write back result */
1644 tcg_gen_mov_i64(ret
, result
);
1646 tcg_temp_free(temp
);
1647 tcg_temp_free_i64(result
);
1648 tcg_temp_free_i64(t0
);
1649 tcg_temp_free_i64(t1
);
1652 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1654 TCGv result
= tcg_temp_new();
1655 TCGv temp
= tcg_temp_new();
1657 tcg_gen_sub_tl(result
, r1
, r2
);
1659 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1661 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1662 tcg_gen_xor_tl(temp
, r1
, r2
);
1663 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1665 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1667 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1668 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1670 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1671 /* write back result */
1672 tcg_gen_mov_tl(ret
, result
);
1674 tcg_temp_free(result
);
1675 tcg_temp_free(temp
);
1678 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1680 TCGv temp
= tcg_temp_new();
1681 tcg_gen_not_tl(temp
, r2
);
1682 gen_addc_CC(ret
, r1
, temp
);
1683 tcg_temp_free(temp
);
1686 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1689 TCGv temp
= tcg_temp_new();
1690 TCGv temp2
= tcg_temp_new();
1691 TCGv result
= tcg_temp_new();
1692 TCGv mask
= tcg_temp_new();
1693 TCGv t0
= tcg_const_i32(0);
1695 /* create mask for sticky bits */
1696 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1697 tcg_gen_shli_tl(mask
, mask
, 31);
1699 tcg_gen_sub_tl(result
, r1
, r2
);
1701 tcg_gen_xor_tl(temp
, result
, r1
);
1702 tcg_gen_xor_tl(temp2
, r1
, r2
);
1703 tcg_gen_and_tl(temp
, temp
, temp2
);
1704 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1706 tcg_gen_and_tl(temp
, temp
, mask
);
1707 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1709 tcg_gen_add_tl(temp
, result
, result
);
1710 tcg_gen_xor_tl(temp
, temp
, result
);
1711 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1713 tcg_gen_and_tl(temp
, temp
, mask
);
1714 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1715 /* write back result */
1716 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1719 tcg_temp_free(temp
);
1720 tcg_temp_free(temp2
);
1721 tcg_temp_free(result
);
1722 tcg_temp_free(mask
);
1726 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1727 TCGv r3
, uint32_t n
, uint32_t mode
)
1729 TCGv temp
= tcg_const_i32(n
);
1730 TCGv temp2
= tcg_temp_new();
1731 TCGv_i64 temp64
= tcg_temp_new_i64();
1734 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1737 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1740 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1743 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1746 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1747 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1748 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1749 tcg_temp_free(temp
);
1750 tcg_temp_free(temp2
);
1751 tcg_temp_free_i64(temp64
);
1755 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1756 TCGv r3
, uint32_t n
, uint32_t mode
)
1758 TCGv temp
= tcg_const_i32(n
);
1759 TCGv temp2
= tcg_temp_new();
1760 TCGv temp3
= tcg_temp_new();
1761 TCGv_i64 temp64
= tcg_temp_new_i64();
1765 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1768 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1771 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1774 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1777 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1778 gen_subs(ret_low
, r1_low
, temp
);
1779 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1780 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1781 gen_subs(ret_high
, r1_high
, temp2
);
1782 /* combine v bits */
1783 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1784 /* combine av bits */
1785 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1787 tcg_temp_free(temp
);
1788 tcg_temp_free(temp2
);
1789 tcg_temp_free(temp3
);
1790 tcg_temp_free_i64(temp64
);
1794 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1795 TCGv r3
, uint32_t n
, uint32_t mode
)
1797 TCGv temp
= tcg_const_i32(n
);
1798 TCGv_i64 temp64
= tcg_temp_new_i64();
1799 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1800 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1803 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1806 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1809 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1812 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1815 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1816 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1817 /* write back result */
1818 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1820 tcg_temp_free(temp
);
1821 tcg_temp_free_i64(temp64
);
1822 tcg_temp_free_i64(temp64_2
);
1823 tcg_temp_free_i64(temp64_3
);
1827 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1828 TCGv r3
, uint32_t n
, uint32_t mode
)
1830 TCGv temp
= tcg_const_i32(n
);
1831 TCGv_i64 temp64
= tcg_temp_new_i64();
1832 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1835 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1838 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1841 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1844 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1847 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1848 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1849 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1851 tcg_temp_free(temp
);
1852 tcg_temp_free_i64(temp64
);
1853 tcg_temp_free_i64(temp64_2
);
1857 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1860 TCGv temp
= tcg_const_i32(n
);
1861 TCGv_i64 temp64
= tcg_temp_new_i64();
1864 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1867 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1870 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1873 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1876 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1878 tcg_temp_free(temp
);
1879 tcg_temp_free_i64(temp64
);
1883 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1885 TCGv temp
= tcg_temp_new();
1886 TCGv temp2
= tcg_temp_new();
1888 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1889 tcg_gen_shli_tl(temp
, r1
, 16);
1890 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1892 tcg_temp_free(temp
);
1893 tcg_temp_free(temp2
);
1897 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1898 uint32_t n
, uint32_t mode
)
1900 TCGv temp
= tcg_const_i32(n
);
1901 TCGv_i64 temp64
= tcg_temp_new_i64();
1904 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1907 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1910 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1913 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1916 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1918 tcg_temp_free(temp
);
1919 tcg_temp_free_i64(temp64
);
1923 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1925 TCGv temp
= tcg_temp_new();
1926 TCGv temp2
= tcg_temp_new();
1928 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1929 tcg_gen_shli_tl(temp
, r1
, 16);
1930 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1932 tcg_temp_free(temp
);
1933 tcg_temp_free(temp2
);
1937 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1939 TCGv temp
= tcg_const_i32(n
);
1940 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1941 tcg_temp_free(temp
);
1945 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1947 TCGv temp
= tcg_const_i32(n
);
1948 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1949 tcg_temp_free(temp
);
1953 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1954 uint32_t up_shift
, CPUTriCoreState
*env
)
1956 TCGv temp
= tcg_temp_new();
1957 TCGv temp2
= tcg_temp_new();
1958 TCGv temp3
= tcg_temp_new();
1959 TCGv_i64 t1
= tcg_temp_new_i64();
1960 TCGv_i64 t2
= tcg_temp_new_i64();
1961 TCGv_i64 t3
= tcg_temp_new_i64();
1962 TCGv_i64 t4
= tcg_temp_new_i64();
1964 tcg_gen_ext_i32_i64(t2
, arg2
);
1965 tcg_gen_ext_i32_i64(t3
, arg3
);
1967 tcg_gen_mul_i64(t2
, t2
, t3
);
1969 tcg_gen_ext_i32_i64(t1
, arg1
);
1970 /* if we shift part of the fraction out, we need to round up */
1971 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1972 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1973 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1974 tcg_gen_add_i64(t2
, t2
, t4
);
1976 tcg_gen_sub_i64(t3
, t1
, t2
);
1977 tcg_gen_extrl_i64_i32(temp3
, t3
);
1979 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1980 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1981 tcg_gen_or_i64(t1
, t1
, t2
);
1982 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1983 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1985 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1986 /* Calc AV/SAV bits */
1987 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1988 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1990 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1991 /* write back result */
1992 tcg_gen_mov_tl(ret
, temp3
);
1994 tcg_temp_free(temp
);
1995 tcg_temp_free(temp2
);
1996 tcg_temp_free(temp3
);
1997 tcg_temp_free_i64(t1
);
1998 tcg_temp_free_i64(t2
);
1999 tcg_temp_free_i64(t3
);
2000 tcg_temp_free_i64(t4
);
2004 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2006 TCGv temp
= tcg_temp_new();
2007 TCGv temp2
= tcg_temp_new();
2009 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2010 } else { /* n is expected to be 1 */
2011 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2012 tcg_gen_shli_tl(temp
, temp
, 1);
2013 /* catch special case r1 = r2 = 0x8000 */
2014 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2015 tcg_gen_sub_tl(temp
, temp
, temp2
);
2017 gen_sub_d(ret
, arg1
, temp
);
2019 tcg_temp_free(temp
);
2020 tcg_temp_free(temp2
);
2024 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2026 TCGv temp
= tcg_temp_new();
2027 TCGv temp2
= tcg_temp_new();
2029 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2030 } else { /* n is expected to be 1 */
2031 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2032 tcg_gen_shli_tl(temp
, temp
, 1);
2033 /* catch special case r1 = r2 = 0x8000 */
2034 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2035 tcg_gen_sub_tl(temp
, temp
, temp2
);
2037 gen_subs(ret
, arg1
, temp
);
2039 tcg_temp_free(temp
);
2040 tcg_temp_free(temp2
);
2044 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2045 TCGv arg3
, uint32_t n
)
2047 TCGv temp
= tcg_temp_new();
2048 TCGv temp2
= tcg_temp_new();
2049 TCGv_i64 t1
= tcg_temp_new_i64();
2050 TCGv_i64 t2
= tcg_temp_new_i64();
2051 TCGv_i64 t3
= tcg_temp_new_i64();
2054 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2055 } else { /* n is expected to be 1 */
2056 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2057 tcg_gen_shli_tl(temp
, temp
, 1);
2058 /* catch special case r1 = r2 = 0x8000 */
2059 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2060 tcg_gen_sub_tl(temp
, temp
, temp2
);
2062 tcg_gen_ext_i32_i64(t2
, temp
);
2063 tcg_gen_shli_i64(t2
, t2
, 16);
2064 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2065 gen_sub64_d(t3
, t1
, t2
);
2066 /* write back result */
2067 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
2069 tcg_temp_free_i64(t1
);
2070 tcg_temp_free_i64(t2
);
2071 tcg_temp_free_i64(t3
);
2072 tcg_temp_free(temp
);
2073 tcg_temp_free(temp2
);
2077 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2078 TCGv arg3
, uint32_t n
)
2080 TCGv temp
= tcg_temp_new();
2081 TCGv temp2
= tcg_temp_new();
2082 TCGv_i64 t1
= tcg_temp_new_i64();
2083 TCGv_i64 t2
= tcg_temp_new_i64();
2086 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2087 } else { /* n is expected to be 1 */
2088 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2089 tcg_gen_shli_tl(temp
, temp
, 1);
2090 /* catch special case r1 = r2 = 0x8000 */
2091 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2092 tcg_gen_sub_tl(temp
, temp
, temp2
);
2094 tcg_gen_ext_i32_i64(t2
, temp
);
2095 tcg_gen_shli_i64(t2
, t2
, 16);
2096 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2098 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
2099 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
2101 tcg_temp_free(temp
);
2102 tcg_temp_free(temp2
);
2103 tcg_temp_free_i64(t1
);
2104 tcg_temp_free_i64(t2
);
2108 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2109 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
2111 TCGv_i64 t1
= tcg_temp_new_i64();
2112 TCGv_i64 t2
= tcg_temp_new_i64();
2113 TCGv_i64 t3
= tcg_temp_new_i64();
2114 TCGv_i64 t4
= tcg_temp_new_i64();
2117 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2118 tcg_gen_ext_i32_i64(t2
, arg2
);
2119 tcg_gen_ext_i32_i64(t3
, arg3
);
2121 tcg_gen_mul_i64(t2
, t2
, t3
);
2123 tcg_gen_shli_i64(t2
, t2
, 1);
2125 tcg_gen_sub_i64(t4
, t1
, t2
);
2127 tcg_gen_xor_i64(t3
, t4
, t1
);
2128 tcg_gen_xor_i64(t2
, t1
, t2
);
2129 tcg_gen_and_i64(t3
, t3
, t2
);
2130 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
2131 /* We produce an overflow on the host if the mul before was
2132 (0x80000000 * 0x80000000) << 1). If this is the
2133 case, we negate the ovf. */
2135 temp
= tcg_temp_new();
2136 temp2
= tcg_temp_new();
2137 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
2138 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
2139 tcg_gen_and_tl(temp
, temp
, temp2
);
2140 tcg_gen_shli_tl(temp
, temp
, 31);
2141 /* negate v bit, if special condition */
2142 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2144 tcg_temp_free(temp
);
2145 tcg_temp_free(temp2
);
2147 /* write back result */
2148 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
2150 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2151 /* Calc AV/SAV bits */
2152 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2153 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2155 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2157 tcg_temp_free_i64(t1
);
2158 tcg_temp_free_i64(t2
);
2159 tcg_temp_free_i64(t3
);
2160 tcg_temp_free_i64(t4
);
2164 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
2167 TCGv_i64 t1
= tcg_temp_new_i64();
2168 TCGv_i64 t2
= tcg_temp_new_i64();
2169 TCGv_i64 t3
= tcg_temp_new_i64();
2170 TCGv_i64 t4
= tcg_temp_new_i64();
2172 tcg_gen_ext_i32_i64(t1
, arg1
);
2173 tcg_gen_ext_i32_i64(t2
, arg2
);
2174 tcg_gen_ext_i32_i64(t3
, arg3
);
2176 tcg_gen_mul_i64(t2
, t2
, t3
);
2177 /* if we shift part of the fraction out, we need to round up */
2178 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
2179 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
2180 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
2181 tcg_gen_add_i64(t3
, t3
, t4
);
2183 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
2185 tcg_temp_free_i64(t1
);
2186 tcg_temp_free_i64(t2
);
2187 tcg_temp_free_i64(t3
);
2188 tcg_temp_free_i64(t4
);
2192 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2193 TCGv arg3
, uint32_t n
)
2195 TCGv_i64 r1
= tcg_temp_new_i64();
2196 TCGv temp
= tcg_const_i32(n
);
2198 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
2199 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
2200 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
2202 tcg_temp_free_i64(r1
);
2203 tcg_temp_free(temp
);
2207 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2208 TCGv r3
, uint32_t n
, uint32_t mode
)
2210 TCGv temp
= tcg_const_i32(n
);
2211 TCGv temp2
= tcg_temp_new();
2212 TCGv_i64 temp64
= tcg_temp_new_i64();
2215 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2218 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2221 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2224 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2227 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2228 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
2229 tcg_gen_add_tl
, tcg_gen_sub_tl
);
2230 tcg_temp_free(temp
);
2231 tcg_temp_free(temp2
);
2232 tcg_temp_free_i64(temp64
);
2236 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2237 TCGv r3
, uint32_t n
, uint32_t mode
)
2239 TCGv temp
= tcg_const_i32(n
);
2240 TCGv_i64 temp64
= tcg_temp_new_i64();
2241 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2242 TCGv_i64 temp64_3
= tcg_temp_new_i64();
2245 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2248 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2251 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2254 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2257 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
2258 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2259 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2260 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2261 tcg_gen_shli_i64(temp64
, temp64
, 16);
2263 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
2264 /* write back result */
2265 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
2267 tcg_temp_free(temp
);
2268 tcg_temp_free_i64(temp64
);
2269 tcg_temp_free_i64(temp64_2
);
2270 tcg_temp_free_i64(temp64_3
);
2274 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2276 TCGv temp
= tcg_const_i32(n
);
2277 TCGv temp2
= tcg_temp_new();
2278 TCGv_i64 temp64
= tcg_temp_new_i64();
2281 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2284 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2287 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2290 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2293 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2294 tcg_gen_shli_tl(temp
, r1
, 16);
2295 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2297 tcg_temp_free(temp
);
2298 tcg_temp_free(temp2
);
2299 tcg_temp_free_i64(temp64
);
2303 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2304 TCGv r3
, uint32_t n
, uint32_t mode
)
2306 TCGv temp
= tcg_const_i32(n
);
2307 TCGv temp2
= tcg_temp_new();
2308 TCGv temp3
= tcg_temp_new();
2309 TCGv_i64 temp64
= tcg_temp_new_i64();
2313 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2316 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2319 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2322 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2325 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2326 gen_adds(ret_low
, r1_low
, temp
);
2327 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2328 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2329 gen_subs(ret_high
, r1_high
, temp2
);
2330 /* combine v bits */
2331 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2332 /* combine av bits */
2333 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2335 tcg_temp_free(temp
);
2336 tcg_temp_free(temp2
);
2337 tcg_temp_free(temp3
);
2338 tcg_temp_free_i64(temp64
);
2342 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2343 TCGv r3
, uint32_t n
, uint32_t mode
)
2345 TCGv temp
= tcg_const_i32(n
);
2346 TCGv_i64 temp64
= tcg_temp_new_i64();
2347 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2351 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2354 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2357 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2360 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2363 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2364 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2365 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2366 tcg_gen_shli_i64(temp64
, temp64
, 16);
2367 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2369 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2370 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2372 tcg_temp_free(temp
);
2373 tcg_temp_free_i64(temp64
);
2374 tcg_temp_free_i64(temp64_2
);
2378 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2380 TCGv temp
= tcg_const_i32(n
);
2381 TCGv temp2
= tcg_temp_new();
2382 TCGv_i64 temp64
= tcg_temp_new_i64();
2385 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2388 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2391 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2394 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2397 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2398 tcg_gen_shli_tl(temp
, r1
, 16);
2399 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2401 tcg_temp_free(temp
);
2402 tcg_temp_free(temp2
);
2403 tcg_temp_free_i64(temp64
);
2406 static inline void gen_abs(TCGv ret
, TCGv r1
)
2408 TCGv temp
= tcg_temp_new();
2409 TCGv t0
= tcg_const_i32(0);
2411 tcg_gen_neg_tl(temp
, r1
);
2412 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
2413 /* overflow can only happen, if r1 = 0x80000000 */
2414 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2415 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2417 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2419 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2420 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2422 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2424 tcg_temp_free(temp
);
2428 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2430 TCGv temp
= tcg_temp_new_i32();
2431 TCGv result
= tcg_temp_new_i32();
2433 tcg_gen_sub_tl(result
, r1
, r2
);
2434 tcg_gen_sub_tl(temp
, r2
, r1
);
2435 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2438 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2439 tcg_gen_xor_tl(temp
, result
, r2
);
2440 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2441 tcg_gen_xor_tl(temp
, r1
, r2
);
2442 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2444 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2446 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2447 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2449 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2450 /* write back result */
2451 tcg_gen_mov_tl(ret
, result
);
2453 tcg_temp_free(temp
);
2454 tcg_temp_free(result
);
2457 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2459 TCGv temp
= tcg_const_i32(con
);
2460 gen_absdif(ret
, r1
, temp
);
2461 tcg_temp_free(temp
);
2464 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2466 TCGv temp
= tcg_const_i32(con
);
2467 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2468 tcg_temp_free(temp
);
2471 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2473 TCGv high
= tcg_temp_new();
2474 TCGv low
= tcg_temp_new();
2476 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2477 tcg_gen_mov_tl(ret
, low
);
2479 tcg_gen_sari_tl(low
, low
, 31);
2480 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2481 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2483 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2485 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2486 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2488 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2490 tcg_temp_free(high
);
2494 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2496 TCGv temp
= tcg_const_i32(con
);
2497 gen_mul_i32s(ret
, r1
, temp
);
2498 tcg_temp_free(temp
);
2501 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2503 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2505 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2507 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2509 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2510 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2512 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2515 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2518 TCGv temp
= tcg_const_i32(con
);
2519 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2520 tcg_temp_free(temp
);
2523 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2525 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2527 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2529 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2531 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2532 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2534 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2537 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2540 TCGv temp
= tcg_const_i32(con
);
2541 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2542 tcg_temp_free(temp
);
2545 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2547 TCGv temp
= tcg_const_i32(con
);
2548 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2549 tcg_temp_free(temp
);
2552 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2554 TCGv temp
= tcg_const_i32(con
);
2555 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2556 tcg_temp_free(temp
);
2558 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2559 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2561 TCGv temp
= tcg_const_i32(con
);
2562 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2563 tcg_temp_free(temp
);
2566 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2568 TCGv temp
= tcg_const_i32(con
);
2569 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2570 tcg_temp_free(temp
);
2574 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2576 TCGv temp
= tcg_temp_new();
2577 TCGv_i64 temp_64
= tcg_temp_new_i64();
2578 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2581 if (up_shift
== 32) {
2582 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2583 } else if (up_shift
== 16) {
2584 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2585 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2587 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2588 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2589 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2591 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2594 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2595 } else { /* n is expected to be 1 */
2596 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2597 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2599 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2601 if (up_shift
== 0) {
2602 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2604 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2606 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2607 /* overflow only occurs if r1 = r2 = 0x8000 */
2608 if (up_shift
== 0) {/* result is 64 bit */
2609 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2611 } else { /* result is 32 bit */
2612 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2615 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2616 /* calc sv overflow bit */
2617 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2619 /* calc av overflow bit */
2620 if (up_shift
== 0) {
2621 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2622 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2624 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2625 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2627 /* calc sav overflow bit */
2628 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2629 tcg_temp_free(temp
);
2630 tcg_temp_free_i64(temp_64
);
2631 tcg_temp_free_i64(temp2_64
);
2635 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2637 TCGv temp
= tcg_temp_new();
2639 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2640 } else { /* n is expected to be 1 */
2641 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2642 tcg_gen_shli_tl(ret
, ret
, 1);
2643 /* catch special case r1 = r2 = 0x8000 */
2644 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2645 tcg_gen_sub_tl(ret
, ret
, temp
);
2648 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2649 /* calc av overflow bit */
2650 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2651 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2652 /* calc sav overflow bit */
2653 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2655 tcg_temp_free(temp
);
2658 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2660 TCGv temp
= tcg_temp_new();
2662 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2663 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2665 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2666 tcg_gen_shli_tl(ret
, ret
, 1);
2667 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2668 /* catch special case r1 = r2 = 0x8000 */
2669 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2670 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2671 tcg_gen_sub_tl(ret
, ret
, temp
);
2674 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2675 /* calc av overflow bit */
2676 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2677 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2678 /* calc sav overflow bit */
2679 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2680 /* cut halfword off */
2681 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2683 tcg_temp_free(temp
);
2687 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2690 TCGv_i64 temp64
= tcg_temp_new_i64();
2691 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2692 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2693 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2694 tcg_temp_free_i64(temp64
);
2698 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2701 TCGv temp
= tcg_const_i32(con
);
2702 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2703 tcg_temp_free(temp
);
2707 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2710 TCGv_i64 temp64
= tcg_temp_new_i64();
2711 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2712 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2713 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2714 tcg_temp_free_i64(temp64
);
2718 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2721 TCGv temp
= tcg_const_i32(con
);
2722 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2723 tcg_temp_free(temp
);
2726 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2728 TCGv temp
= tcg_const_i32(con
);
2729 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2730 tcg_temp_free(temp
);
2733 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2735 TCGv temp
= tcg_const_i32(con
);
2736 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2737 tcg_temp_free(temp
);
2741 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2744 TCGv_i64 temp64
= tcg_temp_new_i64();
2745 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2746 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2747 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2748 tcg_temp_free_i64(temp64
);
2752 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2755 TCGv temp
= tcg_const_i32(con
);
2756 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2757 tcg_temp_free(temp
);
2761 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2764 TCGv_i64 temp64
= tcg_temp_new_i64();
2765 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2766 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2767 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2768 tcg_temp_free_i64(temp64
);
2772 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2775 TCGv temp
= tcg_const_i32(con
);
2776 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2777 tcg_temp_free(temp
);
2780 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2782 TCGv sat_neg
= tcg_const_i32(low
);
2783 TCGv temp
= tcg_const_i32(up
);
2785 /* sat_neg = (arg < low ) ? low : arg; */
2786 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2788 /* ret = (sat_neg > up ) ? up : sat_neg; */
2789 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2791 tcg_temp_free(sat_neg
);
2792 tcg_temp_free(temp
);
2795 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2797 TCGv temp
= tcg_const_i32(up
);
2798 /* sat_neg = (arg > up ) ? up : arg; */
2799 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2800 tcg_temp_free(temp
);
2803 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2805 if (shift_count
== -32) {
2806 tcg_gen_movi_tl(ret
, 0);
2807 } else if (shift_count
>= 0) {
2808 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2810 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2814 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2816 TCGv temp_low
, temp_high
;
2818 if (shiftcount
== -16) {
2819 tcg_gen_movi_tl(ret
, 0);
2821 temp_high
= tcg_temp_new();
2822 temp_low
= tcg_temp_new();
2824 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2825 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2826 gen_shi(temp_low
, temp_low
, shiftcount
);
2827 gen_shi(ret
, temp_high
, shiftcount
);
2828 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2830 tcg_temp_free(temp_low
);
2831 tcg_temp_free(temp_high
);
2835 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2837 uint32_t msk
, msk_start
;
2838 TCGv temp
= tcg_temp_new();
2839 TCGv temp2
= tcg_temp_new();
2840 TCGv t_0
= tcg_const_i32(0);
2842 if (shift_count
== 0) {
2843 /* Clear PSW.C and PSW.V */
2844 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2845 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2846 tcg_gen_mov_tl(ret
, r1
);
2847 } else if (shift_count
== -32) {
2849 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2850 /* fill ret completly with sign bit */
2851 tcg_gen_sari_tl(ret
, r1
, 31);
2853 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2854 } else if (shift_count
> 0) {
2855 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2856 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2859 msk_start
= 32 - shift_count
;
2860 msk
= ((1 << shift_count
) - 1) << msk_start
;
2861 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2862 /* calc v/sv bits */
2863 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2864 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2865 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2866 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2868 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2870 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2872 tcg_temp_free(t_max
);
2873 tcg_temp_free(t_min
);
2876 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2878 msk
= (1 << -shift_count
) - 1;
2879 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2881 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2883 /* calc av overflow bit */
2884 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2885 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2886 /* calc sav overflow bit */
2887 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2889 tcg_temp_free(temp
);
2890 tcg_temp_free(temp2
);
2894 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2896 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2899 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2901 TCGv temp
= tcg_const_i32(con
);
2902 gen_shas(ret
, r1
, temp
);
2903 tcg_temp_free(temp
);
2906 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2910 if (shift_count
== 0) {
2911 tcg_gen_mov_tl(ret
, r1
);
2912 } else if (shift_count
> 0) {
2913 low
= tcg_temp_new();
2914 high
= tcg_temp_new();
2916 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2917 tcg_gen_shli_tl(low
, r1
, shift_count
);
2918 tcg_gen_shli_tl(ret
, high
, shift_count
);
2919 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2922 tcg_temp_free(high
);
2924 low
= tcg_temp_new();
2925 high
= tcg_temp_new();
2927 tcg_gen_ext16s_tl(low
, r1
);
2928 tcg_gen_sari_tl(low
, low
, -shift_count
);
2929 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2930 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2933 tcg_temp_free(high
);
2938 /* ret = {ret[30:0], (r1 cond r2)}; */
2939 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2941 TCGv temp
= tcg_temp_new();
2942 TCGv temp2
= tcg_temp_new();
2944 tcg_gen_shli_tl(temp
, ret
, 1);
2945 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2946 tcg_gen_or_tl(ret
, temp
, temp2
);
2948 tcg_temp_free(temp
);
2949 tcg_temp_free(temp2
);
2952 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2954 TCGv temp
= tcg_const_i32(con
);
2955 gen_sh_cond(cond
, ret
, r1
, temp
);
2956 tcg_temp_free(temp
);
2959 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2961 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2964 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2966 TCGv temp
= tcg_const_i32(con
);
2967 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2968 tcg_temp_free(temp
);
2971 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2973 TCGv temp
= tcg_const_i32(con
);
2974 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2975 tcg_temp_free(temp
);
2978 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2980 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2983 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2985 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2988 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2990 void(*op1
)(TCGv
, TCGv
, TCGv
),
2991 void(*op2
)(TCGv
, TCGv
, TCGv
))
2995 temp1
= tcg_temp_new();
2996 temp2
= tcg_temp_new();
2998 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2999 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3001 (*op1
)(temp1
, temp1
, temp2
);
3002 (*op2
)(temp1
, ret
, temp1
);
3004 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
3006 tcg_temp_free(temp1
);
3007 tcg_temp_free(temp2
);
3010 /* ret = r1[pos1] op1 r2[pos2]; */
3011 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
3013 void(*op1
)(TCGv
, TCGv
, TCGv
))
3017 temp1
= tcg_temp_new();
3018 temp2
= tcg_temp_new();
3020 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3021 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3023 (*op1
)(ret
, temp1
, temp2
);
3025 tcg_gen_andi_tl(ret
, ret
, 0x1);
3027 tcg_temp_free(temp1
);
3028 tcg_temp_free(temp2
);
3031 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
3032 void(*op
)(TCGv
, TCGv
, TCGv
))
3034 TCGv temp
= tcg_temp_new();
3035 TCGv temp2
= tcg_temp_new();
3036 /* temp = (arg1 cond arg2 )*/
3037 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
3039 tcg_gen_andi_tl(temp2
, ret
, 0x1);
3040 /* temp = temp insn temp2 */
3041 (*op
)(temp
, temp
, temp2
);
3042 /* ret = {ret[31:1], temp} */
3043 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
3045 tcg_temp_free(temp
);
3046 tcg_temp_free(temp2
);
3050 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
3051 void(*op
)(TCGv
, TCGv
, TCGv
))
3053 TCGv temp
= tcg_const_i32(con
);
3054 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
3055 tcg_temp_free(temp
);
3058 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
3059 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
3061 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
3062 tcg_gen_neg_tl(ret
, ret
);
3065 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
3067 TCGv b0
= tcg_temp_new();
3068 TCGv b1
= tcg_temp_new();
3069 TCGv b2
= tcg_temp_new();
3070 TCGv b3
= tcg_temp_new();
3073 tcg_gen_andi_tl(b0
, r1
, 0xff);
3074 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
3077 tcg_gen_andi_tl(b1
, r1
, 0xff00);
3078 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
3081 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
3082 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
3085 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
3086 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
3089 tcg_gen_or_tl(ret
, b0
, b1
);
3090 tcg_gen_or_tl(ret
, ret
, b2
);
3091 tcg_gen_or_tl(ret
, ret
, b3
);
3099 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
3101 TCGv h0
= tcg_temp_new();
3102 TCGv h1
= tcg_temp_new();
3105 tcg_gen_andi_tl(h0
, r1
, 0xffff);
3106 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
3109 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
3110 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
3113 tcg_gen_or_tl(ret
, h0
, h1
);
3118 /* mask = ((1 << width) -1) << pos;
3119 ret = (r1 & ~mask) | (r2 << pos) & mask); */
3120 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
3122 TCGv mask
= tcg_temp_new();
3123 TCGv temp
= tcg_temp_new();
3124 TCGv temp2
= tcg_temp_new();
3126 tcg_gen_movi_tl(mask
, 1);
3127 tcg_gen_shl_tl(mask
, mask
, width
);
3128 tcg_gen_subi_tl(mask
, mask
, 1);
3129 tcg_gen_shl_tl(mask
, mask
, pos
);
3131 tcg_gen_shl_tl(temp
, r2
, pos
);
3132 tcg_gen_and_tl(temp
, temp
, mask
);
3133 tcg_gen_andc_tl(temp2
, r1
, mask
);
3134 tcg_gen_or_tl(ret
, temp
, temp2
);
3136 tcg_temp_free(mask
);
3137 tcg_temp_free(temp
);
3138 tcg_temp_free(temp2
);
3141 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
3143 TCGv_i64 temp
= tcg_temp_new_i64();
3145 gen_helper_bsplit(temp
, r1
);
3146 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3148 tcg_temp_free_i64(temp
);
3151 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
3153 TCGv_i64 temp
= tcg_temp_new_i64();
3155 gen_helper_unpack(temp
, r1
);
3156 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3158 tcg_temp_free_i64(temp
);
3162 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3164 TCGv_i64 ret
= tcg_temp_new_i64();
3166 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3167 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
3169 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
3171 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3173 tcg_temp_free_i64(ret
);
3177 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3179 TCGv_i64 ret
= tcg_temp_new_i64();
3181 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3182 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
3184 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
3186 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3188 tcg_temp_free_i64(ret
);
3191 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
3193 TCGv temp
= tcg_temp_new();
3195 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
3196 tcg_gen_xor_tl(temp
, temp
, arg_low
);
3197 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
3198 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
3199 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3201 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3202 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3203 tcg_temp_free(temp
);
3206 static void gen_calc_usb_mulr_h(TCGv arg
)
3208 TCGv temp
= tcg_temp_new();
3210 tcg_gen_add_tl(temp
, arg
, arg
);
3211 tcg_gen_xor_tl(temp
, temp
, arg
);
3212 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
3213 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3215 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3217 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3218 tcg_temp_free(temp
);
3221 /* helpers for generating program flow micro-ops */
3223 static inline void gen_save_pc(target_ulong pc
)
3225 tcg_gen_movi_tl(cpu_PC
, pc
);
3228 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
3230 TranslationBlock
*tb
;
3232 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
3233 likely(!ctx
->singlestep_enabled
)) {
3236 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
3239 if (ctx
->singlestep_enabled
) {
3240 /* raise exception debug */
3246 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3247 TCGv r2
, int16_t address
)
3249 TCGLabel
*jumpLabel
= gen_new_label();
3250 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
3252 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
3254 gen_set_label(jumpLabel
);
3255 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
3258 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3259 int r2
, int16_t address
)
3261 TCGv temp
= tcg_const_i32(r2
);
3262 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
3263 tcg_temp_free(temp
);
3266 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
3268 TCGLabel
*l1
= gen_new_label();
3270 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
3271 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
3272 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
3274 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
3277 static void gen_fcall_save_ctx(DisasContext
*ctx
)
3279 TCGv temp
= tcg_temp_new();
3281 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
3282 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
3283 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3284 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
3286 tcg_temp_free(temp
);
3289 static void gen_fret(DisasContext
*ctx
)
3291 TCGv temp
= tcg_temp_new();
3293 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
3294 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
3295 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
3296 tcg_gen_mov_tl(cpu_PC
, temp
);
3298 ctx
->bstate
= BS_BRANCH
;
3300 tcg_temp_free(temp
);
3303 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
3304 int r2
, int32_t constant
, int32_t offset
)
3310 /* SB-format jumps */
3313 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3315 case OPC1_32_B_CALL
:
3316 case OPC1_16_SB_CALL
:
3317 gen_helper_1arg(call
, ctx
->next_pc
);
3318 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3321 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
3323 case OPC1_16_SB_JNZ
:
3324 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
3326 /* SBC-format jumps */
3327 case OPC1_16_SBC_JEQ
:
3328 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
3330 case OPC1_16_SBC_JNE
:
3331 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
3333 /* SBRN-format jumps */
3334 case OPC1_16_SBRN_JZ_T
:
3335 temp
= tcg_temp_new();
3336 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3337 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3338 tcg_temp_free(temp
);
3340 case OPC1_16_SBRN_JNZ_T
:
3341 temp
= tcg_temp_new();
3342 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3343 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3344 tcg_temp_free(temp
);
3346 /* SBR-format jumps */
3347 case OPC1_16_SBR_JEQ
:
3348 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3351 case OPC1_16_SBR_JNE
:
3352 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3355 case OPC1_16_SBR_JNZ
:
3356 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
3358 case OPC1_16_SBR_JNZ_A
:
3359 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3361 case OPC1_16_SBR_JGEZ
:
3362 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
3364 case OPC1_16_SBR_JGTZ
:
3365 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
3367 case OPC1_16_SBR_JLEZ
:
3368 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
3370 case OPC1_16_SBR_JLTZ
:
3371 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
3373 case OPC1_16_SBR_JZ
:
3374 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
3376 case OPC1_16_SBR_JZ_A
:
3377 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3379 case OPC1_16_SBR_LOOP
:
3380 gen_loop(ctx
, r1
, offset
* 2 - 32);
3382 /* SR-format jumps */
3384 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3387 case OPC2_32_SYS_RET
:
3388 case OPC2_16_SR_RET
:
3389 gen_helper_ret(cpu_env
);
3393 case OPC1_32_B_CALLA
:
3394 gen_helper_1arg(call
, ctx
->next_pc
);
3395 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3397 case OPC1_32_B_FCALL
:
3398 gen_fcall_save_ctx(ctx
);
3399 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3401 case OPC1_32_B_FCALLA
:
3402 gen_fcall_save_ctx(ctx
);
3403 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3406 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3409 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3412 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3413 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3416 case OPCM_32_BRC_EQ_NEQ
:
3417 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3418 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3420 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3423 case OPCM_32_BRC_GE
:
3424 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3425 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3427 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3428 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3432 case OPCM_32_BRC_JLT
:
3433 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3434 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3436 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3437 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3441 case OPCM_32_BRC_JNE
:
3442 temp
= tcg_temp_new();
3443 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3444 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3445 /* subi is unconditional */
3446 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3447 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3449 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3450 /* addi is unconditional */
3451 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3452 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3454 tcg_temp_free(temp
);
3457 case OPCM_32_BRN_JTT
:
3458 n
= MASK_OP_BRN_N(ctx
->opcode
);
3460 temp
= tcg_temp_new();
3461 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3463 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3464 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3466 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3468 tcg_temp_free(temp
);
3471 case OPCM_32_BRR_EQ_NEQ
:
3472 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3473 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3476 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3480 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3481 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3482 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3485 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3489 case OPCM_32_BRR_GE
:
3490 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3491 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3494 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3498 case OPCM_32_BRR_JLT
:
3499 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3500 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3503 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3507 case OPCM_32_BRR_LOOP
:
3508 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3509 gen_loop(ctx
, r2
, offset
* 2);
3511 /* OPC2_32_BRR_LOOPU */
3512 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3515 case OPCM_32_BRR_JNE
:
3516 temp
= tcg_temp_new();
3517 temp2
= tcg_temp_new();
3518 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3519 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3520 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3521 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3522 /* subi is unconditional */
3523 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3524 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3526 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3527 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3528 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3529 /* addi is unconditional */
3530 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3531 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3533 tcg_temp_free(temp
);
3534 tcg_temp_free(temp2
);
3536 case OPCM_32_BRR_JNZ
:
3537 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3538 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3540 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3544 printf("Branch Error at %x\n", ctx
->pc
);
3546 ctx
->bstate
= BS_BRANCH
;
3551 * Functions for decoding instructions
3554 static void decode_src_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int op1
)
3560 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3561 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3564 case OPC1_16_SRC_ADD
:
3565 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3567 case OPC1_16_SRC_ADD_A15
:
3568 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3570 case OPC1_16_SRC_ADD_15A
:
3571 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3573 case OPC1_16_SRC_ADD_A
:
3574 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3576 case OPC1_16_SRC_CADD
:
3577 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3580 case OPC1_16_SRC_CADDN
:
3581 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3584 case OPC1_16_SRC_CMOV
:
3585 temp
= tcg_const_tl(0);
3586 temp2
= tcg_const_tl(const4
);
3587 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3588 temp2
, cpu_gpr_d
[r1
]);
3589 tcg_temp_free(temp
);
3590 tcg_temp_free(temp2
);
3592 case OPC1_16_SRC_CMOVN
:
3593 temp
= tcg_const_tl(0);
3594 temp2
= tcg_const_tl(const4
);
3595 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3596 temp2
, cpu_gpr_d
[r1
]);
3597 tcg_temp_free(temp
);
3598 tcg_temp_free(temp2
);
3600 case OPC1_16_SRC_EQ
:
3601 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3604 case OPC1_16_SRC_LT
:
3605 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3608 case OPC1_16_SRC_MOV
:
3609 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3611 case OPC1_16_SRC_MOV_A
:
3612 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3613 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3615 case OPC1_16_SRC_MOV_E
:
3616 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3617 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3618 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3619 } /* TODO: else raise illegal opcode trap */
3621 case OPC1_16_SRC_SH
:
3622 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3624 case OPC1_16_SRC_SHA
:
3625 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3630 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3635 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3636 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3639 case OPC1_16_SRR_ADD
:
3640 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3642 case OPC1_16_SRR_ADD_A15
:
3643 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3645 case OPC1_16_SRR_ADD_15A
:
3646 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3648 case OPC1_16_SRR_ADD_A
:
3649 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3651 case OPC1_16_SRR_ADDS
:
3652 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3654 case OPC1_16_SRR_AND
:
3655 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3657 case OPC1_16_SRR_CMOV
:
3658 temp
= tcg_const_tl(0);
3659 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3660 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3661 tcg_temp_free(temp
);
3663 case OPC1_16_SRR_CMOVN
:
3664 temp
= tcg_const_tl(0);
3665 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3666 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3667 tcg_temp_free(temp
);
3669 case OPC1_16_SRR_EQ
:
3670 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3673 case OPC1_16_SRR_LT
:
3674 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3677 case OPC1_16_SRR_MOV
:
3678 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3680 case OPC1_16_SRR_MOV_A
:
3681 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3683 case OPC1_16_SRR_MOV_AA
:
3684 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3686 case OPC1_16_SRR_MOV_D
:
3687 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3689 case OPC1_16_SRR_MUL
:
3690 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3692 case OPC1_16_SRR_OR
:
3693 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3695 case OPC1_16_SRR_SUB
:
3696 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3698 case OPC1_16_SRR_SUB_A15B
:
3699 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3701 case OPC1_16_SRR_SUB_15AB
:
3702 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3704 case OPC1_16_SRR_SUBS
:
3705 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3707 case OPC1_16_SRR_XOR
:
3708 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3713 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3717 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3718 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3721 case OPC1_16_SSR_ST_A
:
3722 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3724 case OPC1_16_SSR_ST_A_POSTINC
:
3725 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3726 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3728 case OPC1_16_SSR_ST_B
:
3729 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3731 case OPC1_16_SSR_ST_B_POSTINC
:
3732 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3733 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3735 case OPC1_16_SSR_ST_H
:
3736 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3738 case OPC1_16_SSR_ST_H_POSTINC
:
3739 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3740 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3742 case OPC1_16_SSR_ST_W
:
3743 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3745 case OPC1_16_SSR_ST_W_POSTINC
:
3746 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3747 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3752 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3756 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3759 case OPC1_16_SC_AND
:
3760 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3762 case OPC1_16_SC_BISR
:
3763 gen_helper_1arg(bisr
, const16
& 0xff);
3765 case OPC1_16_SC_LD_A
:
3766 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3768 case OPC1_16_SC_LD_W
:
3769 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3771 case OPC1_16_SC_MOV
:
3772 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3775 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3777 case OPC1_16_SC_ST_A
:
3778 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3780 case OPC1_16_SC_ST_W
:
3781 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3783 case OPC1_16_SC_SUB_A
:
3784 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3789 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3793 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3794 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3798 case OPC1_16_SLR_LD_A
:
3799 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3801 case OPC1_16_SLR_LD_A_POSTINC
:
3802 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3803 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3805 case OPC1_16_SLR_LD_BU
:
3806 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3808 case OPC1_16_SLR_LD_BU_POSTINC
:
3809 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3810 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3812 case OPC1_16_SLR_LD_H
:
3813 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3815 case OPC1_16_SLR_LD_H_POSTINC
:
3816 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3817 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3819 case OPC1_16_SLR_LD_W
:
3820 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3822 case OPC1_16_SLR_LD_W_POSTINC
:
3823 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3824 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3829 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3834 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3835 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3839 case OPC1_16_SRO_LD_A
:
3840 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3842 case OPC1_16_SRO_LD_BU
:
3843 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3845 case OPC1_16_SRO_LD_H
:
3846 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3848 case OPC1_16_SRO_LD_W
:
3849 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3851 case OPC1_16_SRO_ST_A
:
3852 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3854 case OPC1_16_SRO_ST_B
:
3855 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3857 case OPC1_16_SRO_ST_H
:
3858 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3860 case OPC1_16_SRO_ST_W
:
3861 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3866 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3869 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3872 case OPC2_16_SR_NOP
:
3874 case OPC2_16_SR_RET
:
3875 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3877 case OPC2_16_SR_RFE
:
3878 gen_helper_rfe(cpu_env
);
3880 ctx
->bstate
= BS_BRANCH
;
3882 case OPC2_16_SR_DEBUG
:
3883 /* raise EXCP_DEBUG */
3885 case OPC2_16_SR_FRET
:
3890 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3896 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3897 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3900 case OPC2_16_SR_RSUB
:
3901 /* overflow only if r1 = -0x80000000 */
3902 temp
= tcg_const_i32(-0x80000000);
3904 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3905 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3907 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3909 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3911 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3912 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3914 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3915 tcg_temp_free(temp
);
3917 case OPC2_16_SR_SAT_B
:
3918 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3920 case OPC2_16_SR_SAT_BU
:
3921 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3923 case OPC2_16_SR_SAT_H
:
3924 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3926 case OPC2_16_SR_SAT_HU
:
3927 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3932 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3940 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3942 /* handle ADDSC.A opcode only being 6 bit long */
3943 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3944 op1
= OPC1_16_SRRS_ADDSC_A
;
3948 case OPC1_16_SRC_ADD
:
3949 case OPC1_16_SRC_ADD_A15
:
3950 case OPC1_16_SRC_ADD_15A
:
3951 case OPC1_16_SRC_ADD_A
:
3952 case OPC1_16_SRC_CADD
:
3953 case OPC1_16_SRC_CADDN
:
3954 case OPC1_16_SRC_CMOV
:
3955 case OPC1_16_SRC_CMOVN
:
3956 case OPC1_16_SRC_EQ
:
3957 case OPC1_16_SRC_LT
:
3958 case OPC1_16_SRC_MOV
:
3959 case OPC1_16_SRC_MOV_A
:
3960 case OPC1_16_SRC_MOV_E
:
3961 case OPC1_16_SRC_SH
:
3962 case OPC1_16_SRC_SHA
:
3963 decode_src_opc(env
, ctx
, op1
);
3966 case OPC1_16_SRR_ADD
:
3967 case OPC1_16_SRR_ADD_A15
:
3968 case OPC1_16_SRR_ADD_15A
:
3969 case OPC1_16_SRR_ADD_A
:
3970 case OPC1_16_SRR_ADDS
:
3971 case OPC1_16_SRR_AND
:
3972 case OPC1_16_SRR_CMOV
:
3973 case OPC1_16_SRR_CMOVN
:
3974 case OPC1_16_SRR_EQ
:
3975 case OPC1_16_SRR_LT
:
3976 case OPC1_16_SRR_MOV
:
3977 case OPC1_16_SRR_MOV_A
:
3978 case OPC1_16_SRR_MOV_AA
:
3979 case OPC1_16_SRR_MOV_D
:
3980 case OPC1_16_SRR_MUL
:
3981 case OPC1_16_SRR_OR
:
3982 case OPC1_16_SRR_SUB
:
3983 case OPC1_16_SRR_SUB_A15B
:
3984 case OPC1_16_SRR_SUB_15AB
:
3985 case OPC1_16_SRR_SUBS
:
3986 case OPC1_16_SRR_XOR
:
3987 decode_srr_opc(ctx
, op1
);
3990 case OPC1_16_SSR_ST_A
:
3991 case OPC1_16_SSR_ST_A_POSTINC
:
3992 case OPC1_16_SSR_ST_B
:
3993 case OPC1_16_SSR_ST_B_POSTINC
:
3994 case OPC1_16_SSR_ST_H
:
3995 case OPC1_16_SSR_ST_H_POSTINC
:
3996 case OPC1_16_SSR_ST_W
:
3997 case OPC1_16_SSR_ST_W_POSTINC
:
3998 decode_ssr_opc(ctx
, op1
);
4001 case OPC1_16_SRRS_ADDSC_A
:
4002 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
4003 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
4004 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
4005 temp
= tcg_temp_new();
4006 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
4007 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
4008 tcg_temp_free(temp
);
4011 case OPC1_16_SLRO_LD_A
:
4012 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4013 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4014 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4016 case OPC1_16_SLRO_LD_BU
:
4017 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4018 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4019 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4021 case OPC1_16_SLRO_LD_H
:
4022 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4023 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4024 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4026 case OPC1_16_SLRO_LD_W
:
4027 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4028 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4029 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4032 case OPC1_16_SB_CALL
:
4034 case OPC1_16_SB_JNZ
:
4036 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
4037 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
4040 case OPC1_16_SBC_JEQ
:
4041 case OPC1_16_SBC_JNE
:
4042 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
4043 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
4044 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4047 case OPC1_16_SBRN_JNZ_T
:
4048 case OPC1_16_SBRN_JZ_T
:
4049 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
4050 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
4051 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4054 case OPC1_16_SBR_JEQ
:
4055 case OPC1_16_SBR_JGEZ
:
4056 case OPC1_16_SBR_JGTZ
:
4057 case OPC1_16_SBR_JLEZ
:
4058 case OPC1_16_SBR_JLTZ
:
4059 case OPC1_16_SBR_JNE
:
4060 case OPC1_16_SBR_JNZ
:
4061 case OPC1_16_SBR_JNZ_A
:
4062 case OPC1_16_SBR_JZ
:
4063 case OPC1_16_SBR_JZ_A
:
4064 case OPC1_16_SBR_LOOP
:
4065 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
4066 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
4067 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
4070 case OPC1_16_SC_AND
:
4071 case OPC1_16_SC_BISR
:
4072 case OPC1_16_SC_LD_A
:
4073 case OPC1_16_SC_LD_W
:
4074 case OPC1_16_SC_MOV
:
4076 case OPC1_16_SC_ST_A
:
4077 case OPC1_16_SC_ST_W
:
4078 case OPC1_16_SC_SUB_A
:
4079 decode_sc_opc(ctx
, op1
);
4082 case OPC1_16_SLR_LD_A
:
4083 case OPC1_16_SLR_LD_A_POSTINC
:
4084 case OPC1_16_SLR_LD_BU
:
4085 case OPC1_16_SLR_LD_BU_POSTINC
:
4086 case OPC1_16_SLR_LD_H
:
4087 case OPC1_16_SLR_LD_H_POSTINC
:
4088 case OPC1_16_SLR_LD_W
:
4089 case OPC1_16_SLR_LD_W_POSTINC
:
4090 decode_slr_opc(ctx
, op1
);
4093 case OPC1_16_SRO_LD_A
:
4094 case OPC1_16_SRO_LD_BU
:
4095 case OPC1_16_SRO_LD_H
:
4096 case OPC1_16_SRO_LD_W
:
4097 case OPC1_16_SRO_ST_A
:
4098 case OPC1_16_SRO_ST_B
:
4099 case OPC1_16_SRO_ST_H
:
4100 case OPC1_16_SRO_ST_W
:
4101 decode_sro_opc(ctx
, op1
);
4104 case OPC1_16_SSRO_ST_A
:
4105 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4106 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4107 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4109 case OPC1_16_SSRO_ST_B
:
4110 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4111 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4112 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4114 case OPC1_16_SSRO_ST_H
:
4115 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4116 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4117 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4119 case OPC1_16_SSRO_ST_W
:
4120 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4121 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4122 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4125 case OPCM_16_SR_SYSTEM
:
4126 decode_sr_system(env
, ctx
);
4128 case OPCM_16_SR_ACCU
:
4129 decode_sr_accu(env
, ctx
);
4132 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4133 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
4135 case OPC1_16_SR_NOT
:
4136 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4137 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
4143 * 32 bit instructions
4147 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
4154 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4155 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4156 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4158 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4161 case OPC2_32_ABS_LD_A
:
4162 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4164 case OPC2_32_ABS_LD_D
:
4165 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4167 case OPC2_32_ABS_LD_DA
:
4168 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4170 case OPC2_32_ABS_LD_W
:
4171 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4175 tcg_temp_free(temp
);
4178 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
4185 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4186 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4187 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4189 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4192 case OPC2_32_ABS_LD_B
:
4193 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
4195 case OPC2_32_ABS_LD_BU
:
4196 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4198 case OPC2_32_ABS_LD_H
:
4199 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
4201 case OPC2_32_ABS_LD_HU
:
4202 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4206 tcg_temp_free(temp
);
4209 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
4216 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4217 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4218 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4220 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4223 case OPC2_32_ABS_LDMST
:
4224 gen_ldmst(ctx
, r1
, temp
);
4226 case OPC2_32_ABS_SWAP_W
:
4227 gen_swap(ctx
, r1
, temp
);
4231 tcg_temp_free(temp
);
4234 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
4239 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4240 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4243 case OPC2_32_ABS_LDLCX
:
4244 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
4246 case OPC2_32_ABS_LDUCX
:
4247 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
4249 case OPC2_32_ABS_STLCX
:
4250 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
4252 case OPC2_32_ABS_STUCX
:
4253 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
4258 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
4265 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4266 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4267 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4269 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4272 case OPC2_32_ABS_ST_A
:
4273 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4275 case OPC2_32_ABS_ST_D
:
4276 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4278 case OPC2_32_ABS_ST_DA
:
4279 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4281 case OPC2_32_ABS_ST_W
:
4282 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4286 tcg_temp_free(temp
);
4289 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
4296 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4297 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4298 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4300 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4303 case OPC2_32_ABS_ST_B
:
4304 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4306 case OPC2_32_ABS_ST_H
:
4307 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4310 tcg_temp_free(temp
);
4315 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
4321 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4322 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4323 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4324 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4325 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4326 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4330 case OPC2_32_BIT_AND_AND_T
:
4331 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4332 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
4334 case OPC2_32_BIT_AND_ANDN_T
:
4335 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4336 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
4338 case OPC2_32_BIT_AND_NOR_T
:
4339 if (TCG_TARGET_HAS_andc_i32
) {
4340 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4341 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
4343 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4344 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
4347 case OPC2_32_BIT_AND_OR_T
:
4348 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4349 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4354 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
4359 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4360 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4361 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4362 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4363 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4364 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4367 case OPC2_32_BIT_AND_T
:
4368 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4369 pos1
, pos2
, &tcg_gen_and_tl
);
4371 case OPC2_32_BIT_ANDN_T
:
4372 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4373 pos1
, pos2
, &tcg_gen_andc_tl
);
4375 case OPC2_32_BIT_NOR_T
:
4376 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4377 pos1
, pos2
, &tcg_gen_nor_tl
);
4379 case OPC2_32_BIT_OR_T
:
4380 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4381 pos1
, pos2
, &tcg_gen_or_tl
);
4386 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4392 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4393 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4394 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4395 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4396 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4397 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4399 temp
= tcg_temp_new();
4401 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4402 if (op2
== OPC2_32_BIT_INSN_T
) {
4403 tcg_gen_not_tl(temp
, temp
);
4405 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4406 tcg_temp_free(temp
);
4409 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4416 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4417 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4418 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4419 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4420 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4421 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4424 case OPC2_32_BIT_NAND_T
:
4425 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4426 pos1
, pos2
, &tcg_gen_nand_tl
);
4428 case OPC2_32_BIT_ORN_T
:
4429 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4430 pos1
, pos2
, &tcg_gen_orc_tl
);
4432 case OPC2_32_BIT_XNOR_T
:
4433 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4434 pos1
, pos2
, &tcg_gen_eqv_tl
);
4436 case OPC2_32_BIT_XOR_T
:
4437 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4438 pos1
, pos2
, &tcg_gen_xor_tl
);
4443 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
4450 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4451 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4452 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4453 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4454 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4455 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4458 case OPC2_32_BIT_OR_AND_T
:
4459 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4460 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4462 case OPC2_32_BIT_OR_ANDN_T
:
4463 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4464 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4466 case OPC2_32_BIT_OR_NOR_T
:
4467 if (TCG_TARGET_HAS_orc_i32
) {
4468 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4469 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4471 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4472 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4475 case OPC2_32_BIT_OR_OR_T
:
4476 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4477 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4482 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
4489 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4490 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4491 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4492 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4493 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4494 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4496 temp
= tcg_temp_new();
4499 case OPC2_32_BIT_SH_AND_T
:
4500 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4501 pos1
, pos2
, &tcg_gen_and_tl
);
4503 case OPC2_32_BIT_SH_ANDN_T
:
4504 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4505 pos1
, pos2
, &tcg_gen_andc_tl
);
4507 case OPC2_32_BIT_SH_NOR_T
:
4508 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4509 pos1
, pos2
, &tcg_gen_nor_tl
);
4511 case OPC2_32_BIT_SH_OR_T
:
4512 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4513 pos1
, pos2
, &tcg_gen_or_tl
);
4516 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4517 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4518 tcg_temp_free(temp
);
4521 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4528 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4529 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4530 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4531 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4532 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4533 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4535 temp
= tcg_temp_new();
4538 case OPC2_32_BIT_SH_NAND_T
:
4539 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4540 pos1
, pos2
, &tcg_gen_nand_tl
);
4542 case OPC2_32_BIT_SH_ORN_T
:
4543 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4544 pos1
, pos2
, &tcg_gen_orc_tl
);
4546 case OPC2_32_BIT_SH_XNOR_T
:
4547 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4548 pos1
, pos2
, &tcg_gen_eqv_tl
);
4550 case OPC2_32_BIT_SH_XOR_T
:
4551 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4552 pos1
, pos2
, &tcg_gen_xor_tl
);
4555 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4556 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4557 tcg_temp_free(temp
);
4563 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
4571 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4572 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4573 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4574 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4577 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4578 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4579 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4580 /* instruction to access the cache */
4582 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4583 case OPC2_32_BO_CACHEA_W_POSTINC
:
4584 case OPC2_32_BO_CACHEA_I_POSTINC
:
4585 /* instruction to access the cache, but we still need to handle
4586 the addressing mode */
4587 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4589 case OPC2_32_BO_CACHEA_WI_PREINC
:
4590 case OPC2_32_BO_CACHEA_W_PREINC
:
4591 case OPC2_32_BO_CACHEA_I_PREINC
:
4592 /* instruction to access the cache, but we still need to handle
4593 the addressing mode */
4594 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4596 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4597 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4598 /* TODO: Raise illegal opcode trap,
4599 if !tricore_feature(TRICORE_FEATURE_131) */
4601 case OPC2_32_BO_CACHEI_W_POSTINC
:
4602 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4603 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4604 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4605 } /* TODO: else raise illegal opcode trap */
4607 case OPC2_32_BO_CACHEI_W_PREINC
:
4608 case OPC2_32_BO_CACHEI_WI_PREINC
:
4609 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4610 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4611 } /* TODO: else raise illegal opcode trap */
4613 case OPC2_32_BO_ST_A_SHORTOFF
:
4614 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4616 case OPC2_32_BO_ST_A_POSTINC
:
4617 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4619 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4621 case OPC2_32_BO_ST_A_PREINC
:
4622 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4624 case OPC2_32_BO_ST_B_SHORTOFF
:
4625 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4627 case OPC2_32_BO_ST_B_POSTINC
:
4628 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4630 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4632 case OPC2_32_BO_ST_B_PREINC
:
4633 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4635 case OPC2_32_BO_ST_D_SHORTOFF
:
4636 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4639 case OPC2_32_BO_ST_D_POSTINC
:
4640 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4641 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4643 case OPC2_32_BO_ST_D_PREINC
:
4644 temp
= tcg_temp_new();
4645 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4646 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4647 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4648 tcg_temp_free(temp
);
4650 case OPC2_32_BO_ST_DA_SHORTOFF
:
4651 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4654 case OPC2_32_BO_ST_DA_POSTINC
:
4655 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4656 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4658 case OPC2_32_BO_ST_DA_PREINC
:
4659 temp
= tcg_temp_new();
4660 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4661 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4662 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4663 tcg_temp_free(temp
);
4665 case OPC2_32_BO_ST_H_SHORTOFF
:
4666 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4668 case OPC2_32_BO_ST_H_POSTINC
:
4669 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4671 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4673 case OPC2_32_BO_ST_H_PREINC
:
4674 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4676 case OPC2_32_BO_ST_Q_SHORTOFF
:
4677 temp
= tcg_temp_new();
4678 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4679 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4680 tcg_temp_free(temp
);
4682 case OPC2_32_BO_ST_Q_POSTINC
:
4683 temp
= tcg_temp_new();
4684 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4685 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4687 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4688 tcg_temp_free(temp
);
4690 case OPC2_32_BO_ST_Q_PREINC
:
4691 temp
= tcg_temp_new();
4692 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4693 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4694 tcg_temp_free(temp
);
4696 case OPC2_32_BO_ST_W_SHORTOFF
:
4697 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4699 case OPC2_32_BO_ST_W_POSTINC
:
4700 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4702 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4704 case OPC2_32_BO_ST_W_PREINC
:
4705 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4710 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
4716 TCGv temp
, temp2
, temp3
;
4718 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4719 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4720 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4721 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4723 temp
= tcg_temp_new();
4724 temp2
= tcg_temp_new();
4725 temp3
= tcg_const_i32(off10
);
4727 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4728 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4731 case OPC2_32_BO_CACHEA_WI_BR
:
4732 case OPC2_32_BO_CACHEA_W_BR
:
4733 case OPC2_32_BO_CACHEA_I_BR
:
4734 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4736 case OPC2_32_BO_CACHEA_WI_CIRC
:
4737 case OPC2_32_BO_CACHEA_W_CIRC
:
4738 case OPC2_32_BO_CACHEA_I_CIRC
:
4739 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4741 case OPC2_32_BO_ST_A_BR
:
4742 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4743 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4745 case OPC2_32_BO_ST_A_CIRC
:
4746 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4747 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4749 case OPC2_32_BO_ST_B_BR
:
4750 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4751 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4753 case OPC2_32_BO_ST_B_CIRC
:
4754 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4755 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4757 case OPC2_32_BO_ST_D_BR
:
4758 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4759 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4761 case OPC2_32_BO_ST_D_CIRC
:
4762 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4763 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4764 tcg_gen_addi_tl(temp
, temp
, 4);
4765 tcg_gen_rem_tl(temp
, temp
, temp2
);
4766 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4767 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4768 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4770 case OPC2_32_BO_ST_DA_BR
:
4771 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4772 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4774 case OPC2_32_BO_ST_DA_CIRC
:
4775 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4776 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4777 tcg_gen_addi_tl(temp
, temp
, 4);
4778 tcg_gen_rem_tl(temp
, temp
, temp2
);
4779 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4780 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4781 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4783 case OPC2_32_BO_ST_H_BR
:
4784 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4785 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4787 case OPC2_32_BO_ST_H_CIRC
:
4788 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4789 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4791 case OPC2_32_BO_ST_Q_BR
:
4792 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4793 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4794 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4796 case OPC2_32_BO_ST_Q_CIRC
:
4797 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4798 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4799 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4801 case OPC2_32_BO_ST_W_BR
:
4802 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4803 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4805 case OPC2_32_BO_ST_W_CIRC
:
4806 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4807 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4810 tcg_temp_free(temp
);
4811 tcg_temp_free(temp2
);
4812 tcg_temp_free(temp3
);
4815 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
4823 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4824 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4825 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4826 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4829 case OPC2_32_BO_LD_A_SHORTOFF
:
4830 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4832 case OPC2_32_BO_LD_A_POSTINC
:
4833 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4835 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4837 case OPC2_32_BO_LD_A_PREINC
:
4838 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4840 case OPC2_32_BO_LD_B_SHORTOFF
:
4841 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4843 case OPC2_32_BO_LD_B_POSTINC
:
4844 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4846 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4848 case OPC2_32_BO_LD_B_PREINC
:
4849 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4851 case OPC2_32_BO_LD_BU_SHORTOFF
:
4852 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4854 case OPC2_32_BO_LD_BU_POSTINC
:
4855 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4857 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4859 case OPC2_32_BO_LD_BU_PREINC
:
4860 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4862 case OPC2_32_BO_LD_D_SHORTOFF
:
4863 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4866 case OPC2_32_BO_LD_D_POSTINC
:
4867 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4868 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4870 case OPC2_32_BO_LD_D_PREINC
:
4871 temp
= tcg_temp_new();
4872 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4873 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4874 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4875 tcg_temp_free(temp
);
4877 case OPC2_32_BO_LD_DA_SHORTOFF
:
4878 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4881 case OPC2_32_BO_LD_DA_POSTINC
:
4882 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4883 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4885 case OPC2_32_BO_LD_DA_PREINC
:
4886 temp
= tcg_temp_new();
4887 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4888 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4889 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4890 tcg_temp_free(temp
);
4892 case OPC2_32_BO_LD_H_SHORTOFF
:
4893 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4895 case OPC2_32_BO_LD_H_POSTINC
:
4896 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4898 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4900 case OPC2_32_BO_LD_H_PREINC
:
4901 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4903 case OPC2_32_BO_LD_HU_SHORTOFF
:
4904 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4906 case OPC2_32_BO_LD_HU_POSTINC
:
4907 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4909 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4911 case OPC2_32_BO_LD_HU_PREINC
:
4912 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4914 case OPC2_32_BO_LD_Q_SHORTOFF
:
4915 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4916 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4918 case OPC2_32_BO_LD_Q_POSTINC
:
4919 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4921 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4922 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4924 case OPC2_32_BO_LD_Q_PREINC
:
4925 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4926 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4928 case OPC2_32_BO_LD_W_SHORTOFF
:
4929 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4931 case OPC2_32_BO_LD_W_POSTINC
:
4932 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4934 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4936 case OPC2_32_BO_LD_W_PREINC
:
4937 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4942 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
4949 TCGv temp
, temp2
, temp3
;
4951 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4952 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4953 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4954 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4956 temp
= tcg_temp_new();
4957 temp2
= tcg_temp_new();
4958 temp3
= tcg_const_i32(off10
);
4960 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4961 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4965 case OPC2_32_BO_LD_A_BR
:
4966 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4967 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4969 case OPC2_32_BO_LD_A_CIRC
:
4970 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4971 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4973 case OPC2_32_BO_LD_B_BR
:
4974 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4975 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4977 case OPC2_32_BO_LD_B_CIRC
:
4978 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4979 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4981 case OPC2_32_BO_LD_BU_BR
:
4982 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4983 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4985 case OPC2_32_BO_LD_BU_CIRC
:
4986 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4987 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4989 case OPC2_32_BO_LD_D_BR
:
4990 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4991 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4993 case OPC2_32_BO_LD_D_CIRC
:
4994 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4995 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4996 tcg_gen_addi_tl(temp
, temp
, 4);
4997 tcg_gen_rem_tl(temp
, temp
, temp2
);
4998 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4999 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
5000 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5002 case OPC2_32_BO_LD_DA_BR
:
5003 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
5004 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5006 case OPC2_32_BO_LD_DA_CIRC
:
5007 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5008 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
5009 tcg_gen_addi_tl(temp
, temp
, 4);
5010 tcg_gen_rem_tl(temp
, temp
, temp2
);
5011 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5012 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
5013 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5015 case OPC2_32_BO_LD_H_BR
:
5016 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5017 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5019 case OPC2_32_BO_LD_H_CIRC
:
5020 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5021 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5023 case OPC2_32_BO_LD_HU_BR
:
5024 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5025 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5027 case OPC2_32_BO_LD_HU_CIRC
:
5028 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5029 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5031 case OPC2_32_BO_LD_Q_BR
:
5032 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5033 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5034 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5036 case OPC2_32_BO_LD_Q_CIRC
:
5037 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5038 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5039 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5041 case OPC2_32_BO_LD_W_BR
:
5042 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5043 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5045 case OPC2_32_BO_LD_W_CIRC
:
5046 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5047 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5050 tcg_temp_free(temp
);
5051 tcg_temp_free(temp2
);
5052 tcg_temp_free(temp3
);
5055 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
5064 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5065 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5066 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5067 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5070 temp
= tcg_temp_new();
5071 temp2
= tcg_temp_new();
5074 case OPC2_32_BO_LDLCX_SHORTOFF
:
5075 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5076 gen_helper_ldlcx(cpu_env
, temp
);
5078 case OPC2_32_BO_LDMST_SHORTOFF
:
5079 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5080 gen_ldmst(ctx
, r1
, temp
);
5082 case OPC2_32_BO_LDMST_POSTINC
:
5083 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5084 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5086 case OPC2_32_BO_LDMST_PREINC
:
5087 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5088 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5090 case OPC2_32_BO_LDUCX_SHORTOFF
:
5091 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5092 gen_helper_lducx(cpu_env
, temp
);
5094 case OPC2_32_BO_LEA_SHORTOFF
:
5095 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
5097 case OPC2_32_BO_STLCX_SHORTOFF
:
5098 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5099 gen_helper_stlcx(cpu_env
, temp
);
5101 case OPC2_32_BO_STUCX_SHORTOFF
:
5102 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5103 gen_helper_stucx(cpu_env
, temp
);
5105 case OPC2_32_BO_SWAP_W_SHORTOFF
:
5106 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5107 gen_swap(ctx
, r1
, temp
);
5109 case OPC2_32_BO_SWAP_W_POSTINC
:
5110 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5111 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5113 case OPC2_32_BO_SWAP_W_PREINC
:
5114 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5115 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5117 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
5118 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5119 gen_cmpswap(ctx
, r1
, temp
);
5121 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
5122 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5123 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5125 case OPC2_32_BO_CMPSWAP_W_PREINC
:
5126 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5127 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5129 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
5130 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5131 gen_swapmsk(ctx
, r1
, temp
);
5133 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
5134 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5135 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5137 case OPC2_32_BO_SWAPMSK_W_PREINC
:
5138 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5139 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5142 tcg_temp_free(temp
);
5143 tcg_temp_free(temp2
);
5146 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
5153 TCGv temp
, temp2
, temp3
;
5155 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5156 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5157 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5158 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5160 temp
= tcg_temp_new();
5161 temp2
= tcg_temp_new();
5162 temp3
= tcg_const_i32(off10
);
5164 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
5165 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5168 case OPC2_32_BO_LDMST_BR
:
5169 gen_ldmst(ctx
, r1
, temp2
);
5170 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5172 case OPC2_32_BO_LDMST_CIRC
:
5173 gen_ldmst(ctx
, r1
, temp2
);
5174 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5176 case OPC2_32_BO_SWAP_W_BR
:
5177 gen_swap(ctx
, r1
, temp2
);
5178 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5180 case OPC2_32_BO_SWAP_W_CIRC
:
5181 gen_swap(ctx
, r1
, temp2
);
5182 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5184 case OPC2_32_BO_CMPSWAP_W_BR
:
5185 gen_cmpswap(ctx
, r1
, temp2
);
5186 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5188 case OPC2_32_BO_CMPSWAP_W_CIRC
:
5189 gen_cmpswap(ctx
, r1
, temp2
);
5190 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5192 case OPC2_32_BO_SWAPMSK_W_BR
:
5193 gen_swapmsk(ctx
, r1
, temp2
);
5194 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5196 case OPC2_32_BO_SWAPMSK_W_CIRC
:
5197 gen_swapmsk(ctx
, r1
, temp2
);
5198 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5202 tcg_temp_free(temp
);
5203 tcg_temp_free(temp2
);
5204 tcg_temp_free(temp3
);
5207 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
5213 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
5214 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
5215 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
5218 case OPC1_32_BOL_LD_A_LONGOFF
:
5219 temp
= tcg_temp_new();
5220 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5221 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5222 tcg_temp_free(temp
);
5224 case OPC1_32_BOL_LD_W_LONGOFF
:
5225 temp
= tcg_temp_new();
5226 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5227 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5228 tcg_temp_free(temp
);
5230 case OPC1_32_BOL_LEA_LONGOFF
:
5231 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
5233 case OPC1_32_BOL_ST_A_LONGOFF
:
5234 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5235 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5237 /* raise illegal opcode trap */
5240 case OPC1_32_BOL_ST_W_LONGOFF
:
5241 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5243 case OPC1_32_BOL_LD_B_LONGOFF
:
5244 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5245 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5247 /* raise illegal opcode trap */
5250 case OPC1_32_BOL_LD_BU_LONGOFF
:
5251 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5252 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
5254 /* raise illegal opcode trap */
5257 case OPC1_32_BOL_LD_H_LONGOFF
:
5258 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5259 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5261 /* raise illegal opcode trap */
5264 case OPC1_32_BOL_LD_HU_LONGOFF
:
5265 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5266 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
5268 /* raise illegal opcode trap */
5271 case OPC1_32_BOL_ST_B_LONGOFF
:
5272 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5273 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5275 /* raise illegal opcode trap */
5278 case OPC1_32_BOL_ST_H_LONGOFF
:
5279 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5280 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5282 /* raise illegal opcode trap */
5289 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
5296 r2
= MASK_OP_RC_D(ctx
->opcode
);
5297 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5298 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5299 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5301 temp
= tcg_temp_new();
5304 case OPC2_32_RC_AND
:
5305 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5307 case OPC2_32_RC_ANDN
:
5308 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5310 case OPC2_32_RC_NAND
:
5311 tcg_gen_movi_tl(temp
, const9
);
5312 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5314 case OPC2_32_RC_NOR
:
5315 tcg_gen_movi_tl(temp
, const9
);
5316 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5319 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5321 case OPC2_32_RC_ORN
:
5322 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5325 const9
= sextract32(const9
, 0, 6);
5326 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5328 case OPC2_32_RC_SH_H
:
5329 const9
= sextract32(const9
, 0, 5);
5330 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5332 case OPC2_32_RC_SHA
:
5333 const9
= sextract32(const9
, 0, 6);
5334 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5336 case OPC2_32_RC_SHA_H
:
5337 const9
= sextract32(const9
, 0, 5);
5338 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5340 case OPC2_32_RC_SHAS
:
5341 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5343 case OPC2_32_RC_XNOR
:
5344 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5345 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5347 case OPC2_32_RC_XOR
:
5348 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5351 tcg_temp_free(temp
);
5354 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5362 r2
= MASK_OP_RC_D(ctx
->opcode
);
5363 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5364 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5366 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5368 temp
= tcg_temp_new();
5371 case OPC2_32_RC_ABSDIF
:
5372 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5374 case OPC2_32_RC_ABSDIFS
:
5375 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5377 case OPC2_32_RC_ADD
:
5378 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5380 case OPC2_32_RC_ADDC
:
5381 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5383 case OPC2_32_RC_ADDS
:
5384 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5386 case OPC2_32_RC_ADDS_U
:
5387 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5389 case OPC2_32_RC_ADDX
:
5390 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5392 case OPC2_32_RC_AND_EQ
:
5393 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5394 const9
, &tcg_gen_and_tl
);
5396 case OPC2_32_RC_AND_GE
:
5397 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5398 const9
, &tcg_gen_and_tl
);
5400 case OPC2_32_RC_AND_GE_U
:
5401 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5402 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5403 const9
, &tcg_gen_and_tl
);
5405 case OPC2_32_RC_AND_LT
:
5406 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5407 const9
, &tcg_gen_and_tl
);
5409 case OPC2_32_RC_AND_LT_U
:
5410 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5411 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5412 const9
, &tcg_gen_and_tl
);
5414 case OPC2_32_RC_AND_NE
:
5415 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5416 const9
, &tcg_gen_and_tl
);
5419 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5421 case OPC2_32_RC_EQANY_B
:
5422 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5424 case OPC2_32_RC_EQANY_H
:
5425 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5428 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5430 case OPC2_32_RC_GE_U
:
5431 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5432 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5435 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5437 case OPC2_32_RC_LT_U
:
5438 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5439 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5441 case OPC2_32_RC_MAX
:
5442 tcg_gen_movi_tl(temp
, const9
);
5443 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5444 cpu_gpr_d
[r1
], temp
);
5446 case OPC2_32_RC_MAX_U
:
5447 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5448 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5449 cpu_gpr_d
[r1
], temp
);
5451 case OPC2_32_RC_MIN
:
5452 tcg_gen_movi_tl(temp
, const9
);
5453 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5454 cpu_gpr_d
[r1
], temp
);
5456 case OPC2_32_RC_MIN_U
:
5457 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5458 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5459 cpu_gpr_d
[r1
], temp
);
5462 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5464 case OPC2_32_RC_OR_EQ
:
5465 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5466 const9
, &tcg_gen_or_tl
);
5468 case OPC2_32_RC_OR_GE
:
5469 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5470 const9
, &tcg_gen_or_tl
);
5472 case OPC2_32_RC_OR_GE_U
:
5473 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5474 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5475 const9
, &tcg_gen_or_tl
);
5477 case OPC2_32_RC_OR_LT
:
5478 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5479 const9
, &tcg_gen_or_tl
);
5481 case OPC2_32_RC_OR_LT_U
:
5482 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5483 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5484 const9
, &tcg_gen_or_tl
);
5486 case OPC2_32_RC_OR_NE
:
5487 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5488 const9
, &tcg_gen_or_tl
);
5490 case OPC2_32_RC_RSUB
:
5491 tcg_gen_movi_tl(temp
, const9
);
5492 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5494 case OPC2_32_RC_RSUBS
:
5495 tcg_gen_movi_tl(temp
, const9
);
5496 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5498 case OPC2_32_RC_RSUBS_U
:
5499 tcg_gen_movi_tl(temp
, const9
);
5500 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5502 case OPC2_32_RC_SH_EQ
:
5503 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5505 case OPC2_32_RC_SH_GE
:
5506 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5508 case OPC2_32_RC_SH_GE_U
:
5509 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5510 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5512 case OPC2_32_RC_SH_LT
:
5513 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5515 case OPC2_32_RC_SH_LT_U
:
5516 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5517 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5519 case OPC2_32_RC_SH_NE
:
5520 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5522 case OPC2_32_RC_XOR_EQ
:
5523 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5524 const9
, &tcg_gen_xor_tl
);
5526 case OPC2_32_RC_XOR_GE
:
5527 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5528 const9
, &tcg_gen_xor_tl
);
5530 case OPC2_32_RC_XOR_GE_U
:
5531 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5532 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5533 const9
, &tcg_gen_xor_tl
);
5535 case OPC2_32_RC_XOR_LT
:
5536 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5537 const9
, &tcg_gen_xor_tl
);
5539 case OPC2_32_RC_XOR_LT_U
:
5540 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5541 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5542 const9
, &tcg_gen_xor_tl
);
5544 case OPC2_32_RC_XOR_NE
:
5545 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5546 const9
, &tcg_gen_xor_tl
);
5549 tcg_temp_free(temp
);
5552 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
5557 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5558 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5561 case OPC2_32_RC_BISR
:
5562 gen_helper_1arg(bisr
, const9
);
5564 case OPC2_32_RC_SYSCALL
:
5565 /* TODO: Add exception generation */
5570 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5576 r2
= MASK_OP_RC_D(ctx
->opcode
);
5577 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5578 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5580 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5583 case OPC2_32_RC_MUL_32
:
5584 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5586 case OPC2_32_RC_MUL_64
:
5587 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5589 case OPC2_32_RC_MULS_32
:
5590 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5592 case OPC2_32_RC_MUL_U_64
:
5593 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5594 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5596 case OPC2_32_RC_MULS_U_32
:
5597 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5598 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5604 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5608 int32_t pos
, width
, const4
;
5612 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5613 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5614 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5615 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5616 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5617 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5620 case OPC2_32_RCPW_IMASK
:
5621 /* if pos + width > 31 undefined result */
5622 if (pos
+ width
<= 31) {
5623 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5624 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5627 case OPC2_32_RCPW_INSERT
:
5628 /* if pos + width > 32 undefined result */
5629 if (pos
+ width
<= 32) {
5630 temp
= tcg_const_i32(const4
);
5631 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5632 tcg_temp_free(temp
);
5640 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5644 int32_t width
, const4
;
5646 TCGv temp
, temp2
, temp3
;
5648 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5649 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5650 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5651 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5652 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5653 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5655 temp
= tcg_temp_new();
5656 temp2
= tcg_temp_new();
5659 case OPC2_32_RCRW_IMASK
:
5660 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
5661 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5662 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
5663 tcg_gen_movi_tl(temp2
, const4
);
5664 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
5666 case OPC2_32_RCRW_INSERT
:
5667 temp3
= tcg_temp_new();
5669 tcg_gen_movi_tl(temp
, width
);
5670 tcg_gen_movi_tl(temp2
, const4
);
5671 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
5672 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5674 tcg_temp_free(temp3
);
5677 tcg_temp_free(temp
);
5678 tcg_temp_free(temp2
);
5683 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
5691 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5692 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5693 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5694 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5695 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5698 case OPC2_32_RCR_CADD
:
5699 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5702 case OPC2_32_RCR_CADDN
:
5703 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5706 case OPC2_32_RCR_SEL
:
5707 temp
= tcg_const_i32(0);
5708 temp2
= tcg_const_i32(const9
);
5709 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5710 cpu_gpr_d
[r1
], temp2
);
5711 tcg_temp_free(temp
);
5712 tcg_temp_free(temp2
);
5714 case OPC2_32_RCR_SELN
:
5715 temp
= tcg_const_i32(0);
5716 temp2
= tcg_const_i32(const9
);
5717 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5718 cpu_gpr_d
[r1
], temp2
);
5719 tcg_temp_free(temp
);
5720 tcg_temp_free(temp2
);
5725 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5732 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5733 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5734 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5735 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5736 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5739 case OPC2_32_RCR_MADD_32
:
5740 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5742 case OPC2_32_RCR_MADD_64
:
5743 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5744 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5746 case OPC2_32_RCR_MADDS_32
:
5747 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5749 case OPC2_32_RCR_MADDS_64
:
5750 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5751 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5753 case OPC2_32_RCR_MADD_U_64
:
5754 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5755 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5756 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5758 case OPC2_32_RCR_MADDS_U_32
:
5759 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5760 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5762 case OPC2_32_RCR_MADDS_U_64
:
5763 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5764 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5765 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5770 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
5777 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5778 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5779 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5780 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5781 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5784 case OPC2_32_RCR_MSUB_32
:
5785 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5787 case OPC2_32_RCR_MSUB_64
:
5788 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5789 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5791 case OPC2_32_RCR_MSUBS_32
:
5792 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5794 case OPC2_32_RCR_MSUBS_64
:
5795 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5796 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5798 case OPC2_32_RCR_MSUB_U_64
:
5799 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5800 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5801 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5803 case OPC2_32_RCR_MSUBS_U_32
:
5804 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5805 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5807 case OPC2_32_RCR_MSUBS_U_64
:
5808 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5809 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5810 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5817 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
5823 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5824 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5825 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5828 case OPC1_32_RLC_ADDI
:
5829 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5831 case OPC1_32_RLC_ADDIH
:
5832 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5834 case OPC1_32_RLC_ADDIH_A
:
5835 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5837 case OPC1_32_RLC_MFCR
:
5838 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5839 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
5841 case OPC1_32_RLC_MOV
:
5842 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5844 case OPC1_32_RLC_MOV_64
:
5845 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5846 if ((r2
& 0x1) != 0) {
5847 /* TODO: raise OPD trap */
5849 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5850 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5852 /* TODO: raise illegal opcode trap */
5855 case OPC1_32_RLC_MOV_U
:
5856 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5857 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5859 case OPC1_32_RLC_MOV_H
:
5860 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5862 case OPC1_32_RLC_MOVH_A
:
5863 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5865 case OPC1_32_RLC_MTCR
:
5866 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5867 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
5873 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5878 r3
= MASK_OP_RR_D(ctx
->opcode
);
5879 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5880 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5881 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5884 case OPC2_32_RR_ABS
:
5885 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5887 case OPC2_32_RR_ABS_B
:
5888 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5890 case OPC2_32_RR_ABS_H
:
5891 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5893 case OPC2_32_RR_ABSDIF
:
5894 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5896 case OPC2_32_RR_ABSDIF_B
:
5897 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5900 case OPC2_32_RR_ABSDIF_H
:
5901 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5904 case OPC2_32_RR_ABSDIFS
:
5905 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5908 case OPC2_32_RR_ABSDIFS_H
:
5909 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5912 case OPC2_32_RR_ABSS
:
5913 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5915 case OPC2_32_RR_ABSS_H
:
5916 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5918 case OPC2_32_RR_ADD
:
5919 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5921 case OPC2_32_RR_ADD_B
:
5922 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5924 case OPC2_32_RR_ADD_H
:
5925 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5927 case OPC2_32_RR_ADDC
:
5928 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5930 case OPC2_32_RR_ADDS
:
5931 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5933 case OPC2_32_RR_ADDS_H
:
5934 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5937 case OPC2_32_RR_ADDS_HU
:
5938 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5941 case OPC2_32_RR_ADDS_U
:
5942 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5945 case OPC2_32_RR_ADDX
:
5946 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5948 case OPC2_32_RR_AND_EQ
:
5949 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5950 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5952 case OPC2_32_RR_AND_GE
:
5953 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5954 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5956 case OPC2_32_RR_AND_GE_U
:
5957 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5958 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5960 case OPC2_32_RR_AND_LT
:
5961 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5962 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5964 case OPC2_32_RR_AND_LT_U
:
5965 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5966 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5968 case OPC2_32_RR_AND_NE
:
5969 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5970 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5973 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5976 case OPC2_32_RR_EQ_B
:
5977 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5979 case OPC2_32_RR_EQ_H
:
5980 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5982 case OPC2_32_RR_EQ_W
:
5983 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5985 case OPC2_32_RR_EQANY_B
:
5986 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5988 case OPC2_32_RR_EQANY_H
:
5989 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5992 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5995 case OPC2_32_RR_GE_U
:
5996 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6000 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6003 case OPC2_32_RR_LT_U
:
6004 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6007 case OPC2_32_RR_LT_B
:
6008 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6010 case OPC2_32_RR_LT_BU
:
6011 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6013 case OPC2_32_RR_LT_H
:
6014 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6016 case OPC2_32_RR_LT_HU
:
6017 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6019 case OPC2_32_RR_LT_W
:
6020 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6022 case OPC2_32_RR_LT_WU
:
6023 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6025 case OPC2_32_RR_MAX
:
6026 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6027 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6029 case OPC2_32_RR_MAX_U
:
6030 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6031 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6033 case OPC2_32_RR_MAX_B
:
6034 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6036 case OPC2_32_RR_MAX_BU
:
6037 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6039 case OPC2_32_RR_MAX_H
:
6040 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6042 case OPC2_32_RR_MAX_HU
:
6043 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6045 case OPC2_32_RR_MIN
:
6046 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6047 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6049 case OPC2_32_RR_MIN_U
:
6050 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6051 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6053 case OPC2_32_RR_MIN_B
:
6054 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6056 case OPC2_32_RR_MIN_BU
:
6057 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6059 case OPC2_32_RR_MIN_H
:
6060 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6062 case OPC2_32_RR_MIN_HU
:
6063 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6065 case OPC2_32_RR_MOV
:
6066 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6069 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6072 case OPC2_32_RR_OR_EQ
:
6073 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6074 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6076 case OPC2_32_RR_OR_GE
:
6077 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6078 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6080 case OPC2_32_RR_OR_GE_U
:
6081 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6082 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6084 case OPC2_32_RR_OR_LT
:
6085 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6086 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6088 case OPC2_32_RR_OR_LT_U
:
6089 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6090 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6092 case OPC2_32_RR_OR_NE
:
6093 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6094 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6096 case OPC2_32_RR_SAT_B
:
6097 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
6099 case OPC2_32_RR_SAT_BU
:
6100 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
6102 case OPC2_32_RR_SAT_H
:
6103 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
6105 case OPC2_32_RR_SAT_HU
:
6106 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
6108 case OPC2_32_RR_SH_EQ
:
6109 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6112 case OPC2_32_RR_SH_GE
:
6113 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6116 case OPC2_32_RR_SH_GE_U
:
6117 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6120 case OPC2_32_RR_SH_LT
:
6121 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6124 case OPC2_32_RR_SH_LT_U
:
6125 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6128 case OPC2_32_RR_SH_NE
:
6129 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6132 case OPC2_32_RR_SUB
:
6133 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6135 case OPC2_32_RR_SUB_B
:
6136 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6138 case OPC2_32_RR_SUB_H
:
6139 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6141 case OPC2_32_RR_SUBC
:
6142 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6144 case OPC2_32_RR_SUBS
:
6145 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6147 case OPC2_32_RR_SUBS_U
:
6148 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6150 case OPC2_32_RR_SUBS_H
:
6151 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6154 case OPC2_32_RR_SUBS_HU
:
6155 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6158 case OPC2_32_RR_SUBX
:
6159 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6161 case OPC2_32_RR_XOR_EQ
:
6162 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6163 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6165 case OPC2_32_RR_XOR_GE
:
6166 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6167 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6169 case OPC2_32_RR_XOR_GE_U
:
6170 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6171 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6173 case OPC2_32_RR_XOR_LT
:
6174 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6175 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6177 case OPC2_32_RR_XOR_LT_U
:
6178 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6179 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6181 case OPC2_32_RR_XOR_NE
:
6182 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6183 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6188 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
6194 r3
= MASK_OP_RR_D(ctx
->opcode
);
6195 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6196 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6198 temp
= tcg_temp_new();
6199 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6202 case OPC2_32_RR_AND
:
6203 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6205 case OPC2_32_RR_ANDN
:
6206 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6208 case OPC2_32_RR_CLO
:
6209 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6211 case OPC2_32_RR_CLO_H
:
6212 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6214 case OPC2_32_RR_CLS
:
6215 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6217 case OPC2_32_RR_CLS_H
:
6218 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6220 case OPC2_32_RR_CLZ
:
6221 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6223 case OPC2_32_RR_CLZ_H
:
6224 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6226 case OPC2_32_RR_NAND
:
6227 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6229 case OPC2_32_RR_NOR
:
6230 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6233 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6235 case OPC2_32_RR_ORN
:
6236 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6239 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6241 case OPC2_32_RR_SH_H
:
6242 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6244 case OPC2_32_RR_SHA
:
6245 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6247 case OPC2_32_RR_SHA_H
:
6248 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6250 case OPC2_32_RR_SHAS
:
6251 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6253 case OPC2_32_RR_XNOR
:
6254 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6256 case OPC2_32_RR_XOR
:
6257 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6260 tcg_temp_free(temp
);
6263 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
6269 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6270 r3
= MASK_OP_RR_D(ctx
->opcode
);
6271 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6272 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6273 n
= MASK_OP_RR_N(ctx
->opcode
);
6276 case OPC2_32_RR_ADD_A
:
6277 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6279 case OPC2_32_RR_ADDSC_A
:
6280 temp
= tcg_temp_new();
6281 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
6282 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6283 tcg_temp_free(temp
);
6285 case OPC2_32_RR_ADDSC_AT
:
6286 temp
= tcg_temp_new();
6287 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6288 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6289 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6290 tcg_temp_free(temp
);
6292 case OPC2_32_RR_EQ_A
:
6293 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6296 case OPC2_32_RR_EQZ
:
6297 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6299 case OPC2_32_RR_GE_A
:
6300 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6303 case OPC2_32_RR_LT_A
:
6304 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6307 case OPC2_32_RR_MOV_A
:
6308 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6310 case OPC2_32_RR_MOV_AA
:
6311 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6313 case OPC2_32_RR_MOV_D
:
6314 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6316 case OPC2_32_RR_NE_A
:
6317 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6320 case OPC2_32_RR_NEZ_A
:
6321 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6323 case OPC2_32_RR_SUB_A
:
6324 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6329 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
6334 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6335 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6339 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6341 case OPC2_32_RR_JLI
:
6342 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
6343 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6345 case OPC2_32_RR_CALLI
:
6346 gen_helper_1arg(call
, ctx
->next_pc
);
6347 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6349 case OPC2_32_RR_FCALLI
:
6350 gen_fcall_save_ctx(ctx
);
6351 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6355 ctx
->bstate
= BS_BRANCH
;
6358 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6363 TCGv temp
, temp2
, temp3
;
6365 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6366 r3
= MASK_OP_RR_D(ctx
->opcode
);
6367 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6368 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6371 case OPC2_32_RR_BMERGE
:
6372 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6374 case OPC2_32_RR_BSPLIT
:
6375 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6377 case OPC2_32_RR_DVINIT_B
:
6378 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6381 case OPC2_32_RR_DVINIT_BU
:
6382 temp
= tcg_temp_new();
6383 temp2
= tcg_temp_new();
6384 temp3
= tcg_temp_new();
6386 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6388 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6389 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6390 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6391 tcg_gen_neg_tl(temp
, temp3
);
6392 /* use cpu_PSW_AV to compare against 0 */
6393 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6395 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6396 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6397 temp2
, cpu_gpr_d
[r2
]);
6398 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6400 /* overflow = (D[b] == 0) */
6401 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6403 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6405 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6407 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6408 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6410 tcg_temp_free(temp
);
6411 tcg_temp_free(temp2
);
6412 tcg_temp_free(temp3
);
6414 case OPC2_32_RR_DVINIT_H
:
6415 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6418 case OPC2_32_RR_DVINIT_HU
:
6419 temp
= tcg_temp_new();
6420 temp2
= tcg_temp_new();
6421 temp3
= tcg_temp_new();
6423 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6425 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6426 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6427 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6428 tcg_gen_neg_tl(temp
, temp3
);
6429 /* use cpu_PSW_AV to compare against 0 */
6430 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6432 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6433 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6434 temp2
, cpu_gpr_d
[r2
]);
6435 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6437 /* overflow = (D[b] == 0) */
6438 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6440 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6442 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6444 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6445 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6446 tcg_temp_free(temp
);
6447 tcg_temp_free(temp2
);
6448 tcg_temp_free(temp3
);
6450 case OPC2_32_RR_DVINIT
:
6451 temp
= tcg_temp_new();
6452 temp2
= tcg_temp_new();
6453 /* overflow = ((D[b] == 0) ||
6454 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6455 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6456 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6457 tcg_gen_and_tl(temp
, temp
, temp2
);
6458 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6459 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6460 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6462 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6464 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6466 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6467 /* sign extend to high reg */
6468 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6469 tcg_temp_free(temp
);
6470 tcg_temp_free(temp2
);
6472 case OPC2_32_RR_DVINIT_U
:
6473 /* overflow = (D[b] == 0) */
6474 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6475 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6477 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6479 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6481 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6482 /* zero extend to high reg*/
6483 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6485 case OPC2_32_RR_PARITY
:
6486 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6488 case OPC2_32_RR_UNPACK
:
6489 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6491 case OPC2_32_RR_CRC32
:
6492 if (tricore_feature(env
, TRICORE_FEATURE_161
)) {
6493 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6494 } /* TODO: else raise illegal opcode trap */
6496 case OPC2_32_RR_DIV
:
6497 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
6498 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6500 } /* TODO: else raise illegal opcode trap */
6502 case OPC2_32_RR_DIV_U
:
6503 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
6504 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6505 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6506 } /* TODO: else raise illegal opcode trap */
6512 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6520 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6521 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6522 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6523 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
6524 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6527 case OPC2_32_RR1_MUL_H_32_LL
:
6528 temp64
= tcg_temp_new_i64();
6529 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6530 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6531 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6532 tcg_temp_free_i64(temp64
);
6534 case OPC2_32_RR1_MUL_H_32_LU
:
6535 temp64
= tcg_temp_new_i64();
6536 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6537 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6538 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6539 tcg_temp_free_i64(temp64
);
6541 case OPC2_32_RR1_MUL_H_32_UL
:
6542 temp64
= tcg_temp_new_i64();
6543 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6544 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6545 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6546 tcg_temp_free_i64(temp64
);
6548 case OPC2_32_RR1_MUL_H_32_UU
:
6549 temp64
= tcg_temp_new_i64();
6550 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6551 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6552 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6553 tcg_temp_free_i64(temp64
);
6555 case OPC2_32_RR1_MULM_H_64_LL
:
6556 temp64
= tcg_temp_new_i64();
6557 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6558 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6560 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6562 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6563 tcg_temp_free_i64(temp64
);
6565 case OPC2_32_RR1_MULM_H_64_LU
:
6566 temp64
= tcg_temp_new_i64();
6567 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6568 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6570 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6572 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6573 tcg_temp_free_i64(temp64
);
6575 case OPC2_32_RR1_MULM_H_64_UL
:
6576 temp64
= tcg_temp_new_i64();
6577 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6578 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6580 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6582 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6583 tcg_temp_free_i64(temp64
);
6585 case OPC2_32_RR1_MULM_H_64_UU
:
6586 temp64
= tcg_temp_new_i64();
6587 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6588 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6590 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6592 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6593 tcg_temp_free_i64(temp64
);
6596 case OPC2_32_RR1_MULR_H_16_LL
:
6597 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6598 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6600 case OPC2_32_RR1_MULR_H_16_LU
:
6601 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6602 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6604 case OPC2_32_RR1_MULR_H_16_UL
:
6605 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6606 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6608 case OPC2_32_RR1_MULR_H_16_UU
:
6609 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6610 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6616 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
6624 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6625 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6626 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6627 n
= MASK_OP_RR1_N(ctx
->opcode
);
6628 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6630 temp
= tcg_temp_new();
6631 temp2
= tcg_temp_new();
6634 case OPC2_32_RR1_MUL_Q_32
:
6635 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6637 case OPC2_32_RR1_MUL_Q_64
:
6638 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6641 case OPC2_32_RR1_MUL_Q_32_L
:
6642 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6643 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6645 case OPC2_32_RR1_MUL_Q_64_L
:
6646 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6647 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6649 case OPC2_32_RR1_MUL_Q_32_U
:
6650 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6651 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6653 case OPC2_32_RR1_MUL_Q_64_U
:
6654 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6655 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6657 case OPC2_32_RR1_MUL_Q_32_LL
:
6658 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6659 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6660 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6662 case OPC2_32_RR1_MUL_Q_32_UU
:
6663 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6664 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6665 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6667 case OPC2_32_RR1_MULR_Q_32_L
:
6668 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6669 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6670 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6672 case OPC2_32_RR1_MULR_Q_32_U
:
6673 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6674 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6675 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6678 tcg_temp_free(temp
);
6679 tcg_temp_free(temp2
);
6683 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6688 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6689 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6690 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6691 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6693 case OPC2_32_RR2_MUL_32
:
6694 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6696 case OPC2_32_RR2_MUL_64
:
6697 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6700 case OPC2_32_RR2_MULS_32
:
6701 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6704 case OPC2_32_RR2_MUL_U_64
:
6705 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6708 case OPC2_32_RR2_MULS_U_32
:
6709 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6716 static void decode_rrpw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
6722 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6723 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6724 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6725 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6726 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6727 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6730 case OPC2_32_RRPW_EXTR
:
6731 if (pos
+ width
<= 31) {
6732 /* optimize special cases */
6733 if ((pos
== 0) && (width
== 8)) {
6734 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6735 } else if ((pos
== 0) && (width
== 16)) {
6736 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6738 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6739 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6743 case OPC2_32_RRPW_EXTR_U
:
6745 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6747 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6748 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6751 case OPC2_32_RRPW_IMASK
:
6752 if (pos
+ width
<= 31) {
6753 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], ((1u << width
) - 1) << pos
);
6754 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6757 case OPC2_32_RRPW_INSERT
:
6758 if (pos
+ width
<= 31) {
6759 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6767 static void decode_rrr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
6773 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6774 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6775 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6776 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6777 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6780 case OPC2_32_RRR_CADD
:
6781 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6782 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6784 case OPC2_32_RRR_CADDN
:
6785 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6788 case OPC2_32_RRR_CSUB
:
6789 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6792 case OPC2_32_RRR_CSUBN
:
6793 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6796 case OPC2_32_RRR_SEL
:
6797 temp
= tcg_const_i32(0);
6798 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6799 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6800 tcg_temp_free(temp
);
6802 case OPC2_32_RRR_SELN
:
6803 temp
= tcg_const_i32(0);
6804 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6805 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6806 tcg_temp_free(temp
);
6811 static void decode_rrr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6817 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6818 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6819 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6820 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6821 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6824 case OPC2_32_RRR_DVADJ
:
6825 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6826 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6828 case OPC2_32_RRR_DVSTEP
:
6829 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6830 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6832 case OPC2_32_RRR_DVSTEP_U
:
6833 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6834 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6836 case OPC2_32_RRR_IXMAX
:
6837 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6838 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6840 case OPC2_32_RRR_IXMAX_U
:
6841 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6842 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6844 case OPC2_32_RRR_IXMIN
:
6845 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6846 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6848 case OPC2_32_RRR_IXMIN_U
:
6849 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6850 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6852 case OPC2_32_RRR_PACK
:
6853 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6854 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6860 static void decode_rrr2_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6863 uint32_t r1
, r2
, r3
, r4
;
6865 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6866 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6867 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6868 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6869 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6871 case OPC2_32_RRR2_MADD_32
:
6872 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6875 case OPC2_32_RRR2_MADD_64
:
6876 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6877 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6879 case OPC2_32_RRR2_MADDS_32
:
6880 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6881 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6883 case OPC2_32_RRR2_MADDS_64
:
6884 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6885 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6887 case OPC2_32_RRR2_MADD_U_64
:
6888 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6889 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6891 case OPC2_32_RRR2_MADDS_U_32
:
6892 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6893 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6895 case OPC2_32_RRR2_MADDS_U_64
:
6896 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6897 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6902 static void decode_rrr2_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
6905 uint32_t r1
, r2
, r3
, r4
;
6907 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6908 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6909 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6910 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6911 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6914 case OPC2_32_RRR2_MSUB_32
:
6915 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6918 case OPC2_32_RRR2_MSUB_64
:
6919 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6920 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6922 case OPC2_32_RRR2_MSUBS_32
:
6923 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6924 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6926 case OPC2_32_RRR2_MSUBS_64
:
6927 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6928 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6930 case OPC2_32_RRR2_MSUB_U_64
:
6931 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6932 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6934 case OPC2_32_RRR2_MSUBS_U_32
:
6935 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6936 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6938 case OPC2_32_RRR2_MSUBS_U_64
:
6939 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6940 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6946 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6949 uint32_t r1
, r2
, r3
, r4
, n
;
6951 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6952 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6953 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6954 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6955 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6956 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6959 case OPC2_32_RRR1_MADD_H_LL
:
6960 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6961 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6963 case OPC2_32_RRR1_MADD_H_LU
:
6964 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6965 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6967 case OPC2_32_RRR1_MADD_H_UL
:
6968 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6969 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6971 case OPC2_32_RRR1_MADD_H_UU
:
6972 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6973 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6975 case OPC2_32_RRR1_MADDS_H_LL
:
6976 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6977 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6979 case OPC2_32_RRR1_MADDS_H_LU
:
6980 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6981 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6983 case OPC2_32_RRR1_MADDS_H_UL
:
6984 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6985 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6987 case OPC2_32_RRR1_MADDS_H_UU
:
6988 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6989 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6991 case OPC2_32_RRR1_MADDM_H_LL
:
6992 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6993 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6995 case OPC2_32_RRR1_MADDM_H_LU
:
6996 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6997 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6999 case OPC2_32_RRR1_MADDM_H_UL
:
7000 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7001 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7003 case OPC2_32_RRR1_MADDM_H_UU
:
7004 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7005 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7007 case OPC2_32_RRR1_MADDMS_H_LL
:
7008 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7009 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7011 case OPC2_32_RRR1_MADDMS_H_LU
:
7012 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7013 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7015 case OPC2_32_RRR1_MADDMS_H_UL
:
7016 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7017 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7019 case OPC2_32_RRR1_MADDMS_H_UU
:
7020 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7021 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7023 case OPC2_32_RRR1_MADDR_H_LL
:
7024 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7025 cpu_gpr_d
[r2
], n
, MODE_LL
);
7027 case OPC2_32_RRR1_MADDR_H_LU
:
7028 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7029 cpu_gpr_d
[r2
], n
, MODE_LU
);
7031 case OPC2_32_RRR1_MADDR_H_UL
:
7032 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7033 cpu_gpr_d
[r2
], n
, MODE_UL
);
7035 case OPC2_32_RRR1_MADDR_H_UU
:
7036 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7037 cpu_gpr_d
[r2
], n
, MODE_UU
);
7039 case OPC2_32_RRR1_MADDRS_H_LL
:
7040 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7041 cpu_gpr_d
[r2
], n
, MODE_LL
);
7043 case OPC2_32_RRR1_MADDRS_H_LU
:
7044 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7045 cpu_gpr_d
[r2
], n
, MODE_LU
);
7047 case OPC2_32_RRR1_MADDRS_H_UL
:
7048 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7049 cpu_gpr_d
[r2
], n
, MODE_UL
);
7051 case OPC2_32_RRR1_MADDRS_H_UU
:
7052 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7053 cpu_gpr_d
[r2
], n
, MODE_UU
);
7058 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7061 uint32_t r1
, r2
, r3
, r4
, n
;
7064 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7065 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7066 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7067 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7068 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7069 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7071 temp
= tcg_const_i32(n
);
7072 temp2
= tcg_temp_new();
7075 case OPC2_32_RRR1_MADD_Q_32
:
7076 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7077 cpu_gpr_d
[r2
], n
, 32, env
);
7079 case OPC2_32_RRR1_MADD_Q_64
:
7080 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7081 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7084 case OPC2_32_RRR1_MADD_Q_32_L
:
7085 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7086 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7089 case OPC2_32_RRR1_MADD_Q_64_L
:
7090 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7091 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7092 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7095 case OPC2_32_RRR1_MADD_Q_32_U
:
7096 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7097 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7100 case OPC2_32_RRR1_MADD_Q_64_U
:
7101 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7102 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7103 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7106 case OPC2_32_RRR1_MADD_Q_32_LL
:
7107 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7108 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7109 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7111 case OPC2_32_RRR1_MADD_Q_64_LL
:
7112 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7113 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7114 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7115 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7117 case OPC2_32_RRR1_MADD_Q_32_UU
:
7118 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7119 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7120 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7122 case OPC2_32_RRR1_MADD_Q_64_UU
:
7123 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7124 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7125 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7126 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7128 case OPC2_32_RRR1_MADDS_Q_32
:
7129 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7130 cpu_gpr_d
[r2
], n
, 32);
7132 case OPC2_32_RRR1_MADDS_Q_64
:
7133 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7134 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7137 case OPC2_32_RRR1_MADDS_Q_32_L
:
7138 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7139 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7142 case OPC2_32_RRR1_MADDS_Q_64_L
:
7143 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7144 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7145 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7148 case OPC2_32_RRR1_MADDS_Q_32_U
:
7149 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7150 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7153 case OPC2_32_RRR1_MADDS_Q_64_U
:
7154 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7155 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7156 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7159 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7160 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7161 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7162 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7164 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7165 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7166 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7167 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7168 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7170 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7171 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7172 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7173 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7175 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7176 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7177 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7178 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7179 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7181 case OPC2_32_RRR1_MADDR_H_64_UL
:
7182 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7183 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7185 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7186 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7187 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7189 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7190 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7191 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7192 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7194 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7195 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7196 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7197 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7199 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7200 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7201 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7202 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7204 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7205 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7206 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7207 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7210 tcg_temp_free(temp
);
7211 tcg_temp_free(temp2
);
7214 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7217 uint32_t r1
, r2
, r3
, r4
, n
;
7219 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7220 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7221 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7222 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7223 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7224 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7227 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7228 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7229 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7231 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7232 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7233 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7235 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7236 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7237 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7239 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7240 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7241 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7243 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7244 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7245 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7248 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7249 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7250 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7253 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7254 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7255 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7258 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7259 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7260 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7263 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7264 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7265 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7268 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7269 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7270 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7273 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7274 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7275 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7278 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7279 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7280 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7283 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7284 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7285 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7288 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7289 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7290 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7293 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7294 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7295 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7298 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7299 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7300 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7303 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7304 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7305 cpu_gpr_d
[r2
], n
, MODE_LL
);
7307 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7308 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7309 cpu_gpr_d
[r2
], n
, MODE_LU
);
7311 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7312 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7313 cpu_gpr_d
[r2
], n
, MODE_UL
);
7315 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7316 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7317 cpu_gpr_d
[r2
], n
, MODE_UU
);
7319 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7320 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7321 cpu_gpr_d
[r2
], n
, MODE_LL
);
7323 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7324 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7325 cpu_gpr_d
[r2
], n
, MODE_LU
);
7327 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7328 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7329 cpu_gpr_d
[r2
], n
, MODE_UL
);
7331 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7332 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7333 cpu_gpr_d
[r2
], n
, MODE_UU
);
7338 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
7341 uint32_t r1
, r2
, r3
, r4
, n
;
7343 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7344 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7345 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7346 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7347 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7348 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7351 case OPC2_32_RRR1_MSUB_H_LL
:
7352 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7353 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7355 case OPC2_32_RRR1_MSUB_H_LU
:
7356 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7357 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7359 case OPC2_32_RRR1_MSUB_H_UL
:
7360 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7361 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7363 case OPC2_32_RRR1_MSUB_H_UU
:
7364 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7365 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7367 case OPC2_32_RRR1_MSUBS_H_LL
:
7368 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7369 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7371 case OPC2_32_RRR1_MSUBS_H_LU
:
7372 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7373 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7375 case OPC2_32_RRR1_MSUBS_H_UL
:
7376 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7377 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7379 case OPC2_32_RRR1_MSUBS_H_UU
:
7380 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7381 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7383 case OPC2_32_RRR1_MSUBM_H_LL
:
7384 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7385 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7387 case OPC2_32_RRR1_MSUBM_H_LU
:
7388 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7389 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7391 case OPC2_32_RRR1_MSUBM_H_UL
:
7392 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7393 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7395 case OPC2_32_RRR1_MSUBM_H_UU
:
7396 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7397 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7399 case OPC2_32_RRR1_MSUBMS_H_LL
:
7400 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7401 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7403 case OPC2_32_RRR1_MSUBMS_H_LU
:
7404 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7405 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7407 case OPC2_32_RRR1_MSUBMS_H_UL
:
7408 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7409 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7411 case OPC2_32_RRR1_MSUBMS_H_UU
:
7412 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7413 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7415 case OPC2_32_RRR1_MSUBR_H_LL
:
7416 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7417 cpu_gpr_d
[r2
], n
, MODE_LL
);
7419 case OPC2_32_RRR1_MSUBR_H_LU
:
7420 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7421 cpu_gpr_d
[r2
], n
, MODE_LU
);
7423 case OPC2_32_RRR1_MSUBR_H_UL
:
7424 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7425 cpu_gpr_d
[r2
], n
, MODE_UL
);
7427 case OPC2_32_RRR1_MSUBR_H_UU
:
7428 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7429 cpu_gpr_d
[r2
], n
, MODE_UU
);
7431 case OPC2_32_RRR1_MSUBRS_H_LL
:
7432 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7433 cpu_gpr_d
[r2
], n
, MODE_LL
);
7435 case OPC2_32_RRR1_MSUBRS_H_LU
:
7436 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7437 cpu_gpr_d
[r2
], n
, MODE_LU
);
7439 case OPC2_32_RRR1_MSUBRS_H_UL
:
7440 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7441 cpu_gpr_d
[r2
], n
, MODE_UL
);
7443 case OPC2_32_RRR1_MSUBRS_H_UU
:
7444 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7445 cpu_gpr_d
[r2
], n
, MODE_UU
);
7450 static void decode_rrr1_msubq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7453 uint32_t r1
, r2
, r3
, r4
, n
;
7456 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7457 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7458 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7459 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7460 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7461 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7463 temp
= tcg_const_i32(n
);
7464 temp2
= tcg_temp_new();
7467 case OPC2_32_RRR1_MSUB_Q_32
:
7468 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7469 cpu_gpr_d
[r2
], n
, 32, env
);
7471 case OPC2_32_RRR1_MSUB_Q_64
:
7472 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7473 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7476 case OPC2_32_RRR1_MSUB_Q_32_L
:
7477 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7478 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7481 case OPC2_32_RRR1_MSUB_Q_64_L
:
7482 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7483 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7484 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7487 case OPC2_32_RRR1_MSUB_Q_32_U
:
7488 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7489 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7492 case OPC2_32_RRR1_MSUB_Q_64_U
:
7493 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7494 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7495 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7498 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7499 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7500 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7501 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7503 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7504 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7505 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7506 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7507 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7509 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7510 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7511 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7512 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7514 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7515 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7516 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7517 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7518 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7520 case OPC2_32_RRR1_MSUBS_Q_32
:
7521 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7522 cpu_gpr_d
[r2
], n
, 32);
7524 case OPC2_32_RRR1_MSUBS_Q_64
:
7525 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7526 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7529 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7530 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7531 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7534 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7535 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7536 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7537 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7540 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7541 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7542 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7545 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7546 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7547 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7548 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7551 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7552 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7553 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7554 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7556 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7557 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7558 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7559 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7560 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7562 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7563 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7564 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7565 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7567 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7568 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7569 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7570 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7571 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7573 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7574 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7575 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7577 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7578 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7579 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7581 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7582 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7583 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7584 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7586 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7587 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7588 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7589 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7591 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7592 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7593 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7594 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7596 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7597 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7598 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7599 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7602 tcg_temp_free(temp
);
7603 tcg_temp_free(temp2
);
7606 static void decode_rrr1_msubad_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7609 uint32_t r1
, r2
, r3
, r4
, n
;
7611 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7612 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7613 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7614 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7615 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7616 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7619 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7620 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7621 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7623 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7624 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7625 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7627 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7628 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7629 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7631 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7632 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7633 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7635 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7636 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7637 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7640 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7641 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7642 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7645 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7646 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7650 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7651 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7652 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7655 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7656 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7657 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7660 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7661 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7662 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7665 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7666 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7667 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7670 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7671 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7672 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7675 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7676 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7677 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7680 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7681 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7682 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7685 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7686 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7687 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7690 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7691 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7692 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7695 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7696 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7697 cpu_gpr_d
[r2
], n
, MODE_LL
);
7699 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7700 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7701 cpu_gpr_d
[r2
], n
, MODE_LU
);
7703 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7704 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7705 cpu_gpr_d
[r2
], n
, MODE_UL
);
7707 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7708 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7709 cpu_gpr_d
[r2
], n
, MODE_UU
);
7711 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7712 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7713 cpu_gpr_d
[r2
], n
, MODE_LL
);
7715 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7716 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7717 cpu_gpr_d
[r2
], n
, MODE_LU
);
7719 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7720 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7721 cpu_gpr_d
[r2
], n
, MODE_UL
);
7723 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7724 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7725 cpu_gpr_d
[r2
], n
, MODE_UU
);
/*
 * Decode RRRR-format bit-field instructions (DEXTR, EXTR, EXTR.U, INSERT)
 * where width and position come from a data-register pair rather than
 * immediates.  Emits TCG ops into the current translation block.
 */
static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv tmp_width, tmp_pos;

    r1 = MASK_OP_RRRR_S1(ctx->opcode);
    r2 = MASK_OP_RRRR_S2(ctx->opcode);
    r3 = MASK_OP_RRRR_S3(ctx->opcode);
    r4 = MASK_OP_RRRR_D(ctx->opcode);
    op2 = MASK_OP_RRRR_OP2(ctx->opcode);

    tmp_pos = tcg_temp_new();
    tmp_width = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRR_DEXTR:
        /* Extract 32 bits from the concatenation d[r1]:d[r2], starting at
           bit position d[r3][4:0] (only the low 5 bits are significant). */
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        if (r1 == r2) {
            /* Both halves identical: the extraction degenerates to a
               left rotation of d[r1] by the position amount. */
            tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        } else {
            /* High part from d[r1] shifted left, low part from d[r2]
               shifted right by (32 - pos), OR-ed together. */
            tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
            tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
            tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
            tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
        }
        break;
    case OPC2_32_RRRR_EXTR:
    case OPC2_32_RRRR_EXTR_U:
        /* Width comes from d[r3+1][4:0], position from d[r3][4:0]
           (register pair E[r3]). */
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        /* Left-shift so the field's top bit lands in bit 31 ... */
        tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
        tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
        if (op2 == OPC2_32_RRRR_EXTR) {
            /* ... then shift back arithmetically (sign-extending EXTR) */
            tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        } else {
            /* ... or logically (zero-extending EXTR.U) */
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        }
        break;
    case OPC2_32_RRRR_INSERT:
        /* Insert the low bits of d[r2] into d[r1] at the position/width
           given by register pair E[r3]; result in d[r4]. */
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
                   tmp_pos);
        break;
    }
    tcg_temp_free(tmp_pos);
    tcg_temp_free(tmp_width);
}
/*
 * Decode RRRW-format bit-field instructions (EXTR, EXTR.U, IMASK, INSERT):
 * the field width is an immediate encoded in the opcode, while the bit
 * position comes from d[r3].
 */
static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    int32_t width;
    TCGv temp, temp2;

    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
    r1 = MASK_OP_RRRW_S1(ctx->opcode);
    r2 = MASK_OP_RRRW_S2(ctx->opcode);
    r3 = MASK_OP_RRRW_S3(ctx->opcode);
    r4 = MASK_OP_RRRW_D(ctx->opcode);
    width = MASK_OP_RRRW_WIDTH(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRW_EXTR:
        /* Sign-extending extract: shift the field's MSB up to bit 31,
           then arithmetic-shift it back down. Position = d[r3][4:0]. */
        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_addi_tl(temp, temp, width);
        tcg_gen_subfi_tl(temp, 32, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
        tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
        break;
    case OPC2_32_RRRW_EXTR_U:
        if (width == 0) {
            /* A zero-width field is empty: result is 0.  This also avoids
               the undefined host shift "~0u >> 32" below. */
            tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
        } else {
            /* Zero-extending extract: shift down, then mask to 'width'
               bits. */
            tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
            tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRRW_IMASK:
        /* Build an insert mask in the register pair E[r4]:
           d[r4+1] = ((1 << width) - 1) << pos, d[r4] = d[r2] << pos. */
        temp2 = tcg_temp_new();

        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(temp2, temp2, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
        tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);

        tcg_temp_free(temp2);
        break;
    case OPC2_32_RRRW_INSERT:
        /* Insert d[r2] into d[r1]: immediate width, position from
           d[r3][4:0]; result in d[r4]. */
        temp2 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);

        tcg_temp_free(temp2);
        break;
    }
    tcg_temp_free(temp);
}
/*
 * Decode SYS-format system/interrupt instructions (DEBUG, DISABLE, ENABLE,
 * RET, RFE, RFM, context save/restore, traps).  Several opcodes are
 * synchronization barriers or trap placeholders and emit no code yet.
 */
static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        /* Clear ICR.IE: globally disable interrupts. */
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
        break;
    case OPC2_32_SYS_DSYNC:
        /* Data synchronization barrier: no-op under TCG. */
        break;
    case OPC2_32_SYS_ENABLE:
        /* Set ICR.IE: globally enable interrupts. */
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
        break;
    case OPC2_32_SYS_ISYNC:
        /* Instruction synchronization barrier: no-op under TCG. */
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        /* Return from call: handled by the common branch generator. */
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        /* NOTE(review): this statement sat on a line lost in extraction;
           upstream emits the fast-return helper here — confirm. */
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        /* Return from exception: helper restores PSW/PC, so the TB must
           end here. */
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(0);
        ctx->bstate = BS_BRANCH;
        break;
    case OPC2_32_SYS_RFM:
        /* Return from monitor: only legal in supervisor mode, and only
           taken when DBGSR.DE == 1. */
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
            tcg_temp_free(tmp);
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_RESTORE:
        /* RESTORE exists only on TriCore 1.6; writes d[r1] bit 0 into
           ICR.IE (bit 8) when in supervisor or user-1 mode. */
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
        } /* else raise illegal opcode trap */
        break;
    case OPC2_32_SYS_TRAPSV:
        /* TODO: raise sticky overflow trap */
        break;
    case OPC2_32_SYS_TRAPV:
        /* TODO: raise overflow trap */
        break;
    }
}
/*
 * Top-level decoder for 32-bit TriCore instructions.  Dispatches on the
 * major opcode (bits [7:0]) to the per-format decode helpers; a few
 * simple formats are handled inline.
 */
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format: 18-bit absolute addressing */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(env, ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(env, ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(env, ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(env, ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(env, ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(env, ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        /* ST.Q: store the upper halfword of d[r1] to the absolute EA. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);

        tcg_temp_free(temp2);
        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LD_Q:
        /* LD.Q: load a halfword into the upper half of d[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);

        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LEA:
        /* LEA: the EA itself goes into address register a[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format: single-bit store to memory */
    case OPC1_32_ABSB_ST_T:
        /* ST.T: read-modify-write one bit (bpos) of the addressed byte. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
/* B-format: 24-bit displacement branches and calls */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* BIT-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
/* BO-format: base + offset addressing modes */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
/* BOL-format: base + 16-bit long offset */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(env, ctx, op1);
        break;
/* BRC-format: compare-against-constant branches */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN-format: branch on single register bit (JNZ.T/JZ.T) */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR-format: register-register compare branches */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC-format: register + 9-bit constant */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(env, ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(env, ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(env, ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(env, ctx);
        break;
/* RCPW-format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(env, ctx);
        break;
/* RCRR-format */
    case OPC1_32_RCRR_INSERT:
        /* INSERT with a 4-bit constant value and width/pos from register
           pair E[r3] (d[r3] = pos, d[r3+1] = width). */
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */

        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
/* RCRW-format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(env, ctx);
        break;
/* RCR-format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(env, ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(env, ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(env, ctx);
        break;
/* RLC-format: register + 16-bit constant */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(env, ctx, op1);
        break;
/* RR-format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(env, ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(env, ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(env, ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(env, ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(env, ctx);
        break;
/* RR1-format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(env, ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(env, ctx);
        break;
/* RR2-format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(env, ctx);
        break;
/* RRPW-format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(env, ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        /* DEXTR with an immediate position: extract 32 bits from
           d[r1]:d[r2] at bit offset const16. */
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);
        if (r1 == r2) {
            /* Same source in both halves: plain rotate left. */
            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
        } else {
            temp = tcg_temp_new();
            tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
            tcg_temp_free(temp);
        }
        break;
/* RRR-format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(env, ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(env, ctx);
        break;
/* RRR2-format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(env, ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(env, ctx);
        break;
/* RRR1-format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(env, ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(env, ctx);
        break;
/* RRRR-format */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(env, ctx);
        break;
/* RRRW-format */
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(env, ctx);
        break;
/* SYS-format */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(env, ctx);
        break;
    case OPC1_32_SYS_RSTV:
        /* RSTV: clear the PSW overflow/advance flags (V, SV, AV, SAV). */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    }
}
/*
 * Decode one instruction at ctx->pc.  TriCore instruction length is
 * signalled by bit 0 of the opcode word: 0 = 16-bit, 1 = 32-bit.
 * ctx->next_pc is advanced accordingly before dispatching.
 * The 'is_branch' out-parameter is not used in this version.
 */
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
/*
 * Main translation loop: fetch, decode and emit TCG ops for instructions
 * starting at tb->pc until a branch is generated or the per-TB
 * instruction/op budget is exhausted.
 */
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    TriCoreCPU *cpu = tricore_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns, max_insns;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (singlestep) {
        /* gdb single-stepping: translate one instruction per TB. */
        max_insns = 1;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env, false);

    tcg_clear_temp_count();
    gen_tb_start(tb);
    /* Decoders set ctx.bstate != BS_NONE when control flow must leave
       the TB (branches, exception returns, ...). */
    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        if (num_insns >= max_insns || tcg_op_buf_full()) {
            /* Budget exhausted: end the TB at the next PC. */
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

    if (tcg_check_temp_count()) {
        /* A decoder allocated a TCG temp without freeing it. */
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
8333 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
,
8344 void cpu_state_reset(CPUTriCoreState
*env
)
8346 /* Reset Regs to Default Value */
/*
 * Create TCG globals for the core special function registers (PCXI, PSW,
 * PC, ICR) backed by their CPUTriCoreState fields.
 */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, ICR), "ICR");
}
8362 void tricore_tcg_init(void)
8369 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8371 for (i
= 0 ; i
< 16 ; i
++) {
8372 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
8373 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8376 for (i
= 0 ; i
< 16 ; i
++) {
8377 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
8378 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8381 tricore_tcg_init_csfr();
8382 /* init PSW flag cache */
8383 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
8384 offsetof(CPUTriCoreState
, PSW_USB_C
),
8386 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
8387 offsetof(CPUTriCoreState
, PSW_USB_V
),
8389 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
8390 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8392 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
8393 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8395 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
8396 offsetof(CPUTriCoreState
, PSW_USB_SAV
),