2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
/* TCG globals caching the TriCore general purpose register file */
static TCGv cpu_gpr_a[16];   /* address registers A[0..15] (A[10] printed as "sp") */
static TCGv cpu_gpr_d[16];   /* data registers D[0..15] */
/* Cached PSW status flag fields (PSW itself is read/written via helpers) */
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
53 #include "exec/gen-icount.h"
/* Printable names of the address registers; a10 is shown as "sp". */
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};
/* Printable names of the data registers. */
static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
67 typedef struct DisasContext
{
68 DisasContextBase base
;
69 target_ulong pc_succ_insn
;
71 /* Routine used to access memory */
73 uint32_t hflags
, saved_hflags
;
77 static int has_feature(DisasContext
*ctx
, int feature
)
79 return (ctx
->features
& (1ULL << feature
)) != 0;
89 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
91 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
92 CPUTriCoreState
*env
= &cpu
->env
;
98 qemu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
99 qemu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
100 qemu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
101 qemu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
102 qemu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
103 qemu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
105 for (i
= 0; i
< 16; ++i
) {
107 qemu_fprintf(f
, "\nGPR A%02d:", i
);
109 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
111 for (i
= 0; i
< 16; ++i
) {
113 qemu_fprintf(f
, "\nGPR D%02d:", i
);
115 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
117 qemu_fprintf(f
, "\n");
121 * Functions to generate micro-ops
/* Macros for generating helpers */
/* Call the one-argument helper `name` with the immediate `arg`. */
#define gen_helper_1arg(name, arg) do {                   \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);             \
    gen_helper_##name(cpu_env, helper_tmp);               \
} while (0)
/* LL variant: sign-extend both halfwords of arg0 and the low halfword of
   arg1, then call helper `name` (the low half of arg1 is used twice). */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* LU variant: split both operands into sign-extended high/low halfwords
   and call helper `name` with (hi0, lo0, lo1, hi1). */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* UL variant: split both operands into sign-extended high/low halfwords
   and call helper `name` with (hi0, lo0, hi1, lo1). */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* UU variant: note arg01 gets the HIGH half of arg0 here, and the high
   half of arg1 is used twice. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Pack a 32-bit register pair into a 64-bit value, call helper `name`,
   and unpack the 64-bit result back into (rl, rh). */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
} while (0)
/* Call the env-taking helper `name` and split its 64-bit result into
   the (rl, rh) register pair. */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
} while (0)
/* Effective address of the ABS format: off18 bits [17:14] become address
   bits [31:28]; bits [13:0] are used directly.
   Fix: macro arguments are now fully parenthesized. */
#define EA_ABS_FORMAT(con) ((((con) & 0x3C000) << 14) + ((con) & 0x3FFF))
/* Effective address of absolute branches (disp24): bits [23:20] map to
   address bits [31:28], bits [19:0] to address bits [20:1].
   Fix: use the macro parameter instead of silently capturing a variable
   named `offset` from the expansion site. */
#define EA_B_ABSOLUT(con) ((((con) & 0xf00000) << 8) | \
                           (((con) & 0x0fffff) << 1))
195 /* For two 32-bit registers used a 64-bit register, the first
196 registernumber needs to be even. Otherwise we trap. */
197 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
198 #define CHECK_REG_PAIR(reg) do { \
200 generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
/* Functions for loads/stores to/from memory */
206 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
207 int16_t con
, MemOp mop
)
209 TCGv temp
= tcg_temp_new();
210 tcg_gen_addi_tl(temp
, r2
, con
);
211 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
214 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
215 int16_t con
, MemOp mop
)
217 TCGv temp
= tcg_temp_new();
218 tcg_gen_addi_tl(temp
, r2
, con
);
219 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
222 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
224 TCGv_i64 temp
= tcg_temp_new_i64();
226 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
227 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
230 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
233 TCGv temp
= tcg_temp_new();
234 tcg_gen_addi_tl(temp
, base
, con
);
235 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
238 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
240 TCGv_i64 temp
= tcg_temp_new_i64();
242 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
243 /* write back to two 32 bit regs */
244 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
247 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
250 TCGv temp
= tcg_temp_new();
251 tcg_gen_addi_tl(temp
, base
, con
);
252 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
255 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
258 TCGv temp
= tcg_temp_new();
259 tcg_gen_addi_tl(temp
, r2
, off
);
260 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
261 tcg_gen_mov_tl(r2
, temp
);
264 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
267 TCGv temp
= tcg_temp_new();
268 tcg_gen_addi_tl(temp
, r2
, off
);
269 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
270 tcg_gen_mov_tl(r2
, temp
);
273 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
274 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
276 TCGv temp
= tcg_temp_new();
277 TCGv temp2
= tcg_temp_new();
279 CHECK_REG_PAIR(ereg
);
280 /* temp = (M(EA, word) */
281 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
282 /* temp = temp & ~E[a][63:32]) */
283 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
284 /* temp2 = (E[a][31:0] & E[a][63:32]); */
285 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
286 /* temp = temp | temp2; */
287 tcg_gen_or_tl(temp
, temp
, temp2
);
288 /* M(EA, word) = temp; */
289 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
292 /* tmp = M(EA, word);
295 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
297 TCGv temp
= tcg_temp_new();
299 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
300 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
301 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
304 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
306 TCGv temp
= tcg_temp_new();
307 TCGv temp2
= tcg_temp_new();
308 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
309 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
310 cpu_gpr_d
[reg
], temp
);
311 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
312 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
315 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
317 TCGv temp
= tcg_temp_new();
318 TCGv temp2
= tcg_temp_new();
319 TCGv temp3
= tcg_temp_new();
321 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
322 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
323 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
324 tcg_gen_or_tl(temp2
, temp2
, temp3
);
325 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
326 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
330 /* We generate loads and store to core special function register (csfr) through
331 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
332 makros R, A and E, which allow read-only, all and endinit protected access.
333 These makros also specify in which ISA version the csfr was introduced. */
334 #define R(ADDRESS, REG, FEATURE) \
336 if (has_feature(ctx, FEATURE)) { \
337 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
340 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
341 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
342 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
344 /* since we're caching PSW make this a special case */
345 if (offset
== 0xfe04) {
346 gen_helper_psw_read(ret
, cpu_env
);
349 #include "csfr.h.inc"
357 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
358 since no execption occurs */
359 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
361 if (has_feature(ctx, FEATURE)) { \
362 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
365 /* Endinit protected registers
366 TODO: Since the endinit bit is in a register of a not yet implemented
367 watchdog device, we handle endinit protected registers like
368 all-access registers for now. */
369 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
370 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
373 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
374 /* since we're caching PSW make this a special case */
375 if (offset
== 0xfe04) {
376 gen_helper_psw_write(cpu_env
, r1
);
379 #include "csfr.h.inc"
383 /* generate privilege trap */
387 /* Functions for arithmetic instructions */
389 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
391 TCGv t0
= tcg_temp_new_i32();
392 TCGv result
= tcg_temp_new_i32();
393 /* Addition and set V/SV bits */
394 tcg_gen_add_tl(result
, r1
, r2
);
396 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
397 tcg_gen_xor_tl(t0
, r1
, r2
);
398 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
400 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
401 /* Calc AV/SAV bits */
402 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
403 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
405 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
406 /* write back result */
407 tcg_gen_mov_tl(ret
, result
);
411 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
413 TCGv temp
= tcg_temp_new();
414 TCGv_i64 t0
= tcg_temp_new_i64();
415 TCGv_i64 t1
= tcg_temp_new_i64();
416 TCGv_i64 result
= tcg_temp_new_i64();
418 tcg_gen_add_i64(result
, r1
, r2
);
420 tcg_gen_xor_i64(t1
, result
, r1
);
421 tcg_gen_xor_i64(t0
, r1
, r2
);
422 tcg_gen_andc_i64(t1
, t1
, t0
);
423 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
425 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
426 /* calc AV/SAV bits */
427 tcg_gen_extrh_i64_i32(temp
, result
);
428 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
429 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
431 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
432 /* write back result */
433 tcg_gen_mov_i64(ret
, result
);
437 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
438 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
439 void(*op2
)(TCGv
, TCGv
, TCGv
))
441 TCGv temp
= tcg_temp_new();
442 TCGv temp2
= tcg_temp_new();
443 TCGv temp3
= tcg_temp_new();
444 TCGv temp4
= tcg_temp_new();
446 (*op1
)(temp
, r1_low
, r2
);
448 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
449 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
450 if (op1
== tcg_gen_add_tl
) {
451 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
453 tcg_gen_and_tl(temp2
, temp2
, temp3
);
456 (*op2
)(temp3
, r1_high
, r3
);
458 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
459 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
460 if (op2
== tcg_gen_add_tl
) {
461 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
465 /* combine V0/V1 bits */
466 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
468 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
470 tcg_gen_mov_tl(ret_low
, temp
);
471 tcg_gen_mov_tl(ret_high
, temp3
);
473 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
474 tcg_gen_xor_tl(temp
, temp
, ret_low
);
475 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
476 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
477 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 /* ret = r2 + (r1 * r3); */
483 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
485 TCGv_i64 t1
= tcg_temp_new_i64();
486 TCGv_i64 t2
= tcg_temp_new_i64();
487 TCGv_i64 t3
= tcg_temp_new_i64();
489 tcg_gen_ext_i32_i64(t1
, r1
);
490 tcg_gen_ext_i32_i64(t2
, r2
);
491 tcg_gen_ext_i32_i64(t3
, r3
);
493 tcg_gen_mul_i64(t1
, t1
, t3
);
494 tcg_gen_add_i64(t1
, t2
, t1
);
496 tcg_gen_extrl_i64_i32(ret
, t1
);
499 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
500 /* t1 < -0x80000000 */
501 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
502 tcg_gen_or_i64(t2
, t2
, t3
);
503 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
504 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
506 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
507 /* Calc AV/SAV bits */
508 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
509 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
511 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
514 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
516 TCGv temp
= tcg_const_i32(con
);
517 gen_madd32_d(ret
, r1
, r2
, temp
);
521 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
524 TCGv t1
= tcg_temp_new();
525 TCGv t2
= tcg_temp_new();
526 TCGv t3
= tcg_temp_new();
527 TCGv t4
= tcg_temp_new();
529 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
530 /* only the add can overflow */
531 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
533 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
534 tcg_gen_xor_tl(t1
, r2_high
, t2
);
535 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
537 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
538 /* Calc AV/SAV bits */
539 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
540 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
542 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
543 /* write back the result */
544 tcg_gen_mov_tl(ret_low
, t3
);
545 tcg_gen_mov_tl(ret_high
, t4
);
549 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
552 TCGv_i64 t1
= tcg_temp_new_i64();
553 TCGv_i64 t2
= tcg_temp_new_i64();
554 TCGv_i64 t3
= tcg_temp_new_i64();
556 tcg_gen_extu_i32_i64(t1
, r1
);
557 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
558 tcg_gen_extu_i32_i64(t3
, r3
);
560 tcg_gen_mul_i64(t1
, t1
, t3
);
561 tcg_gen_add_i64(t2
, t2
, t1
);
562 /* write back result */
563 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
564 /* only the add overflows, if t2 < t1
566 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
567 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
568 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
570 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
571 /* Calc AV/SAV bits */
572 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
573 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
575 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
579 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
582 TCGv temp
= tcg_const_i32(con
);
583 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
587 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
590 TCGv temp
= tcg_const_i32(con
);
591 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
595 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
596 TCGv r3
, uint32_t n
, uint32_t mode
)
598 TCGv t_n
= tcg_constant_i32(n
);
599 TCGv temp
= tcg_temp_new();
600 TCGv temp2
= tcg_temp_new();
601 TCGv_i64 temp64
= tcg_temp_new_i64();
604 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
607 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
610 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
613 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
616 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
617 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
618 tcg_gen_add_tl
, tcg_gen_add_tl
);
622 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
623 TCGv r3
, uint32_t n
, uint32_t mode
)
625 TCGv t_n
= tcg_constant_i32(n
);
626 TCGv temp
= tcg_temp_new();
627 TCGv temp2
= tcg_temp_new();
628 TCGv_i64 temp64
= tcg_temp_new_i64();
631 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
634 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
637 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
640 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
643 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
644 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
645 tcg_gen_sub_tl
, tcg_gen_add_tl
);
649 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
650 TCGv r3
, uint32_t n
, uint32_t mode
)
652 TCGv t_n
= tcg_constant_i32(n
);
653 TCGv_i64 temp64
= tcg_temp_new_i64();
654 TCGv_i64 temp64_2
= tcg_temp_new_i64();
655 TCGv_i64 temp64_3
= tcg_temp_new_i64();
658 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
661 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
664 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
667 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
670 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
671 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
672 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
673 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
674 tcg_gen_shli_i64(temp64
, temp64
, 16);
676 gen_add64_d(temp64_2
, temp64_3
, temp64
);
677 /* write back result */
678 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
/* Forward declaration; presumably the saturating (ssov) add — defined
   later in this file, TODO confirm. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
684 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
685 TCGv r3
, uint32_t n
, uint32_t mode
)
687 TCGv t_n
= tcg_constant_i32(n
);
688 TCGv temp
= tcg_temp_new();
689 TCGv temp2
= tcg_temp_new();
690 TCGv temp3
= tcg_temp_new();
691 TCGv_i64 temp64
= tcg_temp_new_i64();
695 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
698 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
701 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
704 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
707 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
708 gen_adds(ret_low
, r1_low
, temp
);
709 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
710 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
711 gen_adds(ret_high
, r1_high
, temp2
);
713 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
714 /* combine av bits */
715 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
/* Forward declaration; presumably the saturating (ssov) subtract —
   defined later in this file, TODO confirm. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
721 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
722 TCGv r3
, uint32_t n
, uint32_t mode
)
724 TCGv t_n
= tcg_constant_i32(n
);
725 TCGv temp
= tcg_temp_new();
726 TCGv temp2
= tcg_temp_new();
727 TCGv temp3
= tcg_temp_new();
728 TCGv_i64 temp64
= tcg_temp_new_i64();
732 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
735 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
738 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
741 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
744 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
745 gen_subs(ret_low
, r1_low
, temp
);
746 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
747 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
748 gen_adds(ret_high
, r1_high
, temp2
);
750 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
751 /* combine av bits */
752 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
756 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
757 TCGv r3
, uint32_t n
, uint32_t mode
)
759 TCGv t_n
= tcg_constant_i32(n
);
760 TCGv_i64 temp64
= tcg_temp_new_i64();
761 TCGv_i64 temp64_2
= tcg_temp_new_i64();
765 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
768 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
771 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
774 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
777 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
778 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
779 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
780 tcg_gen_shli_i64(temp64
, temp64
, 16);
781 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
783 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
784 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
789 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
790 TCGv r3
, uint32_t n
, uint32_t mode
)
792 TCGv t_n
= tcg_constant_i32(n
);
793 TCGv_i64 temp64
= tcg_temp_new_i64();
794 TCGv_i64 temp64_2
= tcg_temp_new_i64();
795 TCGv_i64 temp64_3
= tcg_temp_new_i64();
798 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
801 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
804 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
807 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
810 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
811 gen_add64_d(temp64_3
, temp64_2
, temp64
);
812 /* write back result */
813 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
817 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
818 TCGv r3
, uint32_t n
, uint32_t mode
)
820 TCGv t_n
= tcg_constant_i32(n
);
821 TCGv_i64 temp64
= tcg_temp_new_i64();
822 TCGv_i64 temp64_2
= tcg_temp_new_i64();
825 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
828 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
831 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
834 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
837 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
838 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
839 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
843 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
846 TCGv t_n
= tcg_constant_i32(n
);
847 TCGv_i64 temp64
= tcg_temp_new_i64();
850 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
853 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
856 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
859 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
862 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
866 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
868 TCGv temp
= tcg_temp_new();
869 TCGv temp2
= tcg_temp_new();
871 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
872 tcg_gen_shli_tl(temp
, r1
, 16);
873 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
877 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
879 TCGv t_n
= tcg_constant_i32(n
);
880 TCGv temp
= tcg_temp_new();
881 TCGv temp2
= tcg_temp_new();
882 TCGv_i64 temp64
= tcg_temp_new_i64();
885 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
888 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
891 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
894 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
897 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
898 tcg_gen_shli_tl(temp
, r1
, 16);
899 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
904 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
905 uint32_t n
, uint32_t mode
)
907 TCGv t_n
= tcg_constant_i32(n
);
908 TCGv_i64 temp64
= tcg_temp_new_i64();
911 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
914 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
917 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
920 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
923 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
927 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
929 TCGv temp
= tcg_temp_new();
930 TCGv temp2
= tcg_temp_new();
932 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
933 tcg_gen_shli_tl(temp
, r1
, 16);
934 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
938 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
940 TCGv t_n
= tcg_constant_i32(n
);
941 TCGv temp
= tcg_temp_new();
942 TCGv temp2
= tcg_temp_new();
943 TCGv_i64 temp64
= tcg_temp_new_i64();
946 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
949 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
952 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
955 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
958 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
959 tcg_gen_shli_tl(temp
, r1
, 16);
960 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
964 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
966 TCGv t_n
= tcg_constant_i32(n
);
967 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
971 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
973 TCGv t_n
= tcg_constant_i32(n
);
974 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
978 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
981 TCGv temp
= tcg_temp_new();
982 TCGv temp2
= tcg_temp_new();
983 TCGv temp3
= tcg_temp_new();
984 TCGv_i64 t1
= tcg_temp_new_i64();
985 TCGv_i64 t2
= tcg_temp_new_i64();
986 TCGv_i64 t3
= tcg_temp_new_i64();
988 tcg_gen_ext_i32_i64(t2
, arg2
);
989 tcg_gen_ext_i32_i64(t3
, arg3
);
991 tcg_gen_mul_i64(t2
, t2
, t3
);
992 tcg_gen_shli_i64(t2
, t2
, n
);
994 tcg_gen_ext_i32_i64(t1
, arg1
);
995 tcg_gen_sari_i64(t2
, t2
, up_shift
);
997 tcg_gen_add_i64(t3
, t1
, t2
);
998 tcg_gen_extrl_i64_i32(temp3
, t3
);
1000 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1001 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1002 tcg_gen_or_i64(t1
, t1
, t2
);
1003 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1004 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1005 /* We produce an overflow on the host if the mul before was
1006 (0x80000000 * 0x80000000) << 1). If this is the
1007 case, we negate the ovf. */
1009 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1010 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1011 tcg_gen_and_tl(temp
, temp
, temp2
);
1012 tcg_gen_shli_tl(temp
, temp
, 31);
1013 /* negate v bit, if special condition */
1014 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1017 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1018 /* Calc AV/SAV bits */
1019 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1020 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1022 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1023 /* write back result */
1024 tcg_gen_mov_tl(ret
, temp3
);
1028 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1030 TCGv temp
= tcg_temp_new();
1031 TCGv temp2
= tcg_temp_new();
1033 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1034 } else { /* n is expected to be 1 */
1035 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1036 tcg_gen_shli_tl(temp
, temp
, 1);
1037 /* catch special case r1 = r2 = 0x8000 */
1038 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1039 tcg_gen_sub_tl(temp
, temp
, temp2
);
1041 gen_add_d(ret
, arg1
, temp
);
1045 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1047 TCGv temp
= tcg_temp_new();
1048 TCGv temp2
= tcg_temp_new();
1050 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1051 } else { /* n is expected to be 1 */
1052 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1053 tcg_gen_shli_tl(temp
, temp
, 1);
1054 /* catch special case r1 = r2 = 0x8000 */
1055 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1056 tcg_gen_sub_tl(temp
, temp
, temp2
);
1058 gen_adds(ret
, arg1
, temp
);
1062 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1063 TCGv arg3
, uint32_t n
)
1065 TCGv temp
= tcg_temp_new();
1066 TCGv temp2
= tcg_temp_new();
1067 TCGv_i64 t1
= tcg_temp_new_i64();
1068 TCGv_i64 t2
= tcg_temp_new_i64();
1069 TCGv_i64 t3
= tcg_temp_new_i64();
1072 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1073 } else { /* n is expected to be 1 */
1074 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1075 tcg_gen_shli_tl(temp
, temp
, 1);
1076 /* catch special case r1 = r2 = 0x8000 */
1077 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1078 tcg_gen_sub_tl(temp
, temp
, temp2
);
1080 tcg_gen_ext_i32_i64(t2
, temp
);
1081 tcg_gen_shli_i64(t2
, t2
, 16);
1082 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1083 gen_add64_d(t3
, t1
, t2
);
1084 /* write back result */
1085 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1089 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1090 TCGv arg3
, uint32_t n
)
1092 TCGv temp
= tcg_temp_new();
1093 TCGv temp2
= tcg_temp_new();
1094 TCGv_i64 t1
= tcg_temp_new_i64();
1095 TCGv_i64 t2
= tcg_temp_new_i64();
1098 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1099 } else { /* n is expected to be 1 */
1100 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1101 tcg_gen_shli_tl(temp
, temp
, 1);
1102 /* catch special case r1 = r2 = 0x8000 */
1103 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1104 tcg_gen_sub_tl(temp
, temp
, temp2
);
1106 tcg_gen_ext_i32_i64(t2
, temp
);
1107 tcg_gen_shli_i64(t2
, t2
, 16);
1108 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1110 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1111 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1115 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1116 TCGv arg3
, uint32_t n
)
1118 TCGv_i64 t1
= tcg_temp_new_i64();
1119 TCGv_i64 t2
= tcg_temp_new_i64();
1120 TCGv_i64 t3
= tcg_temp_new_i64();
1121 TCGv_i64 t4
= tcg_temp_new_i64();
1124 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1125 tcg_gen_ext_i32_i64(t2
, arg2
);
1126 tcg_gen_ext_i32_i64(t3
, arg3
);
1128 tcg_gen_mul_i64(t2
, t2
, t3
);
1130 tcg_gen_shli_i64(t2
, t2
, 1);
1132 tcg_gen_add_i64(t4
, t1
, t2
);
1134 tcg_gen_xor_i64(t3
, t4
, t1
);
1135 tcg_gen_xor_i64(t2
, t1
, t2
);
1136 tcg_gen_andc_i64(t3
, t3
, t2
);
1137 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1138 /* We produce an overflow on the host if the mul before was
1139 (0x80000000 * 0x80000000) << 1). If this is the
1140 case, we negate the ovf. */
1142 temp
= tcg_temp_new();
1143 temp2
= tcg_temp_new();
1144 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1145 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1146 tcg_gen_and_tl(temp
, temp
, temp2
);
1147 tcg_gen_shli_tl(temp
, temp
, 31);
1148 /* negate v bit, if special condition */
1149 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1151 /* write back result */
1152 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1154 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1155 /* Calc AV/SAV bits */
1156 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1157 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1159 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1163 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1166 TCGv_i64 t1
= tcg_temp_new_i64();
1167 TCGv_i64 t2
= tcg_temp_new_i64();
1168 TCGv_i64 t3
= tcg_temp_new_i64();
1170 tcg_gen_ext_i32_i64(t1
, arg1
);
1171 tcg_gen_ext_i32_i64(t2
, arg2
);
1172 tcg_gen_ext_i32_i64(t3
, arg3
);
1174 tcg_gen_mul_i64(t2
, t2
, t3
);
1175 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1177 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1181 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1182 TCGv arg3
, uint32_t n
)
1184 TCGv_i64 r1
= tcg_temp_new_i64();
1185 TCGv t_n
= tcg_constant_i32(n
);
1187 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1188 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1189 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1192 /* ret = r2 - (r1 * r3); */
1193 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1195 TCGv_i64 t1
= tcg_temp_new_i64();
1196 TCGv_i64 t2
= tcg_temp_new_i64();
1197 TCGv_i64 t3
= tcg_temp_new_i64();
1199 tcg_gen_ext_i32_i64(t1
, r1
);
1200 tcg_gen_ext_i32_i64(t2
, r2
);
1201 tcg_gen_ext_i32_i64(t3
, r3
);
1203 tcg_gen_mul_i64(t1
, t1
, t3
);
1204 tcg_gen_sub_i64(t1
, t2
, t1
);
1206 tcg_gen_extrl_i64_i32(ret
, t1
);
1209 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1210 /* result < -0x80000000 */
1211 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1212 tcg_gen_or_i64(t2
, t2
, t3
);
1213 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1214 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1217 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1218 /* Calc AV/SAV bits */
1219 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1220 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1222 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1225 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1227 TCGv temp
= tcg_const_i32(con
);
1228 gen_msub32_d(ret
, r1
, r2
, temp
);
1232 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1235 TCGv t1
= tcg_temp_new();
1236 TCGv t2
= tcg_temp_new();
1237 TCGv t3
= tcg_temp_new();
1238 TCGv t4
= tcg_temp_new();
1240 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1241 /* only the sub can overflow */
1242 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1244 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1245 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1246 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1248 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1249 /* Calc AV/SAV bits */
1250 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1251 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1253 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1254 /* write back the result */
1255 tcg_gen_mov_tl(ret_low
, t3
);
1256 tcg_gen_mov_tl(ret_high
, t4
);
1260 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1263 TCGv temp
= tcg_const_i32(con
);
1264 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1268 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1271 TCGv_i64 t1
= tcg_temp_new_i64();
1272 TCGv_i64 t2
= tcg_temp_new_i64();
1273 TCGv_i64 t3
= tcg_temp_new_i64();
1275 tcg_gen_extu_i32_i64(t1
, r1
);
1276 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1277 tcg_gen_extu_i32_i64(t3
, r3
);
1279 tcg_gen_mul_i64(t1
, t1
, t3
);
1280 tcg_gen_sub_i64(t3
, t2
, t1
);
1281 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1282 /* calc V bit, only the sub can overflow, if t1 > t2 */
1283 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1284 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1285 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1287 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1288 /* Calc AV/SAV bits */
1289 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1290 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1292 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1296 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1299 TCGv temp
= tcg_const_i32(con
);
1300 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1303 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1305 TCGv temp
= tcg_const_i32(r2
);
1306 gen_add_d(ret
, r1
, temp
);
1309 /* calculate the carry bit too */
1310 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1312 TCGv t0
= tcg_temp_new_i32();
1313 TCGv result
= tcg_temp_new_i32();
1315 tcg_gen_movi_tl(t0
, 0);
1316 /* Addition and set C/V/SV bits */
1317 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1319 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1320 tcg_gen_xor_tl(t0
, r1
, r2
);
1321 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1323 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1324 /* Calc AV/SAV bits */
1325 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1326 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1328 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1329 /* write back result */
1330 tcg_gen_mov_tl(ret
, result
);
1333 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1335 TCGv temp
= tcg_const_i32(con
);
1336 gen_add_CC(ret
, r1
, temp
);
1339 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1341 TCGv carry
= tcg_temp_new_i32();
1342 TCGv t0
= tcg_temp_new_i32();
1343 TCGv result
= tcg_temp_new_i32();
1345 tcg_gen_movi_tl(t0
, 0);
1346 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1347 /* Addition, carry and set C/V/SV bits */
1348 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1349 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1351 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1352 tcg_gen_xor_tl(t0
, r1
, r2
);
1353 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1355 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1356 /* Calc AV/SAV bits */
1357 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1358 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1360 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1361 /* write back result */
1362 tcg_gen_mov_tl(ret
, result
);
1365 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1367 TCGv temp
= tcg_const_i32(con
);
1368 gen_addc_CC(ret
, r1
, temp
);
1371 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1374 TCGv temp
= tcg_temp_new();
1375 TCGv temp2
= tcg_temp_new();
1376 TCGv result
= tcg_temp_new();
1377 TCGv mask
= tcg_temp_new();
1378 TCGv t0
= tcg_const_i32(0);
1380 /* create mask for sticky bits */
1381 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1382 tcg_gen_shli_tl(mask
, mask
, 31);
1384 tcg_gen_add_tl(result
, r1
, r2
);
1386 tcg_gen_xor_tl(temp
, result
, r1
);
1387 tcg_gen_xor_tl(temp2
, r1
, r2
);
1388 tcg_gen_andc_tl(temp
, temp
, temp2
);
1389 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1391 tcg_gen_and_tl(temp
, temp
, mask
);
1392 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1394 tcg_gen_add_tl(temp
, result
, result
);
1395 tcg_gen_xor_tl(temp
, temp
, result
);
1396 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1398 tcg_gen_and_tl(temp
, temp
, mask
);
1399 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1400 /* write back result */
1401 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1404 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1407 TCGv temp
= tcg_const_i32(r2
);
1408 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1411 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1413 TCGv temp
= tcg_temp_new_i32();
1414 TCGv result
= tcg_temp_new_i32();
1416 tcg_gen_sub_tl(result
, r1
, r2
);
1418 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1419 tcg_gen_xor_tl(temp
, r1
, r2
);
1420 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1422 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1424 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1425 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1427 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1428 /* write back result */
1429 tcg_gen_mov_tl(ret
, result
);
1433 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1435 TCGv temp
= tcg_temp_new();
1436 TCGv_i64 t0
= tcg_temp_new_i64();
1437 TCGv_i64 t1
= tcg_temp_new_i64();
1438 TCGv_i64 result
= tcg_temp_new_i64();
1440 tcg_gen_sub_i64(result
, r1
, r2
);
1442 tcg_gen_xor_i64(t1
, result
, r1
);
1443 tcg_gen_xor_i64(t0
, r1
, r2
);
1444 tcg_gen_and_i64(t1
, t1
, t0
);
1445 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1447 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1448 /* calc AV/SAV bits */
1449 tcg_gen_extrh_i64_i32(temp
, result
);
1450 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1451 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1453 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1454 /* write back result */
1455 tcg_gen_mov_i64(ret
, result
);
1458 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1460 TCGv result
= tcg_temp_new();
1461 TCGv temp
= tcg_temp_new();
1463 tcg_gen_sub_tl(result
, r1
, r2
);
1465 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1467 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1468 tcg_gen_xor_tl(temp
, r1
, r2
);
1469 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1471 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1473 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1474 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1476 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1477 /* write back result */
1478 tcg_gen_mov_tl(ret
, result
);
1481 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1483 TCGv temp
= tcg_temp_new();
1484 tcg_gen_not_tl(temp
, r2
);
1485 gen_addc_CC(ret
, r1
, temp
);
1488 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1491 TCGv temp
= tcg_temp_new();
1492 TCGv temp2
= tcg_temp_new();
1493 TCGv result
= tcg_temp_new();
1494 TCGv mask
= tcg_temp_new();
1495 TCGv t0
= tcg_const_i32(0);
1497 /* create mask for sticky bits */
1498 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1499 tcg_gen_shli_tl(mask
, mask
, 31);
1501 tcg_gen_sub_tl(result
, r1
, r2
);
1503 tcg_gen_xor_tl(temp
, result
, r1
);
1504 tcg_gen_xor_tl(temp2
, r1
, r2
);
1505 tcg_gen_and_tl(temp
, temp
, temp2
);
1506 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1508 tcg_gen_and_tl(temp
, temp
, mask
);
1509 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1511 tcg_gen_add_tl(temp
, result
, result
);
1512 tcg_gen_xor_tl(temp
, temp
, result
);
1513 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1515 tcg_gen_and_tl(temp
, temp
, mask
);
1516 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1517 /* write back result */
1518 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1522 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1523 TCGv r3
, uint32_t n
, uint32_t mode
)
1525 TCGv t_n
= tcg_constant_i32(n
);
1526 TCGv temp
= tcg_temp_new();
1527 TCGv temp2
= tcg_temp_new();
1528 TCGv_i64 temp64
= tcg_temp_new_i64();
1531 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1534 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1537 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1540 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1543 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1544 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1545 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1549 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1550 TCGv r3
, uint32_t n
, uint32_t mode
)
1552 TCGv t_n
= tcg_constant_i32(n
);
1553 TCGv temp
= tcg_temp_new();
1554 TCGv temp2
= tcg_temp_new();
1555 TCGv temp3
= tcg_temp_new();
1556 TCGv_i64 temp64
= tcg_temp_new_i64();
1560 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1563 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1566 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1569 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1572 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1573 gen_subs(ret_low
, r1_low
, temp
);
1574 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1575 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1576 gen_subs(ret_high
, r1_high
, temp2
);
1577 /* combine v bits */
1578 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1579 /* combine av bits */
1580 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1584 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1585 TCGv r3
, uint32_t n
, uint32_t mode
)
1587 TCGv t_n
= tcg_constant_i32(n
);
1588 TCGv_i64 temp64
= tcg_temp_new_i64();
1589 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1590 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1593 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1596 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1599 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1602 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1605 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1606 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1607 /* write back result */
1608 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1612 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1613 TCGv r3
, uint32_t n
, uint32_t mode
)
1615 TCGv t_n
= tcg_constant_i32(n
);
1616 TCGv_i64 temp64
= tcg_temp_new_i64();
1617 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1620 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1623 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1626 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1629 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1632 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1633 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1634 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1638 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1641 TCGv t_n
= tcg_constant_i32(n
);
1642 TCGv_i64 temp64
= tcg_temp_new_i64();
1645 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1648 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1651 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1654 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1657 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1661 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1663 TCGv temp
= tcg_temp_new();
1664 TCGv temp2
= tcg_temp_new();
1666 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1667 tcg_gen_shli_tl(temp
, r1
, 16);
1668 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1672 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1673 uint32_t n
, uint32_t mode
)
1675 TCGv t_n
= tcg_constant_i32(n
);
1676 TCGv_i64 temp64
= tcg_temp_new_i64();
1679 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1682 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1685 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1688 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1691 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1695 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1697 TCGv temp
= tcg_temp_new();
1698 TCGv temp2
= tcg_temp_new();
1700 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1701 tcg_gen_shli_tl(temp
, r1
, 16);
1702 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1706 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1708 TCGv temp
= tcg_const_i32(n
);
1709 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1713 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1715 TCGv temp
= tcg_const_i32(n
);
1716 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1720 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1723 TCGv temp3
= tcg_temp_new();
1724 TCGv_i64 t1
= tcg_temp_new_i64();
1725 TCGv_i64 t2
= tcg_temp_new_i64();
1726 TCGv_i64 t3
= tcg_temp_new_i64();
1727 TCGv_i64 t4
= tcg_temp_new_i64();
1729 tcg_gen_ext_i32_i64(t2
, arg2
);
1730 tcg_gen_ext_i32_i64(t3
, arg3
);
1732 tcg_gen_mul_i64(t2
, t2
, t3
);
1734 tcg_gen_ext_i32_i64(t1
, arg1
);
1735 /* if we shift part of the fraction out, we need to round up */
1736 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1737 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1738 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1739 tcg_gen_add_i64(t2
, t2
, t4
);
1741 tcg_gen_sub_i64(t3
, t1
, t2
);
1742 tcg_gen_extrl_i64_i32(temp3
, t3
);
1744 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1745 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1746 tcg_gen_or_i64(t1
, t1
, t2
);
1747 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1748 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1750 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1751 /* Calc AV/SAV bits */
1752 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1753 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1755 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1756 /* write back result */
1757 tcg_gen_mov_tl(ret
, temp3
);
1761 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1763 TCGv temp
= tcg_temp_new();
1764 TCGv temp2
= tcg_temp_new();
1766 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1767 } else { /* n is expected to be 1 */
1768 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1769 tcg_gen_shli_tl(temp
, temp
, 1);
1770 /* catch special case r1 = r2 = 0x8000 */
1771 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1772 tcg_gen_sub_tl(temp
, temp
, temp2
);
1774 gen_sub_d(ret
, arg1
, temp
);
1778 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1780 TCGv temp
= tcg_temp_new();
1781 TCGv temp2
= tcg_temp_new();
1783 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1784 } else { /* n is expected to be 1 */
1785 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1786 tcg_gen_shli_tl(temp
, temp
, 1);
1787 /* catch special case r1 = r2 = 0x8000 */
1788 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1789 tcg_gen_sub_tl(temp
, temp
, temp2
);
1791 gen_subs(ret
, arg1
, temp
);
1795 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1796 TCGv arg3
, uint32_t n
)
1798 TCGv temp
= tcg_temp_new();
1799 TCGv temp2
= tcg_temp_new();
1800 TCGv_i64 t1
= tcg_temp_new_i64();
1801 TCGv_i64 t2
= tcg_temp_new_i64();
1802 TCGv_i64 t3
= tcg_temp_new_i64();
1805 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1806 } else { /* n is expected to be 1 */
1807 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1808 tcg_gen_shli_tl(temp
, temp
, 1);
1809 /* catch special case r1 = r2 = 0x8000 */
1810 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1811 tcg_gen_sub_tl(temp
, temp
, temp2
);
1813 tcg_gen_ext_i32_i64(t2
, temp
);
1814 tcg_gen_shli_i64(t2
, t2
, 16);
1815 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1816 gen_sub64_d(t3
, t1
, t2
);
1817 /* write back result */
1818 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1822 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1823 TCGv arg3
, uint32_t n
)
1825 TCGv temp
= tcg_temp_new();
1826 TCGv temp2
= tcg_temp_new();
1827 TCGv_i64 t1
= tcg_temp_new_i64();
1828 TCGv_i64 t2
= tcg_temp_new_i64();
1831 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1832 } else { /* n is expected to be 1 */
1833 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1834 tcg_gen_shli_tl(temp
, temp
, 1);
1835 /* catch special case r1 = r2 = 0x8000 */
1836 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1837 tcg_gen_sub_tl(temp
, temp
, temp2
);
1839 tcg_gen_ext_i32_i64(t2
, temp
);
1840 tcg_gen_shli_i64(t2
, t2
, 16);
1841 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1843 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
1844 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1848 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1849 TCGv arg3
, uint32_t n
)
1851 TCGv_i64 t1
= tcg_temp_new_i64();
1852 TCGv_i64 t2
= tcg_temp_new_i64();
1853 TCGv_i64 t3
= tcg_temp_new_i64();
1854 TCGv_i64 t4
= tcg_temp_new_i64();
1857 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1858 tcg_gen_ext_i32_i64(t2
, arg2
);
1859 tcg_gen_ext_i32_i64(t3
, arg3
);
1861 tcg_gen_mul_i64(t2
, t2
, t3
);
1863 tcg_gen_shli_i64(t2
, t2
, 1);
1865 tcg_gen_sub_i64(t4
, t1
, t2
);
1867 tcg_gen_xor_i64(t3
, t4
, t1
);
1868 tcg_gen_xor_i64(t2
, t1
, t2
);
1869 tcg_gen_and_i64(t3
, t3
, t2
);
1870 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1871 /* We produce an overflow on the host if the mul before was
1872 (0x80000000 * 0x80000000) << 1). If this is the
1873 case, we negate the ovf. */
1875 temp
= tcg_temp_new();
1876 temp2
= tcg_temp_new();
1877 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1878 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1879 tcg_gen_and_tl(temp
, temp
, temp2
);
1880 tcg_gen_shli_tl(temp
, temp
, 31);
1881 /* negate v bit, if special condition */
1882 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1884 /* write back result */
1885 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1887 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1888 /* Calc AV/SAV bits */
1889 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1890 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1892 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1896 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1899 TCGv_i64 t1
= tcg_temp_new_i64();
1900 TCGv_i64 t2
= tcg_temp_new_i64();
1901 TCGv_i64 t3
= tcg_temp_new_i64();
1902 TCGv_i64 t4
= tcg_temp_new_i64();
1904 tcg_gen_ext_i32_i64(t1
, arg1
);
1905 tcg_gen_ext_i32_i64(t2
, arg2
);
1906 tcg_gen_ext_i32_i64(t3
, arg3
);
1908 tcg_gen_mul_i64(t2
, t2
, t3
);
1909 /* if we shift part of the fraction out, we need to round up */
1910 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1911 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1912 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
1913 tcg_gen_add_i64(t3
, t3
, t4
);
1915 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
1919 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1920 TCGv arg3
, uint32_t n
)
1922 TCGv_i64 r1
= tcg_temp_new_i64();
1923 TCGv t_n
= tcg_constant_i32(n
);
1925 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1926 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1927 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1931 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1932 TCGv r3
, uint32_t n
, uint32_t mode
)
1934 TCGv t_n
= tcg_constant_i32(n
);
1935 TCGv temp
= tcg_temp_new();
1936 TCGv temp2
= tcg_temp_new();
1937 TCGv_i64 temp64
= tcg_temp_new_i64();
1940 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1943 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1946 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1949 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1952 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1953 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1954 tcg_gen_add_tl
, tcg_gen_sub_tl
);
1958 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1959 TCGv r3
, uint32_t n
, uint32_t mode
)
1961 TCGv t_n
= tcg_constant_i32(n
);
1962 TCGv_i64 temp64
= tcg_temp_new_i64();
1963 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1964 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1967 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1970 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1973 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1976 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1979 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
1980 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
1981 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
1982 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
1983 tcg_gen_shli_i64(temp64
, temp64
, 16);
1985 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
1986 /* write back result */
1987 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
1991 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1993 TCGv t_n
= tcg_constant_i32(n
);
1994 TCGv temp
= tcg_temp_new();
1995 TCGv temp2
= tcg_temp_new();
1996 TCGv_i64 temp64
= tcg_temp_new_i64();
1999 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2002 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2005 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2008 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2011 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2012 tcg_gen_shli_tl(temp
, r1
, 16);
2013 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2017 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2018 TCGv r3
, uint32_t n
, uint32_t mode
)
2020 TCGv t_n
= tcg_constant_i32(n
);
2021 TCGv temp
= tcg_temp_new();
2022 TCGv temp2
= tcg_temp_new();
2023 TCGv temp3
= tcg_temp_new();
2024 TCGv_i64 temp64
= tcg_temp_new_i64();
2028 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2031 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2034 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2037 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2040 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2041 gen_adds(ret_low
, r1_low
, temp
);
2042 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2043 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2044 gen_subs(ret_high
, r1_high
, temp2
);
2045 /* combine v bits */
2046 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2047 /* combine av bits */
2048 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2052 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2053 TCGv r3
, uint32_t n
, uint32_t mode
)
2055 TCGv t_n
= tcg_constant_i32(n
);
2056 TCGv_i64 temp64
= tcg_temp_new_i64();
2057 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2061 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2064 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2067 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2070 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2073 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2074 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2075 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2076 tcg_gen_shli_i64(temp64
, temp64
, 16);
2077 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2079 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2080 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2084 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2086 TCGv t_n
= tcg_constant_i32(n
);
2087 TCGv temp
= tcg_temp_new();
2088 TCGv temp2
= tcg_temp_new();
2089 TCGv_i64 temp64
= tcg_temp_new_i64();
2092 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2095 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2098 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2101 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2104 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2105 tcg_gen_shli_tl(temp
, r1
, 16);
2106 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2109 static inline void gen_abs(TCGv ret
, TCGv r1
)
2111 tcg_gen_abs_tl(ret
, r1
);
2112 /* overflow can only happen, if r1 = 0x80000000 */
2113 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2114 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2116 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2118 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2119 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2121 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2124 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2126 TCGv temp
= tcg_temp_new_i32();
2127 TCGv result
= tcg_temp_new_i32();
2129 tcg_gen_sub_tl(result
, r1
, r2
);
2130 tcg_gen_sub_tl(temp
, r2
, r1
);
2131 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2134 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2135 tcg_gen_xor_tl(temp
, result
, r2
);
2136 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2137 tcg_gen_xor_tl(temp
, r1
, r2
);
2138 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2140 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2142 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2143 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2145 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2146 /* write back result */
2147 tcg_gen_mov_tl(ret
, result
);
2150 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2152 TCGv temp
= tcg_const_i32(con
);
2153 gen_absdif(ret
, r1
, temp
);
2156 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2158 TCGv temp
= tcg_const_i32(con
);
2159 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2162 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2164 TCGv high
= tcg_temp_new();
2165 TCGv low
= tcg_temp_new();
2167 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2168 tcg_gen_mov_tl(ret
, low
);
2170 tcg_gen_sari_tl(low
, low
, 31);
2171 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2172 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2174 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2176 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2177 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2179 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2182 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2184 TCGv temp
= tcg_const_i32(con
);
2185 gen_mul_i32s(ret
, r1
, temp
);
2188 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2190 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2192 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2194 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2196 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2197 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2199 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2202 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2205 TCGv temp
= tcg_const_i32(con
);
2206 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2209 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2211 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2213 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2215 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2217 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2218 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2220 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2223 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2226 TCGv temp
= tcg_const_i32(con
);
2227 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2230 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2232 TCGv temp
= tcg_const_i32(con
);
2233 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2236 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2238 TCGv temp
= tcg_const_i32(con
);
2239 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2241 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2242 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2244 TCGv temp
= tcg_const_i32(con
);
2245 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2248 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2250 TCGv temp
= tcg_const_i32(con
);
2251 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2255 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2257 TCGv_i64 temp_64
= tcg_temp_new_i64();
2258 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2261 if (up_shift
== 32) {
2262 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2263 } else if (up_shift
== 16) {
2264 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2265 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2267 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2268 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2269 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2271 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2274 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2275 } else { /* n is expected to be 1 */
2276 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2277 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2279 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2281 if (up_shift
== 0) {
2282 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2284 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2286 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2287 /* overflow only occurs if r1 = r2 = 0x8000 */
2288 if (up_shift
== 0) {/* result is 64 bit */
2289 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2291 } else { /* result is 32 bit */
2292 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2295 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2296 /* calc sv overflow bit */
2297 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2299 /* calc av overflow bit */
2300 if (up_shift
== 0) {
2301 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2302 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2304 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2305 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2307 /* calc sav overflow bit */
2308 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2312 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2314 TCGv temp
= tcg_temp_new();
2316 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2317 } else { /* n is expected to be 1 */
2318 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2319 tcg_gen_shli_tl(ret
, ret
, 1);
2320 /* catch special case r1 = r2 = 0x8000 */
2321 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2322 tcg_gen_sub_tl(ret
, ret
, temp
);
2325 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2326 /* calc av overflow bit */
2327 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2328 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2329 /* calc sav overflow bit */
2330 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2333 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2335 TCGv temp
= tcg_temp_new();
2337 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2338 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2340 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2341 tcg_gen_shli_tl(ret
, ret
, 1);
2342 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2343 /* catch special case r1 = r2 = 0x8000 */
2344 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2345 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2346 tcg_gen_sub_tl(ret
, ret
, temp
);
2349 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2350 /* calc av overflow bit */
2351 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2352 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2353 /* calc sav overflow bit */
2354 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2355 /* cut halfword off */
2356 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2360 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2363 TCGv_i64 temp64
= tcg_temp_new_i64();
2364 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2365 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2366 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2370 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2373 TCGv temp
= tcg_const_i32(con
);
2374 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2378 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2381 TCGv_i64 temp64
= tcg_temp_new_i64();
2382 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2383 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2384 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2388 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2391 TCGv temp
= tcg_const_i32(con
);
2392 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2395 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2397 TCGv temp
= tcg_const_i32(con
);
2398 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2401 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2403 TCGv temp
= tcg_const_i32(con
);
2404 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2408 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2411 TCGv_i64 temp64
= tcg_temp_new_i64();
2412 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2413 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2414 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2418 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2421 TCGv temp
= tcg_const_i32(con
);
2422 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2426 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2429 TCGv_i64 temp64
= tcg_temp_new_i64();
2430 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2431 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2432 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2436 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2439 TCGv temp
= tcg_const_i32(con
);
2440 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2443 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2445 TCGv sat_neg
= tcg_const_i32(low
);
2446 TCGv temp
= tcg_const_i32(up
);
2448 /* sat_neg = (arg < low ) ? low : arg; */
2449 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2451 /* ret = (sat_neg > up ) ? up : sat_neg; */
2452 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2455 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2457 TCGv temp
= tcg_const_i32(up
);
2458 /* sat_neg = (arg > up ) ? up : arg; */
2459 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2462 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2464 if (shift_count
== -32) {
2465 tcg_gen_movi_tl(ret
, 0);
2466 } else if (shift_count
>= 0) {
2467 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2469 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2473 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2475 TCGv temp_low
, temp_high
;
2477 if (shiftcount
== -16) {
2478 tcg_gen_movi_tl(ret
, 0);
2480 temp_high
= tcg_temp_new();
2481 temp_low
= tcg_temp_new();
2483 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2484 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2485 gen_shi(temp_low
, temp_low
, shiftcount
);
2486 gen_shi(ret
, temp_high
, shiftcount
);
2487 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2491 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2493 uint32_t msk
, msk_start
;
2494 TCGv temp
= tcg_temp_new();
2495 TCGv temp2
= tcg_temp_new();
2497 if (shift_count
== 0) {
2498 /* Clear PSW.C and PSW.V */
2499 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2500 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2501 tcg_gen_mov_tl(ret
, r1
);
2502 } else if (shift_count
== -32) {
2504 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2505 /* fill ret completely with sign bit */
2506 tcg_gen_sari_tl(ret
, r1
, 31);
2508 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2509 } else if (shift_count
> 0) {
2510 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2511 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2514 msk_start
= 32 - shift_count
;
2515 msk
= ((1 << shift_count
) - 1) << msk_start
;
2516 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2517 /* calc v/sv bits */
2518 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2519 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2520 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2521 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2523 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2525 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2528 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2530 msk
= (1 << -shift_count
) - 1;
2531 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2533 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2535 /* calc av overflow bit */
2536 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2537 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2538 /* calc sav overflow bit */
2539 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2542 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2544 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2547 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2549 TCGv temp
= tcg_const_i32(con
);
2550 gen_shas(ret
, r1
, temp
);
2553 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2557 if (shift_count
== 0) {
2558 tcg_gen_mov_tl(ret
, r1
);
2559 } else if (shift_count
> 0) {
2560 low
= tcg_temp_new();
2561 high
= tcg_temp_new();
2563 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2564 tcg_gen_shli_tl(low
, r1
, shift_count
);
2565 tcg_gen_shli_tl(ret
, high
, shift_count
);
2566 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2568 low
= tcg_temp_new();
2569 high
= tcg_temp_new();
2571 tcg_gen_ext16s_tl(low
, r1
);
2572 tcg_gen_sari_tl(low
, low
, -shift_count
);
2573 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2574 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2578 /* ret = {ret[30:0], (r1 cond r2)}; */
2579 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2581 TCGv temp
= tcg_temp_new();
2582 TCGv temp2
= tcg_temp_new();
2584 tcg_gen_shli_tl(temp
, ret
, 1);
2585 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2586 tcg_gen_or_tl(ret
, temp
, temp2
);
2589 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2591 TCGv temp
= tcg_const_i32(con
);
2592 gen_sh_cond(cond
, ret
, r1
, temp
);
2595 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2597 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2600 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2602 TCGv temp
= tcg_const_i32(con
);
2603 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2606 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2608 TCGv temp
= tcg_const_i32(con
);
2609 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2612 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2614 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2617 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2619 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2622 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2624 void(*op1
)(TCGv
, TCGv
, TCGv
),
2625 void(*op2
)(TCGv
, TCGv
, TCGv
))
2629 temp1
= tcg_temp_new();
2630 temp2
= tcg_temp_new();
2632 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2633 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2635 (*op1
)(temp1
, temp1
, temp2
);
2636 (*op2
)(temp1
, ret
, temp1
);
2638 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2641 /* ret = r1[pos1] op1 r2[pos2]; */
2642 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2644 void(*op1
)(TCGv
, TCGv
, TCGv
))
2648 temp1
= tcg_temp_new();
2649 temp2
= tcg_temp_new();
2651 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2652 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2654 (*op1
)(ret
, temp1
, temp2
);
2656 tcg_gen_andi_tl(ret
, ret
, 0x1);
2659 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2660 void(*op
)(TCGv
, TCGv
, TCGv
))
2662 TCGv temp
= tcg_temp_new();
2663 TCGv temp2
= tcg_temp_new();
2664 /* temp = (arg1 cond arg2 )*/
2665 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2667 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2668 /* temp = temp insn temp2 */
2669 (*op
)(temp
, temp
, temp2
);
2670 /* ret = {ret[31:1], temp} */
2671 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2675 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2676 void(*op
)(TCGv
, TCGv
, TCGv
))
2678 TCGv temp
= tcg_const_i32(con
);
2679 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2682 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2683 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2685 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2686 tcg_gen_neg_tl(ret
, ret
);
2689 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2691 TCGv b0
= tcg_temp_new();
2692 TCGv b1
= tcg_temp_new();
2693 TCGv b2
= tcg_temp_new();
2694 TCGv b3
= tcg_temp_new();
2697 tcg_gen_andi_tl(b0
, r1
, 0xff);
2698 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2701 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2702 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2705 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2706 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2709 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2710 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2713 tcg_gen_or_tl(ret
, b0
, b1
);
2714 tcg_gen_or_tl(ret
, ret
, b2
);
2715 tcg_gen_or_tl(ret
, ret
, b3
);
2718 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2720 TCGv h0
= tcg_temp_new();
2721 TCGv h1
= tcg_temp_new();
2724 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2725 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2728 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2729 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2732 tcg_gen_or_tl(ret
, h0
, h1
);
2735 /* mask = ((1 << width) -1) << pos;
2736 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2737 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2739 TCGv mask
= tcg_temp_new();
2740 TCGv temp
= tcg_temp_new();
2741 TCGv temp2
= tcg_temp_new();
2743 tcg_gen_movi_tl(mask
, 1);
2744 tcg_gen_shl_tl(mask
, mask
, width
);
2745 tcg_gen_subi_tl(mask
, mask
, 1);
2746 tcg_gen_shl_tl(mask
, mask
, pos
);
2748 tcg_gen_shl_tl(temp
, r2
, pos
);
2749 tcg_gen_and_tl(temp
, temp
, mask
);
2750 tcg_gen_andc_tl(temp2
, r1
, mask
);
2751 tcg_gen_or_tl(ret
, temp
, temp2
);
2754 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2756 TCGv_i64 temp
= tcg_temp_new_i64();
2758 gen_helper_bsplit(temp
, r1
);
2759 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2762 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2764 TCGv_i64 temp
= tcg_temp_new_i64();
2766 gen_helper_unpack(temp
, r1
);
2767 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2771 gen_dvinit_b(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2773 TCGv_i64 ret
= tcg_temp_new_i64();
2775 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2776 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2778 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2780 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2784 gen_dvinit_h(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2786 TCGv_i64 ret
= tcg_temp_new_i64();
2788 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2789 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2791 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2793 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2796 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2798 TCGv temp
= tcg_temp_new();
2800 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2801 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2802 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2803 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2804 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2806 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2807 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2810 static void gen_calc_usb_mulr_h(TCGv arg
)
2812 TCGv temp
= tcg_temp_new();
2814 tcg_gen_add_tl(temp
, arg
, arg
);
2815 tcg_gen_xor_tl(temp
, temp
, arg
);
2816 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2817 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2819 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2821 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2824 /* helpers for generating program flow micro-ops */
2826 static inline void gen_save_pc(target_ulong pc
)
2828 tcg_gen_movi_tl(cpu_PC
, pc
);
2831 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2833 if (translator_use_goto_tb(&ctx
->base
, dest
)) {
2836 tcg_gen_exit_tb(ctx
->base
.tb
, n
);
2839 tcg_gen_lookup_and_goto_ptr();
2843 static void generate_trap(DisasContext
*ctx
, int class, int tin
)
2845 TCGv_i32 classtemp
= tcg_const_i32(class);
2846 TCGv_i32 tintemp
= tcg_const_i32(tin
);
2848 gen_save_pc(ctx
->base
.pc_next
);
2849 gen_helper_raise_exception_sync(cpu_env
, classtemp
, tintemp
);
2850 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2853 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2854 TCGv r2
, int16_t address
)
2856 TCGLabel
*jumpLabel
= gen_new_label();
2857 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2859 gen_goto_tb(ctx
, 1, ctx
->pc_succ_insn
);
2861 gen_set_label(jumpLabel
);
2862 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ address
* 2);
2865 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2866 int r2
, int16_t address
)
2868 TCGv temp
= tcg_const_i32(r2
);
2869 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2872 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2874 TCGLabel
*l1
= gen_new_label();
2876 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2877 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2878 gen_goto_tb(ctx
, 1, ctx
->base
.pc_next
+ offset
);
2880 gen_goto_tb(ctx
, 0, ctx
->pc_succ_insn
);
2883 static void gen_fcall_save_ctx(DisasContext
*ctx
)
2885 TCGv temp
= tcg_temp_new();
2887 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
2888 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
2889 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
2890 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
2893 static void gen_fret(DisasContext
*ctx
)
2895 TCGv temp
= tcg_temp_new();
2897 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
2898 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
2899 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
2900 tcg_gen_mov_tl(cpu_PC
, temp
);
2901 tcg_gen_exit_tb(NULL
, 0);
2902 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2905 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2906 int r2
, int32_t constant
, int32_t offset
)
2912 /* SB-format jumps */
2915 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2917 case OPC1_32_B_CALL
:
2918 case OPC1_16_SB_CALL
:
2919 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2920 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2923 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2925 case OPC1_16_SB_JNZ
:
2926 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2928 /* SBC-format jumps */
2929 case OPC1_16_SBC_JEQ
:
2930 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2932 case OPC1_16_SBC_JEQ2
:
2933 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2936 case OPC1_16_SBC_JNE
:
2937 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2939 case OPC1_16_SBC_JNE2
:
2940 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2941 constant
, offset
+ 16);
2943 /* SBRN-format jumps */
2944 case OPC1_16_SBRN_JZ_T
:
2945 temp
= tcg_temp_new();
2946 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2947 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2949 case OPC1_16_SBRN_JNZ_T
:
2950 temp
= tcg_temp_new();
2951 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2952 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2954 /* SBR-format jumps */
2955 case OPC1_16_SBR_JEQ
:
2956 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2959 case OPC1_16_SBR_JEQ2
:
2960 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2963 case OPC1_16_SBR_JNE
:
2964 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2967 case OPC1_16_SBR_JNE2
:
2968 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2971 case OPC1_16_SBR_JNZ
:
2972 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2974 case OPC1_16_SBR_JNZ_A
:
2975 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2977 case OPC1_16_SBR_JGEZ
:
2978 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2980 case OPC1_16_SBR_JGTZ
:
2981 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2983 case OPC1_16_SBR_JLEZ
:
2984 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2986 case OPC1_16_SBR_JLTZ
:
2987 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2989 case OPC1_16_SBR_JZ
:
2990 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2992 case OPC1_16_SBR_JZ_A
:
2993 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2995 case OPC1_16_SBR_LOOP
:
2996 gen_loop(ctx
, r1
, offset
* 2 - 32);
2998 /* SR-format jumps */
3000 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3001 tcg_gen_exit_tb(NULL
, 0);
3003 case OPC2_32_SYS_RET
:
3004 case OPC2_16_SR_RET
:
3005 gen_helper_ret(cpu_env
);
3006 tcg_gen_exit_tb(NULL
, 0);
3009 case OPC1_32_B_CALLA
:
3010 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3011 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3013 case OPC1_32_B_FCALL
:
3014 gen_fcall_save_ctx(ctx
);
3015 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3017 case OPC1_32_B_FCALLA
:
3018 gen_fcall_save_ctx(ctx
);
3019 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3022 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3025 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3028 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3029 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3032 case OPCM_32_BRC_EQ_NEQ
:
3033 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3034 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3036 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3039 case OPCM_32_BRC_GE
:
3040 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3041 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3043 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3044 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3048 case OPCM_32_BRC_JLT
:
3049 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3050 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3052 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3053 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3057 case OPCM_32_BRC_JNE
:
3058 temp
= tcg_temp_new();
3059 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3060 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3061 /* subi is unconditional */
3062 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3063 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3065 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3066 /* addi is unconditional */
3067 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3068 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3072 case OPCM_32_BRN_JTT
:
3073 n
= MASK_OP_BRN_N(ctx
->opcode
);
3075 temp
= tcg_temp_new();
3076 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3078 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3079 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3081 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3085 case OPCM_32_BRR_EQ_NEQ
:
3086 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3087 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3090 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3094 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3095 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3096 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3099 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3103 case OPCM_32_BRR_GE
:
3104 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3105 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3108 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3112 case OPCM_32_BRR_JLT
:
3113 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3114 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3117 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3121 case OPCM_32_BRR_LOOP
:
3122 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3123 gen_loop(ctx
, r2
, offset
* 2);
3125 /* OPC2_32_BRR_LOOPU */
3126 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3129 case OPCM_32_BRR_JNE
:
3130 temp
= tcg_temp_new();
3131 temp2
= tcg_temp_new();
3132 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3133 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3134 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3135 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3136 /* subi is unconditional */
3137 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3138 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3140 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3141 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3142 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3143 /* addi is unconditional */
3144 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3145 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3148 case OPCM_32_BRR_JNZ
:
3149 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3150 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3152 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3156 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3158 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3163 * Functions for decoding instructions
3166 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3172 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3173 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3176 case OPC1_16_SRC_ADD
:
3177 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3179 case OPC1_16_SRC_ADD_A15
:
3180 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3182 case OPC1_16_SRC_ADD_15A
:
3183 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3185 case OPC1_16_SRC_ADD_A
:
3186 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3188 case OPC1_16_SRC_CADD
:
3189 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3192 case OPC1_16_SRC_CADDN
:
3193 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3196 case OPC1_16_SRC_CMOV
:
3197 temp
= tcg_const_tl(0);
3198 temp2
= tcg_const_tl(const4
);
3199 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3200 temp2
, cpu_gpr_d
[r1
]);
3202 case OPC1_16_SRC_CMOVN
:
3203 temp
= tcg_const_tl(0);
3204 temp2
= tcg_const_tl(const4
);
3205 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3206 temp2
, cpu_gpr_d
[r1
]);
3208 case OPC1_16_SRC_EQ
:
3209 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3212 case OPC1_16_SRC_LT
:
3213 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3216 case OPC1_16_SRC_MOV
:
3217 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3219 case OPC1_16_SRC_MOV_A
:
3220 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3221 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3223 case OPC1_16_SRC_MOV_E
:
3224 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3225 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3226 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3228 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3231 case OPC1_16_SRC_SH
:
3232 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3234 case OPC1_16_SRC_SHA
:
3235 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3238 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3242 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3247 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3248 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3251 case OPC1_16_SRR_ADD
:
3252 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3254 case OPC1_16_SRR_ADD_A15
:
3255 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3257 case OPC1_16_SRR_ADD_15A
:
3258 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3260 case OPC1_16_SRR_ADD_A
:
3261 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3263 case OPC1_16_SRR_ADDS
:
3264 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3266 case OPC1_16_SRR_AND
:
3267 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3269 case OPC1_16_SRR_CMOV
:
3270 temp
= tcg_const_tl(0);
3271 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3272 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3274 case OPC1_16_SRR_CMOVN
:
3275 temp
= tcg_const_tl(0);
3276 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3277 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3279 case OPC1_16_SRR_EQ
:
3280 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3283 case OPC1_16_SRR_LT
:
3284 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3287 case OPC1_16_SRR_MOV
:
3288 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3290 case OPC1_16_SRR_MOV_A
:
3291 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3293 case OPC1_16_SRR_MOV_AA
:
3294 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3296 case OPC1_16_SRR_MOV_D
:
3297 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3299 case OPC1_16_SRR_MUL
:
3300 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3302 case OPC1_16_SRR_OR
:
3303 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3305 case OPC1_16_SRR_SUB
:
3306 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3308 case OPC1_16_SRR_SUB_A15B
:
3309 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3311 case OPC1_16_SRR_SUB_15AB
:
3312 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3314 case OPC1_16_SRR_SUBS
:
3315 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3317 case OPC1_16_SRR_XOR
:
3318 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3321 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3325 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3329 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3330 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3333 case OPC1_16_SSR_ST_A
:
3334 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3336 case OPC1_16_SSR_ST_A_POSTINC
:
3337 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3338 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3340 case OPC1_16_SSR_ST_B
:
3341 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3343 case OPC1_16_SSR_ST_B_POSTINC
:
3344 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3345 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3347 case OPC1_16_SSR_ST_H
:
3348 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3350 case OPC1_16_SSR_ST_H_POSTINC
:
3351 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3352 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3354 case OPC1_16_SSR_ST_W
:
3355 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3357 case OPC1_16_SSR_ST_W_POSTINC
:
3358 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3359 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3362 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3366 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3370 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3373 case OPC1_16_SC_AND
:
3374 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3376 case OPC1_16_SC_BISR
:
3377 gen_helper_1arg(bisr
, const16
& 0xff);
3379 case OPC1_16_SC_LD_A
:
3380 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3382 case OPC1_16_SC_LD_W
:
3383 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3385 case OPC1_16_SC_MOV
:
3386 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3389 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3391 case OPC1_16_SC_ST_A
:
3392 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3394 case OPC1_16_SC_ST_W
:
3395 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3397 case OPC1_16_SC_SUB_A
:
3398 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3401 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3405 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3409 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3410 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3414 case OPC1_16_SLR_LD_A
:
3415 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3417 case OPC1_16_SLR_LD_A_POSTINC
:
3418 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3419 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3421 case OPC1_16_SLR_LD_BU
:
3422 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3424 case OPC1_16_SLR_LD_BU_POSTINC
:
3425 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3426 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3428 case OPC1_16_SLR_LD_H
:
3429 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3431 case OPC1_16_SLR_LD_H_POSTINC
:
3432 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3433 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3435 case OPC1_16_SLR_LD_W
:
3436 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3438 case OPC1_16_SLR_LD_W_POSTINC
:
3439 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3440 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3443 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3447 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3452 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3453 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3457 case OPC1_16_SRO_LD_A
:
3458 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3460 case OPC1_16_SRO_LD_BU
:
3461 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3463 case OPC1_16_SRO_LD_H
:
3464 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3466 case OPC1_16_SRO_LD_W
:
3467 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3469 case OPC1_16_SRO_ST_A
:
3470 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3472 case OPC1_16_SRO_ST_B
:
3473 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3475 case OPC1_16_SRO_ST_H
:
3476 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3478 case OPC1_16_SRO_ST_W
:
3479 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3482 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3486 static void decode_sr_system(DisasContext
*ctx
)
3489 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3492 case OPC2_16_SR_NOP
:
3494 case OPC2_16_SR_RET
:
3495 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3497 case OPC2_16_SR_RFE
:
3498 gen_helper_rfe(cpu_env
);
3499 tcg_gen_exit_tb(NULL
, 0);
3500 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3502 case OPC2_16_SR_DEBUG
:
3503 /* raise EXCP_DEBUG */
3505 case OPC2_16_SR_FRET
:
3509 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3513 static void decode_sr_accu(DisasContext
*ctx
)
3518 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3519 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3522 case OPC2_16_SR_RSUB
:
3523 /* calc V bit -- overflow only if r1 = -0x80000000 */
3524 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], -0x80000000);
3525 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3527 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3529 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3531 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3532 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3534 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3536 case OPC2_16_SR_SAT_B
:
3537 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3539 case OPC2_16_SR_SAT_BU
:
3540 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3542 case OPC2_16_SR_SAT_H
:
3543 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3545 case OPC2_16_SR_SAT_HU
:
3546 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3549 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3553 static void decode_16Bit_opc(DisasContext
*ctx
)
3561 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3563 /* handle ADDSC.A opcode only being 6 bit long */
3564 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3565 op1
= OPC1_16_SRRS_ADDSC_A
;
3569 case OPC1_16_SRC_ADD
:
3570 case OPC1_16_SRC_ADD_A15
:
3571 case OPC1_16_SRC_ADD_15A
:
3572 case OPC1_16_SRC_ADD_A
:
3573 case OPC1_16_SRC_CADD
:
3574 case OPC1_16_SRC_CADDN
:
3575 case OPC1_16_SRC_CMOV
:
3576 case OPC1_16_SRC_CMOVN
:
3577 case OPC1_16_SRC_EQ
:
3578 case OPC1_16_SRC_LT
:
3579 case OPC1_16_SRC_MOV
:
3580 case OPC1_16_SRC_MOV_A
:
3581 case OPC1_16_SRC_MOV_E
:
3582 case OPC1_16_SRC_SH
:
3583 case OPC1_16_SRC_SHA
:
3584 decode_src_opc(ctx
, op1
);
3587 case OPC1_16_SRR_ADD
:
3588 case OPC1_16_SRR_ADD_A15
:
3589 case OPC1_16_SRR_ADD_15A
:
3590 case OPC1_16_SRR_ADD_A
:
3591 case OPC1_16_SRR_ADDS
:
3592 case OPC1_16_SRR_AND
:
3593 case OPC1_16_SRR_CMOV
:
3594 case OPC1_16_SRR_CMOVN
:
3595 case OPC1_16_SRR_EQ
:
3596 case OPC1_16_SRR_LT
:
3597 case OPC1_16_SRR_MOV
:
3598 case OPC1_16_SRR_MOV_A
:
3599 case OPC1_16_SRR_MOV_AA
:
3600 case OPC1_16_SRR_MOV_D
:
3601 case OPC1_16_SRR_MUL
:
3602 case OPC1_16_SRR_OR
:
3603 case OPC1_16_SRR_SUB
:
3604 case OPC1_16_SRR_SUB_A15B
:
3605 case OPC1_16_SRR_SUB_15AB
:
3606 case OPC1_16_SRR_SUBS
:
3607 case OPC1_16_SRR_XOR
:
3608 decode_srr_opc(ctx
, op1
);
3611 case OPC1_16_SSR_ST_A
:
3612 case OPC1_16_SSR_ST_A_POSTINC
:
3613 case OPC1_16_SSR_ST_B
:
3614 case OPC1_16_SSR_ST_B_POSTINC
:
3615 case OPC1_16_SSR_ST_H
:
3616 case OPC1_16_SSR_ST_H_POSTINC
:
3617 case OPC1_16_SSR_ST_W
:
3618 case OPC1_16_SSR_ST_W_POSTINC
:
3619 decode_ssr_opc(ctx
, op1
);
3622 case OPC1_16_SRRS_ADDSC_A
:
3623 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3624 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3625 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3626 temp
= tcg_temp_new();
3627 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3628 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3631 case OPC1_16_SLRO_LD_A
:
3632 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3633 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3634 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3636 case OPC1_16_SLRO_LD_BU
:
3637 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3638 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3639 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3641 case OPC1_16_SLRO_LD_H
:
3642 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3643 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3644 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3646 case OPC1_16_SLRO_LD_W
:
3647 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3648 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3649 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3652 case OPC1_16_SB_CALL
:
3654 case OPC1_16_SB_JNZ
:
3656 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3657 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3660 case OPC1_16_SBC_JEQ
:
3661 case OPC1_16_SBC_JNE
:
3662 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3663 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3664 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3666 case OPC1_16_SBC_JEQ2
:
3667 case OPC1_16_SBC_JNE2
:
3668 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3669 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3670 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3671 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3673 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3677 case OPC1_16_SBRN_JNZ_T
:
3678 case OPC1_16_SBRN_JZ_T
:
3679 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3680 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3681 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3684 case OPC1_16_SBR_JEQ2
:
3685 case OPC1_16_SBR_JNE2
:
3686 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3687 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3688 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3689 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3691 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3694 case OPC1_16_SBR_JEQ
:
3695 case OPC1_16_SBR_JGEZ
:
3696 case OPC1_16_SBR_JGTZ
:
3697 case OPC1_16_SBR_JLEZ
:
3698 case OPC1_16_SBR_JLTZ
:
3699 case OPC1_16_SBR_JNE
:
3700 case OPC1_16_SBR_JNZ
:
3701 case OPC1_16_SBR_JNZ_A
:
3702 case OPC1_16_SBR_JZ
:
3703 case OPC1_16_SBR_JZ_A
:
3704 case OPC1_16_SBR_LOOP
:
3705 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3706 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3707 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3710 case OPC1_16_SC_AND
:
3711 case OPC1_16_SC_BISR
:
3712 case OPC1_16_SC_LD_A
:
3713 case OPC1_16_SC_LD_W
:
3714 case OPC1_16_SC_MOV
:
3716 case OPC1_16_SC_ST_A
:
3717 case OPC1_16_SC_ST_W
:
3718 case OPC1_16_SC_SUB_A
:
3719 decode_sc_opc(ctx
, op1
);
3722 case OPC1_16_SLR_LD_A
:
3723 case OPC1_16_SLR_LD_A_POSTINC
:
3724 case OPC1_16_SLR_LD_BU
:
3725 case OPC1_16_SLR_LD_BU_POSTINC
:
3726 case OPC1_16_SLR_LD_H
:
3727 case OPC1_16_SLR_LD_H_POSTINC
:
3728 case OPC1_16_SLR_LD_W
:
3729 case OPC1_16_SLR_LD_W_POSTINC
:
3730 decode_slr_opc(ctx
, op1
);
3733 case OPC1_16_SRO_LD_A
:
3734 case OPC1_16_SRO_LD_BU
:
3735 case OPC1_16_SRO_LD_H
:
3736 case OPC1_16_SRO_LD_W
:
3737 case OPC1_16_SRO_ST_A
:
3738 case OPC1_16_SRO_ST_B
:
3739 case OPC1_16_SRO_ST_H
:
3740 case OPC1_16_SRO_ST_W
:
3741 decode_sro_opc(ctx
, op1
);
3744 case OPC1_16_SSRO_ST_A
:
3745 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3746 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3747 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3749 case OPC1_16_SSRO_ST_B
:
3750 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3751 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3752 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3754 case OPC1_16_SSRO_ST_H
:
3755 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3756 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3757 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3759 case OPC1_16_SSRO_ST_W
:
3760 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3761 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3762 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3765 case OPCM_16_SR_SYSTEM
:
3766 decode_sr_system(ctx
);
3768 case OPCM_16_SR_ACCU
:
3769 decode_sr_accu(ctx
);
3772 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3773 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3775 case OPC1_16_SR_NOT
:
3776 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3777 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3780 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3785 * 32 bit instructions
3789 static void decode_abs_ldw(DisasContext
*ctx
)
3796 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3797 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3798 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3800 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3803 case OPC2_32_ABS_LD_A
:
3804 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3806 case OPC2_32_ABS_LD_D
:
3808 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3810 case OPC2_32_ABS_LD_DA
:
3812 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3814 case OPC2_32_ABS_LD_W
:
3815 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3818 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3822 static void decode_abs_ldb(DisasContext
*ctx
)
3829 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3830 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3831 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3833 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3836 case OPC2_32_ABS_LD_B
:
3837 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3839 case OPC2_32_ABS_LD_BU
:
3840 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3842 case OPC2_32_ABS_LD_H
:
3843 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3845 case OPC2_32_ABS_LD_HU
:
3846 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3849 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3853 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3860 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3861 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3862 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3864 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3867 case OPC2_32_ABS_LDMST
:
3868 gen_ldmst(ctx
, r1
, temp
);
3870 case OPC2_32_ABS_SWAP_W
:
3871 gen_swap(ctx
, r1
, temp
);
3874 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3878 static void decode_abs_ldst_context(DisasContext
*ctx
)
3883 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3884 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3887 case OPC2_32_ABS_LDLCX
:
3888 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3890 case OPC2_32_ABS_LDUCX
:
3891 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3893 case OPC2_32_ABS_STLCX
:
3894 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3896 case OPC2_32_ABS_STUCX
:
3897 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3900 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3904 static void decode_abs_store(DisasContext
*ctx
)
3911 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3912 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3913 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3915 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3918 case OPC2_32_ABS_ST_A
:
3919 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3921 case OPC2_32_ABS_ST_D
:
3923 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3925 case OPC2_32_ABS_ST_DA
:
3927 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3929 case OPC2_32_ABS_ST_W
:
3930 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3933 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3937 static void decode_abs_storeb_h(DisasContext
*ctx
)
3944 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3945 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3946 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3948 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3951 case OPC2_32_ABS_ST_B
:
3952 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3954 case OPC2_32_ABS_ST_H
:
3955 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3958 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3964 static void decode_bit_andacc(DisasContext
*ctx
)
3970 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3971 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3972 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3973 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3974 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3975 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3979 case OPC2_32_BIT_AND_AND_T
:
3980 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3981 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3983 case OPC2_32_BIT_AND_ANDN_T
:
3984 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3985 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3987 case OPC2_32_BIT_AND_NOR_T
:
3988 if (TCG_TARGET_HAS_andc_i32
) {
3989 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3990 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3992 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3993 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3996 case OPC2_32_BIT_AND_OR_T
:
3997 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3998 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4001 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4005 static void decode_bit_logical_t(DisasContext
*ctx
)
4010 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4011 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4012 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4013 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4014 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4015 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4018 case OPC2_32_BIT_AND_T
:
4019 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4020 pos1
, pos2
, &tcg_gen_and_tl
);
4022 case OPC2_32_BIT_ANDN_T
:
4023 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4024 pos1
, pos2
, &tcg_gen_andc_tl
);
4026 case OPC2_32_BIT_NOR_T
:
4027 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4028 pos1
, pos2
, &tcg_gen_nor_tl
);
4030 case OPC2_32_BIT_OR_T
:
4031 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4032 pos1
, pos2
, &tcg_gen_or_tl
);
4035 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4039 static void decode_bit_insert(DisasContext
*ctx
)
4045 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4046 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4047 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4048 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4049 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4050 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4052 temp
= tcg_temp_new();
4054 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4055 if (op2
== OPC2_32_BIT_INSN_T
) {
4056 tcg_gen_not_tl(temp
, temp
);
4058 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4061 static void decode_bit_logical_t2(DisasContext
*ctx
)
4068 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4069 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4070 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4071 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4072 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4073 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4076 case OPC2_32_BIT_NAND_T
:
4077 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4078 pos1
, pos2
, &tcg_gen_nand_tl
);
4080 case OPC2_32_BIT_ORN_T
:
4081 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4082 pos1
, pos2
, &tcg_gen_orc_tl
);
4084 case OPC2_32_BIT_XNOR_T
:
4085 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4086 pos1
, pos2
, &tcg_gen_eqv_tl
);
4088 case OPC2_32_BIT_XOR_T
:
4089 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4090 pos1
, pos2
, &tcg_gen_xor_tl
);
4093 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4097 static void decode_bit_orand(DisasContext
*ctx
)
4104 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4105 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4106 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4107 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4108 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4109 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4112 case OPC2_32_BIT_OR_AND_T
:
4113 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4114 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4116 case OPC2_32_BIT_OR_ANDN_T
:
4117 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4118 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4120 case OPC2_32_BIT_OR_NOR_T
:
4121 if (TCG_TARGET_HAS_orc_i32
) {
4122 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4123 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4125 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4126 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4129 case OPC2_32_BIT_OR_OR_T
:
4130 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4131 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4134 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4138 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4145 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4146 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4147 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4148 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4149 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4150 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4152 temp
= tcg_temp_new();
4155 case OPC2_32_BIT_SH_AND_T
:
4156 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4157 pos1
, pos2
, &tcg_gen_and_tl
);
4159 case OPC2_32_BIT_SH_ANDN_T
:
4160 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4161 pos1
, pos2
, &tcg_gen_andc_tl
);
4163 case OPC2_32_BIT_SH_NOR_T
:
4164 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4165 pos1
, pos2
, &tcg_gen_nor_tl
);
4167 case OPC2_32_BIT_SH_OR_T
:
4168 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4169 pos1
, pos2
, &tcg_gen_or_tl
);
4172 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4174 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4175 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4178 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4185 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4186 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4187 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4188 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4189 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4190 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4192 temp
= tcg_temp_new();
4195 case OPC2_32_BIT_SH_NAND_T
:
4196 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4197 pos1
, pos2
, &tcg_gen_nand_tl
);
4199 case OPC2_32_BIT_SH_ORN_T
:
4200 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4201 pos1
, pos2
, &tcg_gen_orc_tl
);
4203 case OPC2_32_BIT_SH_XNOR_T
:
4204 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4205 pos1
, pos2
, &tcg_gen_eqv_tl
);
4207 case OPC2_32_BIT_SH_XOR_T
:
4208 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4209 pos1
, pos2
, &tcg_gen_xor_tl
);
4212 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4214 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4215 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4221 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4228 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4229 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4230 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4231 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4234 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4235 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4236 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4237 /* instruction to access the cache */
4239 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4240 case OPC2_32_BO_CACHEA_W_POSTINC
:
4241 case OPC2_32_BO_CACHEA_I_POSTINC
:
4242 /* instruction to access the cache, but we still need to handle
4243 the addressing mode */
4244 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4246 case OPC2_32_BO_CACHEA_WI_PREINC
:
4247 case OPC2_32_BO_CACHEA_W_PREINC
:
4248 case OPC2_32_BO_CACHEA_I_PREINC
:
4249 /* instruction to access the cache, but we still need to handle
4250 the addressing mode */
4251 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4253 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4254 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4255 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4256 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4259 case OPC2_32_BO_CACHEI_W_POSTINC
:
4260 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4261 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4262 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4264 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4267 case OPC2_32_BO_CACHEI_W_PREINC
:
4268 case OPC2_32_BO_CACHEI_WI_PREINC
:
4269 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4270 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4272 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4275 case OPC2_32_BO_ST_A_SHORTOFF
:
4276 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4278 case OPC2_32_BO_ST_A_POSTINC
:
4279 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4281 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4283 case OPC2_32_BO_ST_A_PREINC
:
4284 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4286 case OPC2_32_BO_ST_B_SHORTOFF
:
4287 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4289 case OPC2_32_BO_ST_B_POSTINC
:
4290 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4292 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4294 case OPC2_32_BO_ST_B_PREINC
:
4295 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4297 case OPC2_32_BO_ST_D_SHORTOFF
:
4299 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4302 case OPC2_32_BO_ST_D_POSTINC
:
4304 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4305 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4307 case OPC2_32_BO_ST_D_PREINC
:
4309 temp
= tcg_temp_new();
4310 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4311 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4312 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4314 case OPC2_32_BO_ST_DA_SHORTOFF
:
4316 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4319 case OPC2_32_BO_ST_DA_POSTINC
:
4321 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4322 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4324 case OPC2_32_BO_ST_DA_PREINC
:
4326 temp
= tcg_temp_new();
4327 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4328 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4329 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4331 case OPC2_32_BO_ST_H_SHORTOFF
:
4332 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4334 case OPC2_32_BO_ST_H_POSTINC
:
4335 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4337 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4339 case OPC2_32_BO_ST_H_PREINC
:
4340 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4342 case OPC2_32_BO_ST_Q_SHORTOFF
:
4343 temp
= tcg_temp_new();
4344 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4345 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4347 case OPC2_32_BO_ST_Q_POSTINC
:
4348 temp
= tcg_temp_new();
4349 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4350 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4352 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4354 case OPC2_32_BO_ST_Q_PREINC
:
4355 temp
= tcg_temp_new();
4356 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4357 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4359 case OPC2_32_BO_ST_W_SHORTOFF
:
4360 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4362 case OPC2_32_BO_ST_W_POSTINC
:
4363 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4365 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4367 case OPC2_32_BO_ST_W_PREINC
:
4368 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4371 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4375 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4380 TCGv temp
, temp2
, t_off10
;
4382 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4383 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4384 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4385 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4387 temp
= tcg_temp_new();
4388 temp2
= tcg_temp_new();
4389 t_off10
= tcg_constant_i32(off10
);
4391 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4392 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4395 case OPC2_32_BO_CACHEA_WI_BR
:
4396 case OPC2_32_BO_CACHEA_W_BR
:
4397 case OPC2_32_BO_CACHEA_I_BR
:
4398 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4400 case OPC2_32_BO_CACHEA_WI_CIRC
:
4401 case OPC2_32_BO_CACHEA_W_CIRC
:
4402 case OPC2_32_BO_CACHEA_I_CIRC
:
4403 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4405 case OPC2_32_BO_ST_A_BR
:
4406 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4407 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4409 case OPC2_32_BO_ST_A_CIRC
:
4410 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4411 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4413 case OPC2_32_BO_ST_B_BR
:
4414 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4415 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4417 case OPC2_32_BO_ST_B_CIRC
:
4418 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4419 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4421 case OPC2_32_BO_ST_D_BR
:
4423 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4424 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4426 case OPC2_32_BO_ST_D_CIRC
:
4428 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4429 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4430 tcg_gen_addi_tl(temp
, temp
, 4);
4431 tcg_gen_rem_tl(temp
, temp
, temp2
);
4432 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4433 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4434 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4436 case OPC2_32_BO_ST_DA_BR
:
4438 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4439 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4441 case OPC2_32_BO_ST_DA_CIRC
:
4443 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4444 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4445 tcg_gen_addi_tl(temp
, temp
, 4);
4446 tcg_gen_rem_tl(temp
, temp
, temp2
);
4447 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4448 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4449 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4451 case OPC2_32_BO_ST_H_BR
:
4452 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4453 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4455 case OPC2_32_BO_ST_H_CIRC
:
4456 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4457 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4459 case OPC2_32_BO_ST_Q_BR
:
4460 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4461 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4462 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4464 case OPC2_32_BO_ST_Q_CIRC
:
4465 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4466 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4467 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4469 case OPC2_32_BO_ST_W_BR
:
4470 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4471 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4473 case OPC2_32_BO_ST_W_CIRC
:
4474 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4475 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4478 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4482 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4489 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4490 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4491 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4492 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4495 case OPC2_32_BO_LD_A_SHORTOFF
:
4496 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4498 case OPC2_32_BO_LD_A_POSTINC
:
4499 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4501 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4503 case OPC2_32_BO_LD_A_PREINC
:
4504 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4506 case OPC2_32_BO_LD_B_SHORTOFF
:
4507 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4509 case OPC2_32_BO_LD_B_POSTINC
:
4510 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4512 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4514 case OPC2_32_BO_LD_B_PREINC
:
4515 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4517 case OPC2_32_BO_LD_BU_SHORTOFF
:
4518 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4520 case OPC2_32_BO_LD_BU_POSTINC
:
4521 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4523 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4525 case OPC2_32_BO_LD_BU_PREINC
:
4526 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4528 case OPC2_32_BO_LD_D_SHORTOFF
:
4530 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4533 case OPC2_32_BO_LD_D_POSTINC
:
4535 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4536 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4538 case OPC2_32_BO_LD_D_PREINC
:
4540 temp
= tcg_temp_new();
4541 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4542 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4543 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4545 case OPC2_32_BO_LD_DA_SHORTOFF
:
4547 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4550 case OPC2_32_BO_LD_DA_POSTINC
:
4552 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4553 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4555 case OPC2_32_BO_LD_DA_PREINC
:
4557 temp
= tcg_temp_new();
4558 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4559 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4560 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4562 case OPC2_32_BO_LD_H_SHORTOFF
:
4563 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4565 case OPC2_32_BO_LD_H_POSTINC
:
4566 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4568 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4570 case OPC2_32_BO_LD_H_PREINC
:
4571 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4573 case OPC2_32_BO_LD_HU_SHORTOFF
:
4574 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4576 case OPC2_32_BO_LD_HU_POSTINC
:
4577 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4579 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4581 case OPC2_32_BO_LD_HU_PREINC
:
4582 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4584 case OPC2_32_BO_LD_Q_SHORTOFF
:
4585 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4586 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4588 case OPC2_32_BO_LD_Q_POSTINC
:
4589 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4591 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4592 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4594 case OPC2_32_BO_LD_Q_PREINC
:
4595 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4596 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4598 case OPC2_32_BO_LD_W_SHORTOFF
:
4599 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4601 case OPC2_32_BO_LD_W_POSTINC
:
4602 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4604 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4606 case OPC2_32_BO_LD_W_PREINC
:
4607 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4610 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4614 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4619 TCGv temp
, temp2
, t_off10
;
4621 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4622 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4623 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4624 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4626 temp
= tcg_temp_new();
4627 temp2
= tcg_temp_new();
4628 t_off10
= tcg_constant_i32(off10
);
4630 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4631 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4635 case OPC2_32_BO_LD_A_BR
:
4636 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4637 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4639 case OPC2_32_BO_LD_A_CIRC
:
4640 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4641 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4643 case OPC2_32_BO_LD_B_BR
:
4644 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4645 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4647 case OPC2_32_BO_LD_B_CIRC
:
4648 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4649 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4651 case OPC2_32_BO_LD_BU_BR
:
4652 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4653 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4655 case OPC2_32_BO_LD_BU_CIRC
:
4656 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4657 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4659 case OPC2_32_BO_LD_D_BR
:
4661 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4662 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4664 case OPC2_32_BO_LD_D_CIRC
:
4666 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4667 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4668 tcg_gen_addi_tl(temp
, temp
, 4);
4669 tcg_gen_rem_tl(temp
, temp
, temp2
);
4670 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4671 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4672 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4674 case OPC2_32_BO_LD_DA_BR
:
4676 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4677 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4679 case OPC2_32_BO_LD_DA_CIRC
:
4681 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4682 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4683 tcg_gen_addi_tl(temp
, temp
, 4);
4684 tcg_gen_rem_tl(temp
, temp
, temp2
);
4685 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4686 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4687 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4689 case OPC2_32_BO_LD_H_BR
:
4690 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4691 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4693 case OPC2_32_BO_LD_H_CIRC
:
4694 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4695 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4697 case OPC2_32_BO_LD_HU_BR
:
4698 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4699 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4701 case OPC2_32_BO_LD_HU_CIRC
:
4702 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4703 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4705 case OPC2_32_BO_LD_Q_BR
:
4706 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4707 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4708 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4710 case OPC2_32_BO_LD_Q_CIRC
:
4711 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4712 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4713 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4715 case OPC2_32_BO_LD_W_BR
:
4716 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4717 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4719 case OPC2_32_BO_LD_W_CIRC
:
4720 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4721 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4724 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4728 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4736 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4737 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4738 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4739 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4742 temp
= tcg_temp_new();
4745 case OPC2_32_BO_LDLCX_SHORTOFF
:
4746 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4747 gen_helper_ldlcx(cpu_env
, temp
);
4749 case OPC2_32_BO_LDMST_SHORTOFF
:
4750 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4751 gen_ldmst(ctx
, r1
, temp
);
4753 case OPC2_32_BO_LDMST_POSTINC
:
4754 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4755 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4757 case OPC2_32_BO_LDMST_PREINC
:
4758 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4759 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4761 case OPC2_32_BO_LDUCX_SHORTOFF
:
4762 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4763 gen_helper_lducx(cpu_env
, temp
);
4765 case OPC2_32_BO_LEA_SHORTOFF
:
4766 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4768 case OPC2_32_BO_STLCX_SHORTOFF
:
4769 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4770 gen_helper_stlcx(cpu_env
, temp
);
4772 case OPC2_32_BO_STUCX_SHORTOFF
:
4773 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4774 gen_helper_stucx(cpu_env
, temp
);
4776 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4777 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4778 gen_swap(ctx
, r1
, temp
);
4780 case OPC2_32_BO_SWAP_W_POSTINC
:
4781 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4782 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4784 case OPC2_32_BO_SWAP_W_PREINC
:
4785 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4786 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4788 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4789 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4790 gen_cmpswap(ctx
, r1
, temp
);
4792 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4793 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4794 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4796 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4797 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4798 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4800 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4801 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4802 gen_swapmsk(ctx
, r1
, temp
);
4804 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4805 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4806 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4808 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4809 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4810 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4813 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4817 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4822 TCGv temp
, temp2
, t_off10
;
4824 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4825 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4826 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4827 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4829 temp
= tcg_temp_new();
4830 temp2
= tcg_temp_new();
4831 t_off10
= tcg_constant_i32(off10
);
4833 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4834 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4837 case OPC2_32_BO_LDMST_BR
:
4838 gen_ldmst(ctx
, r1
, temp2
);
4839 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4841 case OPC2_32_BO_LDMST_CIRC
:
4842 gen_ldmst(ctx
, r1
, temp2
);
4843 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4845 case OPC2_32_BO_SWAP_W_BR
:
4846 gen_swap(ctx
, r1
, temp2
);
4847 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4849 case OPC2_32_BO_SWAP_W_CIRC
:
4850 gen_swap(ctx
, r1
, temp2
);
4851 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4853 case OPC2_32_BO_CMPSWAP_W_BR
:
4854 gen_cmpswap(ctx
, r1
, temp2
);
4855 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4857 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4858 gen_cmpswap(ctx
, r1
, temp2
);
4859 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4861 case OPC2_32_BO_SWAPMSK_W_BR
:
4862 gen_swapmsk(ctx
, r1
, temp2
);
4863 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4865 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4866 gen_swapmsk(ctx
, r1
, temp2
);
4867 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4870 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4874 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4880 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4881 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4882 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4885 case OPC1_32_BOL_LD_A_LONGOFF
:
4886 temp
= tcg_temp_new();
4887 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4888 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4890 case OPC1_32_BOL_LD_W_LONGOFF
:
4891 temp
= tcg_temp_new();
4892 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4893 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4895 case OPC1_32_BOL_LEA_LONGOFF
:
4896 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4898 case OPC1_32_BOL_ST_A_LONGOFF
:
4899 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4900 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4902 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4905 case OPC1_32_BOL_ST_W_LONGOFF
:
4906 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4908 case OPC1_32_BOL_LD_B_LONGOFF
:
4909 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4910 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4912 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4915 case OPC1_32_BOL_LD_BU_LONGOFF
:
4916 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4917 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4919 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4922 case OPC1_32_BOL_LD_H_LONGOFF
:
4923 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4924 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4926 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4929 case OPC1_32_BOL_LD_HU_LONGOFF
:
4930 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4931 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4933 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4936 case OPC1_32_BOL_ST_B_LONGOFF
:
4937 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4938 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4940 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4943 case OPC1_32_BOL_ST_H_LONGOFF
:
4944 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4945 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4947 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4951 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4956 static void decode_rc_logical_shift(DisasContext
*ctx
)
4963 r2
= MASK_OP_RC_D(ctx
->opcode
);
4964 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4965 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4966 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4968 temp
= tcg_temp_new();
4971 case OPC2_32_RC_AND
:
4972 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4974 case OPC2_32_RC_ANDN
:
4975 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4977 case OPC2_32_RC_NAND
:
4978 tcg_gen_movi_tl(temp
, const9
);
4979 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4981 case OPC2_32_RC_NOR
:
4982 tcg_gen_movi_tl(temp
, const9
);
4983 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4986 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4988 case OPC2_32_RC_ORN
:
4989 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4992 const9
= sextract32(const9
, 0, 6);
4993 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4995 case OPC2_32_RC_SH_H
:
4996 const9
= sextract32(const9
, 0, 5);
4997 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4999 case OPC2_32_RC_SHA
:
5000 const9
= sextract32(const9
, 0, 6);
5001 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5003 case OPC2_32_RC_SHA_H
:
5004 const9
= sextract32(const9
, 0, 5);
5005 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5007 case OPC2_32_RC_SHAS
:
5008 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5010 case OPC2_32_RC_XNOR
:
5011 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5012 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5014 case OPC2_32_RC_XOR
:
5015 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5018 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5022 static void decode_rc_accumulator(DisasContext
*ctx
)
5030 r2
= MASK_OP_RC_D(ctx
->opcode
);
5031 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5032 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5034 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5036 temp
= tcg_temp_new();
5039 case OPC2_32_RC_ABSDIF
:
5040 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5042 case OPC2_32_RC_ABSDIFS
:
5043 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5045 case OPC2_32_RC_ADD
:
5046 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5048 case OPC2_32_RC_ADDC
:
5049 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5051 case OPC2_32_RC_ADDS
:
5052 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5054 case OPC2_32_RC_ADDS_U
:
5055 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5057 case OPC2_32_RC_ADDX
:
5058 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5060 case OPC2_32_RC_AND_EQ
:
5061 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5062 const9
, &tcg_gen_and_tl
);
5064 case OPC2_32_RC_AND_GE
:
5065 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5066 const9
, &tcg_gen_and_tl
);
5068 case OPC2_32_RC_AND_GE_U
:
5069 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5070 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5071 const9
, &tcg_gen_and_tl
);
5073 case OPC2_32_RC_AND_LT
:
5074 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5075 const9
, &tcg_gen_and_tl
);
5077 case OPC2_32_RC_AND_LT_U
:
5078 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5079 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5080 const9
, &tcg_gen_and_tl
);
5082 case OPC2_32_RC_AND_NE
:
5083 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5084 const9
, &tcg_gen_and_tl
);
5087 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5089 case OPC2_32_RC_EQANY_B
:
5090 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5092 case OPC2_32_RC_EQANY_H
:
5093 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5096 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5098 case OPC2_32_RC_GE_U
:
5099 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5100 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5103 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5105 case OPC2_32_RC_LT_U
:
5106 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5107 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5109 case OPC2_32_RC_MAX
:
5110 tcg_gen_movi_tl(temp
, const9
);
5111 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5112 cpu_gpr_d
[r1
], temp
);
5114 case OPC2_32_RC_MAX_U
:
5115 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5116 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5117 cpu_gpr_d
[r1
], temp
);
5119 case OPC2_32_RC_MIN
:
5120 tcg_gen_movi_tl(temp
, const9
);
5121 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5122 cpu_gpr_d
[r1
], temp
);
5124 case OPC2_32_RC_MIN_U
:
5125 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5126 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5127 cpu_gpr_d
[r1
], temp
);
5130 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5132 case OPC2_32_RC_OR_EQ
:
5133 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5134 const9
, &tcg_gen_or_tl
);
5136 case OPC2_32_RC_OR_GE
:
5137 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5138 const9
, &tcg_gen_or_tl
);
5140 case OPC2_32_RC_OR_GE_U
:
5141 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5142 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5143 const9
, &tcg_gen_or_tl
);
5145 case OPC2_32_RC_OR_LT
:
5146 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5147 const9
, &tcg_gen_or_tl
);
5149 case OPC2_32_RC_OR_LT_U
:
5150 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5151 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5152 const9
, &tcg_gen_or_tl
);
5154 case OPC2_32_RC_OR_NE
:
5155 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5156 const9
, &tcg_gen_or_tl
);
5158 case OPC2_32_RC_RSUB
:
5159 tcg_gen_movi_tl(temp
, const9
);
5160 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5162 case OPC2_32_RC_RSUBS
:
5163 tcg_gen_movi_tl(temp
, const9
);
5164 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5166 case OPC2_32_RC_RSUBS_U
:
5167 tcg_gen_movi_tl(temp
, const9
);
5168 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5170 case OPC2_32_RC_SH_EQ
:
5171 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5173 case OPC2_32_RC_SH_GE
:
5174 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5176 case OPC2_32_RC_SH_GE_U
:
5177 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5178 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5180 case OPC2_32_RC_SH_LT
:
5181 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5183 case OPC2_32_RC_SH_LT_U
:
5184 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5185 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5187 case OPC2_32_RC_SH_NE
:
5188 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5190 case OPC2_32_RC_XOR_EQ
:
5191 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5192 const9
, &tcg_gen_xor_tl
);
5194 case OPC2_32_RC_XOR_GE
:
5195 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5196 const9
, &tcg_gen_xor_tl
);
5198 case OPC2_32_RC_XOR_GE_U
:
5199 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5200 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5201 const9
, &tcg_gen_xor_tl
);
5203 case OPC2_32_RC_XOR_LT
:
5204 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5205 const9
, &tcg_gen_xor_tl
);
5207 case OPC2_32_RC_XOR_LT_U
:
5208 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5209 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5210 const9
, &tcg_gen_xor_tl
);
5212 case OPC2_32_RC_XOR_NE
:
5213 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5214 const9
, &tcg_gen_xor_tl
);
5217 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5221 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5226 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5227 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5230 case OPC2_32_RC_BISR
:
5231 gen_helper_1arg(bisr
, const9
);
5233 case OPC2_32_RC_SYSCALL
:
5234 /* TODO: Add exception generation */
5237 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5241 static void decode_rc_mul(DisasContext
*ctx
)
5247 r2
= MASK_OP_RC_D(ctx
->opcode
);
5248 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5249 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5251 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5254 case OPC2_32_RC_MUL_32
:
5255 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5257 case OPC2_32_RC_MUL_64
:
5259 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5261 case OPC2_32_RC_MULS_32
:
5262 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5264 case OPC2_32_RC_MUL_U_64
:
5265 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5267 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5269 case OPC2_32_RC_MULS_U_32
:
5270 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5271 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5274 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5279 static void decode_rcpw_insert(DisasContext
*ctx
)
5283 int32_t pos
, width
, const4
;
5287 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5288 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5289 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5290 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5291 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5292 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5295 case OPC2_32_RCPW_IMASK
:
5297 /* if pos + width > 32 undefined result */
5298 if (pos
+ width
<= 32) {
5299 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5300 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5303 case OPC2_32_RCPW_INSERT
:
5304 /* if pos + width > 32 undefined result */
5305 if (pos
+ width
<= 32) {
5306 temp
= tcg_const_i32(const4
);
5307 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5311 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5317 static void decode_rcrw_insert(DisasContext
*ctx
)
5321 int32_t width
, const4
;
5323 TCGv temp
, temp2
, temp3
;
5325 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5326 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5327 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5328 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5329 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5330 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5332 temp
= tcg_temp_new();
5333 temp2
= tcg_temp_new();
5336 case OPC2_32_RCRW_IMASK
:
5337 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5338 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5339 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5340 tcg_gen_movi_tl(temp2
, const4
);
5341 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5343 case OPC2_32_RCRW_INSERT
:
5344 temp3
= tcg_temp_new();
5346 tcg_gen_movi_tl(temp
, width
);
5347 tcg_gen_movi_tl(temp2
, const4
);
5348 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5349 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5352 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5358 static void decode_rcr_cond_select(DisasContext
*ctx
)
5366 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5367 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5368 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5369 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5370 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5373 case OPC2_32_RCR_CADD
:
5374 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5377 case OPC2_32_RCR_CADDN
:
5378 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5381 case OPC2_32_RCR_SEL
:
5382 temp
= tcg_const_i32(0);
5383 temp2
= tcg_const_i32(const9
);
5384 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5385 cpu_gpr_d
[r1
], temp2
);
5387 case OPC2_32_RCR_SELN
:
5388 temp
= tcg_const_i32(0);
5389 temp2
= tcg_const_i32(const9
);
5390 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5391 cpu_gpr_d
[r1
], temp2
);
5394 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5398 static void decode_rcr_madd(DisasContext
*ctx
)
5405 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5406 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5407 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5408 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5409 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5412 case OPC2_32_RCR_MADD_32
:
5413 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5415 case OPC2_32_RCR_MADD_64
:
5418 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5419 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5421 case OPC2_32_RCR_MADDS_32
:
5422 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5424 case OPC2_32_RCR_MADDS_64
:
5427 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5428 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5430 case OPC2_32_RCR_MADD_U_64
:
5433 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5434 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5435 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5437 case OPC2_32_RCR_MADDS_U_32
:
5438 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5439 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5441 case OPC2_32_RCR_MADDS_U_64
:
5444 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5445 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5446 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5449 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5453 static void decode_rcr_msub(DisasContext
*ctx
)
5460 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5461 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5462 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5463 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5464 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5467 case OPC2_32_RCR_MSUB_32
:
5468 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5470 case OPC2_32_RCR_MSUB_64
:
5473 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5474 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5476 case OPC2_32_RCR_MSUBS_32
:
5477 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5479 case OPC2_32_RCR_MSUBS_64
:
5482 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5483 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5485 case OPC2_32_RCR_MSUB_U_64
:
5488 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5489 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5490 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5492 case OPC2_32_RCR_MSUBS_U_32
:
5493 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5494 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5496 case OPC2_32_RCR_MSUBS_U_64
:
5499 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5500 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5501 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5504 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5510 static void decode_rlc_opc(DisasContext
*ctx
,
5516 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5517 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5518 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5521 case OPC1_32_RLC_ADDI
:
5522 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5524 case OPC1_32_RLC_ADDIH
:
5525 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5527 case OPC1_32_RLC_ADDIH_A
:
5528 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5530 case OPC1_32_RLC_MFCR
:
5531 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5532 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5534 case OPC1_32_RLC_MOV
:
5535 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5537 case OPC1_32_RLC_MOV_64
:
5538 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5540 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5541 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5543 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5546 case OPC1_32_RLC_MOV_U
:
5547 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5548 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5550 case OPC1_32_RLC_MOV_H
:
5551 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5553 case OPC1_32_RLC_MOVH_A
:
5554 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5556 case OPC1_32_RLC_MTCR
:
5557 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5558 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5561 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5566 static void decode_rr_accumulator(DisasContext
*ctx
)
5573 r3
= MASK_OP_RR_D(ctx
->opcode
);
5574 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5575 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5576 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5579 case OPC2_32_RR_ABS
:
5580 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5582 case OPC2_32_RR_ABS_B
:
5583 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5585 case OPC2_32_RR_ABS_H
:
5586 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5588 case OPC2_32_RR_ABSDIF
:
5589 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5591 case OPC2_32_RR_ABSDIF_B
:
5592 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5595 case OPC2_32_RR_ABSDIF_H
:
5596 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5599 case OPC2_32_RR_ABSDIFS
:
5600 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5603 case OPC2_32_RR_ABSDIFS_H
:
5604 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5607 case OPC2_32_RR_ABSS
:
5608 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5610 case OPC2_32_RR_ABSS_H
:
5611 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5613 case OPC2_32_RR_ADD
:
5614 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5616 case OPC2_32_RR_ADD_B
:
5617 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5619 case OPC2_32_RR_ADD_H
:
5620 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5622 case OPC2_32_RR_ADDC
:
5623 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5625 case OPC2_32_RR_ADDS
:
5626 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5628 case OPC2_32_RR_ADDS_H
:
5629 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5632 case OPC2_32_RR_ADDS_HU
:
5633 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5636 case OPC2_32_RR_ADDS_U
:
5637 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5640 case OPC2_32_RR_ADDX
:
5641 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5643 case OPC2_32_RR_AND_EQ
:
5644 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5645 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5647 case OPC2_32_RR_AND_GE
:
5648 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5649 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5651 case OPC2_32_RR_AND_GE_U
:
5652 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5653 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5655 case OPC2_32_RR_AND_LT
:
5656 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5657 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5659 case OPC2_32_RR_AND_LT_U
:
5660 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5661 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5663 case OPC2_32_RR_AND_NE
:
5664 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5665 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5668 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5671 case OPC2_32_RR_EQ_B
:
5672 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5674 case OPC2_32_RR_EQ_H
:
5675 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5677 case OPC2_32_RR_EQ_W
:
5678 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5680 case OPC2_32_RR_EQANY_B
:
5681 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5683 case OPC2_32_RR_EQANY_H
:
5684 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5687 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5690 case OPC2_32_RR_GE_U
:
5691 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5695 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5698 case OPC2_32_RR_LT_U
:
5699 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5702 case OPC2_32_RR_LT_B
:
5703 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5705 case OPC2_32_RR_LT_BU
:
5706 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5708 case OPC2_32_RR_LT_H
:
5709 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5711 case OPC2_32_RR_LT_HU
:
5712 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5714 case OPC2_32_RR_LT_W
:
5715 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5717 case OPC2_32_RR_LT_WU
:
5718 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5720 case OPC2_32_RR_MAX
:
5721 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5722 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5724 case OPC2_32_RR_MAX_U
:
5725 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5726 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5728 case OPC2_32_RR_MAX_B
:
5729 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5731 case OPC2_32_RR_MAX_BU
:
5732 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5734 case OPC2_32_RR_MAX_H
:
5735 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5737 case OPC2_32_RR_MAX_HU
:
5738 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5740 case OPC2_32_RR_MIN
:
5741 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5742 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5744 case OPC2_32_RR_MIN_U
:
5745 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5746 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5748 case OPC2_32_RR_MIN_B
:
5749 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5751 case OPC2_32_RR_MIN_BU
:
5752 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5754 case OPC2_32_RR_MIN_H
:
5755 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5757 case OPC2_32_RR_MIN_HU
:
5758 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5760 case OPC2_32_RR_MOV
:
5761 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5763 case OPC2_32_RR_MOV_64
:
5764 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5765 temp
= tcg_temp_new();
5768 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5769 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5770 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5772 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5775 case OPC2_32_RR_MOVS_64
:
5776 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5778 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5779 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5781 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5785 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5788 case OPC2_32_RR_OR_EQ
:
5789 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5790 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5792 case OPC2_32_RR_OR_GE
:
5793 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5794 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5796 case OPC2_32_RR_OR_GE_U
:
5797 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5798 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5800 case OPC2_32_RR_OR_LT
:
5801 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5802 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5804 case OPC2_32_RR_OR_LT_U
:
5805 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5806 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5808 case OPC2_32_RR_OR_NE
:
5809 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5810 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5812 case OPC2_32_RR_SAT_B
:
5813 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5815 case OPC2_32_RR_SAT_BU
:
5816 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5818 case OPC2_32_RR_SAT_H
:
5819 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5821 case OPC2_32_RR_SAT_HU
:
5822 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5824 case OPC2_32_RR_SH_EQ
:
5825 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5828 case OPC2_32_RR_SH_GE
:
5829 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5832 case OPC2_32_RR_SH_GE_U
:
5833 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5836 case OPC2_32_RR_SH_LT
:
5837 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5840 case OPC2_32_RR_SH_LT_U
:
5841 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5844 case OPC2_32_RR_SH_NE
:
5845 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5848 case OPC2_32_RR_SUB
:
5849 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5851 case OPC2_32_RR_SUB_B
:
5852 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5854 case OPC2_32_RR_SUB_H
:
5855 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5857 case OPC2_32_RR_SUBC
:
5858 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5860 case OPC2_32_RR_SUBS
:
5861 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5863 case OPC2_32_RR_SUBS_U
:
5864 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5866 case OPC2_32_RR_SUBS_H
:
5867 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5870 case OPC2_32_RR_SUBS_HU
:
5871 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5874 case OPC2_32_RR_SUBX
:
5875 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5877 case OPC2_32_RR_XOR_EQ
:
5878 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5879 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5881 case OPC2_32_RR_XOR_GE
:
5882 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5883 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5885 case OPC2_32_RR_XOR_GE_U
:
5886 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5887 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5889 case OPC2_32_RR_XOR_LT
:
5890 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5891 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5893 case OPC2_32_RR_XOR_LT_U
:
5894 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5895 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5897 case OPC2_32_RR_XOR_NE
:
5898 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5899 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5902 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5906 static void decode_rr_logical_shift(DisasContext
*ctx
)
5911 r3
= MASK_OP_RR_D(ctx
->opcode
);
5912 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5913 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5914 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5917 case OPC2_32_RR_AND
:
5918 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5920 case OPC2_32_RR_ANDN
:
5921 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5923 case OPC2_32_RR_CLO
:
5924 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5925 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5927 case OPC2_32_RR_CLO_H
:
5928 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5930 case OPC2_32_RR_CLS
:
5931 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5933 case OPC2_32_RR_CLS_H
:
5934 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5936 case OPC2_32_RR_CLZ
:
5937 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5939 case OPC2_32_RR_CLZ_H
:
5940 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5942 case OPC2_32_RR_NAND
:
5943 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5945 case OPC2_32_RR_NOR
:
5946 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5949 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5951 case OPC2_32_RR_ORN
:
5952 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5955 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5957 case OPC2_32_RR_SH_H
:
5958 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5960 case OPC2_32_RR_SHA
:
5961 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5963 case OPC2_32_RR_SHA_H
:
5964 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5966 case OPC2_32_RR_SHAS
:
5967 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5969 case OPC2_32_RR_XNOR
:
5970 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5972 case OPC2_32_RR_XOR
:
5973 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5976 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5980 static void decode_rr_address(DisasContext
*ctx
)
5986 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5987 r3
= MASK_OP_RR_D(ctx
->opcode
);
5988 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5989 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5990 n
= MASK_OP_RR_N(ctx
->opcode
);
5993 case OPC2_32_RR_ADD_A
:
5994 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5996 case OPC2_32_RR_ADDSC_A
:
5997 temp
= tcg_temp_new();
5998 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5999 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6001 case OPC2_32_RR_ADDSC_AT
:
6002 temp
= tcg_temp_new();
6003 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6004 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6005 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6007 case OPC2_32_RR_EQ_A
:
6008 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6011 case OPC2_32_RR_EQZ
:
6012 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6014 case OPC2_32_RR_GE_A
:
6015 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6018 case OPC2_32_RR_LT_A
:
6019 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6022 case OPC2_32_RR_MOV_A
:
6023 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6025 case OPC2_32_RR_MOV_AA
:
6026 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6028 case OPC2_32_RR_MOV_D
:
6029 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6031 case OPC2_32_RR_NE_A
:
6032 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6035 case OPC2_32_RR_NEZ_A
:
6036 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6038 case OPC2_32_RR_SUB_A
:
6039 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6042 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6046 static void decode_rr_idirect(DisasContext
*ctx
)
6051 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6052 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6056 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6058 case OPC2_32_RR_JLI
:
6059 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6060 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6062 case OPC2_32_RR_CALLI
:
6063 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6064 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6066 case OPC2_32_RR_FCALLI
:
6067 gen_fcall_save_ctx(ctx
);
6068 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6071 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6073 tcg_gen_exit_tb(NULL
, 0);
6074 ctx
->base
.is_jmp
= DISAS_NORETURN
;
6077 static void decode_rr_divide(DisasContext
*ctx
)
6082 TCGv temp
, temp2
, temp3
;
6084 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6085 r3
= MASK_OP_RR_D(ctx
->opcode
);
6086 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6087 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6090 case OPC2_32_RR_BMERGE
:
6091 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6093 case OPC2_32_RR_BSPLIT
:
6095 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6097 case OPC2_32_RR_DVINIT_B
:
6099 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6102 case OPC2_32_RR_DVINIT_BU
:
6103 temp
= tcg_temp_new();
6104 temp2
= tcg_temp_new();
6105 temp3
= tcg_temp_new();
6107 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6109 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6110 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6111 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6112 tcg_gen_abs_tl(temp
, temp3
);
6113 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6114 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6116 /* overflow = (D[b] == 0) */
6117 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6119 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6121 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6123 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6124 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6126 case OPC2_32_RR_DVINIT_H
:
6128 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6131 case OPC2_32_RR_DVINIT_HU
:
6132 temp
= tcg_temp_new();
6133 temp2
= tcg_temp_new();
6134 temp3
= tcg_temp_new();
6136 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6138 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6139 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6140 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6141 tcg_gen_abs_tl(temp
, temp3
);
6142 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6143 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6145 /* overflow = (D[b] == 0) */
6146 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6148 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6150 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6152 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6153 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6155 case OPC2_32_RR_DVINIT
:
6156 temp
= tcg_temp_new();
6157 temp2
= tcg_temp_new();
6159 /* overflow = ((D[b] == 0) ||
6160 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6161 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6162 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6163 tcg_gen_and_tl(temp
, temp
, temp2
);
6164 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6165 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6166 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6168 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6170 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6172 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6173 /* sign extend to high reg */
6174 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6176 case OPC2_32_RR_DVINIT_U
:
6177 /* overflow = (D[b] == 0) */
6178 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6179 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6181 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6183 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6185 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6186 /* zero extend to high reg*/
6187 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6189 case OPC2_32_RR_PARITY
:
6190 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6192 case OPC2_32_RR_UNPACK
:
6194 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6196 case OPC2_32_RR_CRC32
:
6197 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6198 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6200 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6203 case OPC2_32_RR_DIV
:
6204 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6205 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6208 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6211 case OPC2_32_RR_DIV_U
:
6212 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6213 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6214 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6216 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6219 case OPC2_32_RR_MUL_F
:
6220 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6222 case OPC2_32_RR_DIV_F
:
6223 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6225 case OPC2_32_RR_CMP_F
:
6226 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6228 case OPC2_32_RR_FTOI
:
6229 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6231 case OPC2_32_RR_ITOF
:
6232 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6234 case OPC2_32_RR_FTOUZ
:
6235 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6237 case OPC2_32_RR_UPDFL
:
6238 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6240 case OPC2_32_RR_UTOF
:
6241 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6243 case OPC2_32_RR_FTOIZ
:
6244 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6246 case OPC2_32_RR_QSEED_F
:
6247 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6250 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6255 static void decode_rr1_mul(DisasContext
*ctx
)
6263 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6264 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6265 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6266 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
6267 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6270 case OPC2_32_RR1_MUL_H_32_LL
:
6271 temp64
= tcg_temp_new_i64();
6273 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6274 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6275 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6277 case OPC2_32_RR1_MUL_H_32_LU
:
6278 temp64
= tcg_temp_new_i64();
6280 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6281 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6282 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6284 case OPC2_32_RR1_MUL_H_32_UL
:
6285 temp64
= tcg_temp_new_i64();
6287 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6288 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6289 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6291 case OPC2_32_RR1_MUL_H_32_UU
:
6292 temp64
= tcg_temp_new_i64();
6294 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6295 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6296 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6298 case OPC2_32_RR1_MULM_H_64_LL
:
6299 temp64
= tcg_temp_new_i64();
6301 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6302 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6304 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6306 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6308 case OPC2_32_RR1_MULM_H_64_LU
:
6309 temp64
= tcg_temp_new_i64();
6311 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6312 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6314 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6316 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6318 case OPC2_32_RR1_MULM_H_64_UL
:
6319 temp64
= tcg_temp_new_i64();
6321 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6322 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6324 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6326 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6328 case OPC2_32_RR1_MULM_H_64_UU
:
6329 temp64
= tcg_temp_new_i64();
6331 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6332 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6334 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6336 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6338 case OPC2_32_RR1_MULR_H_16_LL
:
6339 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6340 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6342 case OPC2_32_RR1_MULR_H_16_LU
:
6343 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6344 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6346 case OPC2_32_RR1_MULR_H_16_UL
:
6347 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6348 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6350 case OPC2_32_RR1_MULR_H_16_UU
:
6351 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6352 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6355 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6359 static void decode_rr1_mulq(DisasContext
*ctx
)
6367 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6368 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6369 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6370 n
= MASK_OP_RR1_N(ctx
->opcode
);
6371 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6373 temp
= tcg_temp_new();
6374 temp2
= tcg_temp_new();
6377 case OPC2_32_RR1_MUL_Q_32
:
6378 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6380 case OPC2_32_RR1_MUL_Q_64
:
6382 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6385 case OPC2_32_RR1_MUL_Q_32_L
:
6386 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6387 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6389 case OPC2_32_RR1_MUL_Q_64_L
:
6391 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6392 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6394 case OPC2_32_RR1_MUL_Q_32_U
:
6395 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6396 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6398 case OPC2_32_RR1_MUL_Q_64_U
:
6400 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6401 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6403 case OPC2_32_RR1_MUL_Q_32_LL
:
6404 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6405 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6406 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6408 case OPC2_32_RR1_MUL_Q_32_UU
:
6409 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6410 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6411 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6413 case OPC2_32_RR1_MULR_Q_32_L
:
6414 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6415 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6416 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6418 case OPC2_32_RR1_MULR_Q_32_U
:
6419 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6420 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6421 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6424 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6429 static void decode_rr2_mul(DisasContext
*ctx
)
6434 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6435 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6436 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6437 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6439 case OPC2_32_RR2_MUL_32
:
6440 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6442 case OPC2_32_RR2_MUL_64
:
6444 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6447 case OPC2_32_RR2_MULS_32
:
6448 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6451 case OPC2_32_RR2_MUL_U_64
:
6453 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6456 case OPC2_32_RR2_MULS_U_32
:
6457 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6461 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6466 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6473 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6474 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6475 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6476 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6477 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6478 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6481 case OPC2_32_RRPW_EXTR
:
6483 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6487 if (pos
+ width
<= 32) {
6488 /* optimize special cases */
6489 if ((pos
== 0) && (width
== 8)) {
6490 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6491 } else if ((pos
== 0) && (width
== 16)) {
6492 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6494 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6495 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6499 case OPC2_32_RRPW_EXTR_U
:
6501 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6503 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6504 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6507 case OPC2_32_RRPW_IMASK
:
6510 if (pos
+ width
<= 32) {
6511 temp
= tcg_temp_new();
6512 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6513 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6514 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6518 case OPC2_32_RRPW_INSERT
:
6519 if (pos
+ width
<= 32) {
6520 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6525 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6530 static void decode_rrr_cond_select(DisasContext
*ctx
)
6536 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6537 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6538 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6539 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6540 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6543 case OPC2_32_RRR_CADD
:
6544 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6545 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6547 case OPC2_32_RRR_CADDN
:
6548 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6551 case OPC2_32_RRR_CSUB
:
6552 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6555 case OPC2_32_RRR_CSUBN
:
6556 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6559 case OPC2_32_RRR_SEL
:
6560 temp
= tcg_const_i32(0);
6561 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6562 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6564 case OPC2_32_RRR_SELN
:
6565 temp
= tcg_const_i32(0);
6566 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6567 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6570 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6574 static void decode_rrr_divide(DisasContext
*ctx
)
6580 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6581 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6582 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6583 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6584 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6587 case OPC2_32_RRR_DVADJ
:
6590 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6591 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6593 case OPC2_32_RRR_DVSTEP
:
6596 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6597 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6599 case OPC2_32_RRR_DVSTEP_U
:
6602 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6603 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6605 case OPC2_32_RRR_IXMAX
:
6608 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6609 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6611 case OPC2_32_RRR_IXMAX_U
:
6614 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6615 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6617 case OPC2_32_RRR_IXMIN
:
6620 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6621 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6623 case OPC2_32_RRR_IXMIN_U
:
6626 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6627 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6629 case OPC2_32_RRR_PACK
:
6631 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6632 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6634 case OPC2_32_RRR_ADD_F
:
6635 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6637 case OPC2_32_RRR_SUB_F
:
6638 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6640 case OPC2_32_RRR_MADD_F
:
6641 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6642 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6644 case OPC2_32_RRR_MSUB_F
:
6645 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6646 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6649 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6654 static void decode_rrr2_madd(DisasContext
*ctx
)
6657 uint32_t r1
, r2
, r3
, r4
;
6659 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6660 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6661 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6662 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6663 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6665 case OPC2_32_RRR2_MADD_32
:
6666 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6669 case OPC2_32_RRR2_MADD_64
:
6672 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6673 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6675 case OPC2_32_RRR2_MADDS_32
:
6676 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6677 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6679 case OPC2_32_RRR2_MADDS_64
:
6682 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6683 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6685 case OPC2_32_RRR2_MADD_U_64
:
6688 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6689 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6691 case OPC2_32_RRR2_MADDS_U_32
:
6692 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6693 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6695 case OPC2_32_RRR2_MADDS_U_64
:
6698 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6699 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6702 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6706 static void decode_rrr2_msub(DisasContext
*ctx
)
6709 uint32_t r1
, r2
, r3
, r4
;
6711 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6712 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6713 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6714 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6715 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6718 case OPC2_32_RRR2_MSUB_32
:
6719 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6722 case OPC2_32_RRR2_MSUB_64
:
6725 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6726 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6728 case OPC2_32_RRR2_MSUBS_32
:
6729 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6730 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6732 case OPC2_32_RRR2_MSUBS_64
:
6735 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6736 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6738 case OPC2_32_RRR2_MSUB_U_64
:
6739 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6740 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6742 case OPC2_32_RRR2_MSUBS_U_32
:
6743 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6744 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6746 case OPC2_32_RRR2_MSUBS_U_64
:
6749 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6750 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6753 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6758 static void decode_rrr1_madd(DisasContext
*ctx
)
6761 uint32_t r1
, r2
, r3
, r4
, n
;
6763 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6764 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6765 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6766 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6767 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6768 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6771 case OPC2_32_RRR1_MADD_H_LL
:
6774 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6775 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6777 case OPC2_32_RRR1_MADD_H_LU
:
6780 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6781 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6783 case OPC2_32_RRR1_MADD_H_UL
:
6786 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6787 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6789 case OPC2_32_RRR1_MADD_H_UU
:
6792 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6793 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6795 case OPC2_32_RRR1_MADDS_H_LL
:
6798 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6799 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6801 case OPC2_32_RRR1_MADDS_H_LU
:
6804 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6805 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6807 case OPC2_32_RRR1_MADDS_H_UL
:
6810 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6811 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6813 case OPC2_32_RRR1_MADDS_H_UU
:
6816 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6817 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6819 case OPC2_32_RRR1_MADDM_H_LL
:
6822 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6823 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6825 case OPC2_32_RRR1_MADDM_H_LU
:
6828 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6829 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6831 case OPC2_32_RRR1_MADDM_H_UL
:
6834 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6835 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6837 case OPC2_32_RRR1_MADDM_H_UU
:
6840 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6841 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6843 case OPC2_32_RRR1_MADDMS_H_LL
:
6846 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6847 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6849 case OPC2_32_RRR1_MADDMS_H_LU
:
6852 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6853 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6855 case OPC2_32_RRR1_MADDMS_H_UL
:
6858 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6859 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6861 case OPC2_32_RRR1_MADDMS_H_UU
:
6864 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6865 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6867 case OPC2_32_RRR1_MADDR_H_LL
:
6868 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6869 cpu_gpr_d
[r2
], n
, MODE_LL
);
6871 case OPC2_32_RRR1_MADDR_H_LU
:
6872 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6873 cpu_gpr_d
[r2
], n
, MODE_LU
);
6875 case OPC2_32_RRR1_MADDR_H_UL
:
6876 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6877 cpu_gpr_d
[r2
], n
, MODE_UL
);
6879 case OPC2_32_RRR1_MADDR_H_UU
:
6880 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6881 cpu_gpr_d
[r2
], n
, MODE_UU
);
6883 case OPC2_32_RRR1_MADDRS_H_LL
:
6884 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6885 cpu_gpr_d
[r2
], n
, MODE_LL
);
6887 case OPC2_32_RRR1_MADDRS_H_LU
:
6888 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6889 cpu_gpr_d
[r2
], n
, MODE_LU
);
6891 case OPC2_32_RRR1_MADDRS_H_UL
:
6892 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6893 cpu_gpr_d
[r2
], n
, MODE_UL
);
6895 case OPC2_32_RRR1_MADDRS_H_UU
:
6896 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6897 cpu_gpr_d
[r2
], n
, MODE_UU
);
6900 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6904 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6907 uint32_t r1
, r2
, r3
, r4
, n
;
6910 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6911 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6912 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6913 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6914 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6915 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6917 temp
= tcg_const_i32(n
);
6918 temp2
= tcg_temp_new();
6921 case OPC2_32_RRR1_MADD_Q_32
:
6922 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6923 cpu_gpr_d
[r2
], n
, 32);
6925 case OPC2_32_RRR1_MADD_Q_64
:
6928 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6929 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6932 case OPC2_32_RRR1_MADD_Q_32_L
:
6933 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6934 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6937 case OPC2_32_RRR1_MADD_Q_64_L
:
6940 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6941 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6942 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6945 case OPC2_32_RRR1_MADD_Q_32_U
:
6946 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6947 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6950 case OPC2_32_RRR1_MADD_Q_64_U
:
6953 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6954 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6955 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6958 case OPC2_32_RRR1_MADD_Q_32_LL
:
6959 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6960 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6961 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6963 case OPC2_32_RRR1_MADD_Q_64_LL
:
6966 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6967 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6968 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6969 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6971 case OPC2_32_RRR1_MADD_Q_32_UU
:
6972 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6973 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6974 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6976 case OPC2_32_RRR1_MADD_Q_64_UU
:
6979 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6980 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6981 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6982 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6984 case OPC2_32_RRR1_MADDS_Q_32
:
6985 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6986 cpu_gpr_d
[r2
], n
, 32);
6988 case OPC2_32_RRR1_MADDS_Q_64
:
6991 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6992 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6995 case OPC2_32_RRR1_MADDS_Q_32_L
:
6996 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6997 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7000 case OPC2_32_RRR1_MADDS_Q_64_L
:
7003 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7004 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7005 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7008 case OPC2_32_RRR1_MADDS_Q_32_U
:
7009 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7010 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7013 case OPC2_32_RRR1_MADDS_Q_64_U
:
7016 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7017 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7018 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7021 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7022 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7023 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7024 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7026 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7029 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7030 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7031 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7032 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7034 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7035 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7036 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7037 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7039 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7042 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7043 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7044 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7045 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7047 case OPC2_32_RRR1_MADDR_H_64_UL
:
7049 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7050 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7052 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7054 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7055 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7057 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7058 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7059 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7060 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7062 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7063 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7064 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7065 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7067 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7068 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7069 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7070 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7072 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7073 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7074 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7075 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7078 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7082 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7085 uint32_t r1
, r2
, r3
, r4
, n
;
7087 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7088 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7089 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7090 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7091 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7092 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7095 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7098 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7099 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7101 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7104 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7105 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7107 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7110 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7111 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7113 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7116 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7117 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7119 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7122 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7123 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7126 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7129 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7130 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7133 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7136 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7137 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7140 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7143 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7144 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7147 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7150 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7151 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7154 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7157 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7158 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7161 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7164 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7165 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7168 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7171 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7172 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7175 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7178 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7179 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7182 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7185 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7186 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7189 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7192 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7193 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7196 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7199 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7200 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7203 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7204 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7205 cpu_gpr_d
[r2
], n
, MODE_LL
);
7207 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7208 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7209 cpu_gpr_d
[r2
], n
, MODE_LU
);
7211 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7212 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7213 cpu_gpr_d
[r2
], n
, MODE_UL
);
7215 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7216 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7217 cpu_gpr_d
[r2
], n
, MODE_UU
);
7219 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7220 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7221 cpu_gpr_d
[r2
], n
, MODE_LL
);
7223 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7224 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7225 cpu_gpr_d
[r2
], n
, MODE_LU
);
7227 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7228 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7229 cpu_gpr_d
[r2
], n
, MODE_UL
);
7231 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7232 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7233 cpu_gpr_d
[r2
], n
, MODE_UU
);
7236 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7240 static void decode_rrr1_msub(DisasContext
*ctx
)
7243 uint32_t r1
, r2
, r3
, r4
, n
;
7245 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7246 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7247 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7248 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7249 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7250 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7253 case OPC2_32_RRR1_MSUB_H_LL
:
7256 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7257 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7259 case OPC2_32_RRR1_MSUB_H_LU
:
7262 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7263 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7265 case OPC2_32_RRR1_MSUB_H_UL
:
7268 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7269 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7271 case OPC2_32_RRR1_MSUB_H_UU
:
7274 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7275 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7277 case OPC2_32_RRR1_MSUBS_H_LL
:
7280 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7281 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7283 case OPC2_32_RRR1_MSUBS_H_LU
:
7286 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7287 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7289 case OPC2_32_RRR1_MSUBS_H_UL
:
7292 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7293 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7295 case OPC2_32_RRR1_MSUBS_H_UU
:
7298 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7299 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7301 case OPC2_32_RRR1_MSUBM_H_LL
:
7304 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7305 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7307 case OPC2_32_RRR1_MSUBM_H_LU
:
7310 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7311 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7313 case OPC2_32_RRR1_MSUBM_H_UL
:
7316 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7317 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7319 case OPC2_32_RRR1_MSUBM_H_UU
:
7322 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7323 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7325 case OPC2_32_RRR1_MSUBMS_H_LL
:
7328 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7329 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7331 case OPC2_32_RRR1_MSUBMS_H_LU
:
7334 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7335 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7337 case OPC2_32_RRR1_MSUBMS_H_UL
:
7340 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7341 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7343 case OPC2_32_RRR1_MSUBMS_H_UU
:
7346 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7347 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7349 case OPC2_32_RRR1_MSUBR_H_LL
:
7350 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7351 cpu_gpr_d
[r2
], n
, MODE_LL
);
7353 case OPC2_32_RRR1_MSUBR_H_LU
:
7354 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7355 cpu_gpr_d
[r2
], n
, MODE_LU
);
7357 case OPC2_32_RRR1_MSUBR_H_UL
:
7358 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7359 cpu_gpr_d
[r2
], n
, MODE_UL
);
7361 case OPC2_32_RRR1_MSUBR_H_UU
:
7362 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7363 cpu_gpr_d
[r2
], n
, MODE_UU
);
7365 case OPC2_32_RRR1_MSUBRS_H_LL
:
7366 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7367 cpu_gpr_d
[r2
], n
, MODE_LL
);
7369 case OPC2_32_RRR1_MSUBRS_H_LU
:
7370 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7371 cpu_gpr_d
[r2
], n
, MODE_LU
);
7373 case OPC2_32_RRR1_MSUBRS_H_UL
:
7374 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7375 cpu_gpr_d
[r2
], n
, MODE_UL
);
7377 case OPC2_32_RRR1_MSUBRS_H_UU
:
7378 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7379 cpu_gpr_d
[r2
], n
, MODE_UU
);
7382 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7386 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7389 uint32_t r1
, r2
, r3
, r4
, n
;
7392 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7393 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7394 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7395 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7396 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7397 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7399 temp
= tcg_const_i32(n
);
7400 temp2
= tcg_temp_new();
7403 case OPC2_32_RRR1_MSUB_Q_32
:
7404 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7405 cpu_gpr_d
[r2
], n
, 32);
7407 case OPC2_32_RRR1_MSUB_Q_64
:
7410 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7411 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7414 case OPC2_32_RRR1_MSUB_Q_32_L
:
7415 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7416 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7419 case OPC2_32_RRR1_MSUB_Q_64_L
:
7422 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7423 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7424 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7427 case OPC2_32_RRR1_MSUB_Q_32_U
:
7428 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7429 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7432 case OPC2_32_RRR1_MSUB_Q_64_U
:
7435 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7436 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7437 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7440 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7441 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7442 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7443 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7445 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7448 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7449 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7450 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7451 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7453 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7454 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7455 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7456 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7458 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7461 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7462 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7463 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7464 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7466 case OPC2_32_RRR1_MSUBS_Q_32
:
7467 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7468 cpu_gpr_d
[r2
], n
, 32);
7470 case OPC2_32_RRR1_MSUBS_Q_64
:
7473 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7474 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7477 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7478 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7479 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7482 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7485 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7486 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7487 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7490 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7491 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7492 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7495 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7498 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7499 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7500 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7503 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7504 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7505 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7506 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7508 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7511 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7512 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7513 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7514 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7516 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7517 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7518 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7519 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7521 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7524 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7525 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7526 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7527 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7529 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7531 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7532 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7534 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7536 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7537 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7539 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7540 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7541 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7542 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7544 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7545 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7546 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7547 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7549 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7550 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7551 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7552 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7554 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7555 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7556 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7557 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7560 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7564 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7567 uint32_t r1
, r2
, r3
, r4
, n
;
7569 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7570 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7571 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7572 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7573 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7574 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7577 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7580 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7581 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7583 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7586 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7587 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7589 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7592 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7593 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7595 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7598 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7599 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7601 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7604 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7605 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7608 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7611 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7612 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7615 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7618 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7619 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7622 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7625 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7626 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7629 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7632 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7633 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7636 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7639 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7640 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7643 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7646 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7650 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7653 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7654 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7657 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7660 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7661 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7664 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7667 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7668 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7671 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7674 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7675 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7678 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7681 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7682 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7685 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7686 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7687 cpu_gpr_d
[r2
], n
, MODE_LL
);
7689 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7690 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7691 cpu_gpr_d
[r2
], n
, MODE_LU
);
7693 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7694 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7695 cpu_gpr_d
[r2
], n
, MODE_UL
);
7697 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7698 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7699 cpu_gpr_d
[r2
], n
, MODE_UU
);
7701 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7702 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7703 cpu_gpr_d
[r2
], n
, MODE_LL
);
7705 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7706 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7707 cpu_gpr_d
[r2
], n
, MODE_LU
);
7709 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7710 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7711 cpu_gpr_d
[r2
], n
, MODE_UL
);
7713 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7714 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7715 cpu_gpr_d
[r2
], n
, MODE_UU
);
7718 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7723 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7727 TCGv tmp_width
, tmp_pos
;
7729 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7730 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7731 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7732 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7733 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7735 tmp_pos
= tcg_temp_new();
7736 tmp_width
= tcg_temp_new();
7739 case OPC2_32_RRRR_DEXTR
:
7740 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7742 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7744 TCGv msw
= tcg_temp_new();
7745 TCGv zero
= tcg_constant_tl(0);
7746 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7747 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7748 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7750 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7751 * behaviour. So check that case here and set the low bits to zero
7752 * which effectivly returns cpu_gpr_d[r1]
7754 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7755 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7758 case OPC2_32_RRRR_EXTR
:
7759 case OPC2_32_RRRR_EXTR_U
:
7761 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7762 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7763 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7764 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7765 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7766 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7767 if (op2
== OPC2_32_RRRR_EXTR
) {
7768 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7770 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7773 case OPC2_32_RRRR_INSERT
:
7775 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7776 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7777 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7781 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7786 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7794 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7795 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7796 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7797 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7798 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7799 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7801 temp
= tcg_temp_new();
7804 case OPC2_32_RRRW_EXTR
:
7805 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7806 tcg_gen_addi_tl(temp
, temp
, width
);
7807 tcg_gen_subfi_tl(temp
, 32, temp
);
7808 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7809 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7811 case OPC2_32_RRRW_EXTR_U
:
7813 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7815 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7816 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7817 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7820 case OPC2_32_RRRW_IMASK
:
7821 temp2
= tcg_temp_new();
7823 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7824 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7825 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7826 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7827 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7829 case OPC2_32_RRRW_INSERT
:
7830 temp2
= tcg_temp_new();
7832 tcg_gen_movi_tl(temp
, width
);
7833 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7834 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7837 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * Decode SYS-format system instructions (DEBUG, DISABLE, ENABLE, DSYNC,
 * ISYNC, NOP, RET, FRET, RFE, RFM, RSLCX, SVLCX, RESTORE, TRAPSV, TRAPV).
 *
 * NOTE(review): sampled excerpt — the enclosing `switch (op2)`, `break;`
 * statements, labels such as `gen_set_label(l1)` and some braces are not
 * visible here and have not been reconstructed.
 */
static void decode_sys_interrupts(DisasContext *ctx)
    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
    case OPC2_32_SYS_DISABLE:
        /* DISABLE clears the interrupt-enable bit in ICR. */
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE_1_3);
    case OPC2_32_SYS_DSYNC:
    case OPC2_32_SYS_ENABLE:
        /* ENABLE sets the interrupt-enable bit in ICR. */
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE_1_3);
    case OPC2_32_SYS_ISYNC:
    case OPC2_32_SYS_NOP:
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
    case OPC2_32_SYS_FRET:
    case OPC2_32_SYS_RFE:
        /* Return from exception via helper; TB must end (NORETURN). */
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
        ctx->base.is_jmp = DISAS_NORETURN;
    case OPC2_32_SYS_RFM:
        /* RFM is only legal in supervisor mode (KUU == SM). */
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();
            /* Only take the helper path if DBGSR.DE == 1. */
            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            tcg_gen_exit_tb(NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
            /* generate privilege trap */
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
    case OPC2_32_SYS_RESTORE:
        /* RESTORE exists from ISA v1.6; needs SM or UM1 privilege. */
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                /* Copy D[r1] bit 0 into ICR.IE (bit 8). */
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
            /* NOTE(review): this trap is the else-branch for pre-1.6 ISAs;
             * the `} else {` line is elided in this excerpt. */
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    case OPC2_32_SYS_TRAPSV:
        /* Trap if PSW_SV is negative (sticky overflow set). */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
    case OPC2_32_SYS_TRAPV:
        /* Trap if PSW_V is negative (overflow set). */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
    /* Unknown op2: illegal-opcode trap (switch default). */
    generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
/*
 * Top-level decoder for 32-bit TriCore instructions: crack the major
 * opcode and dispatch to the per-format decode routines; a few simple
 * formats are handled inline.
 *
 * NOTE(review): sampled excerpt — the enclosing `switch (op1)`, every
 * `break;`, several closing braces and a few statements (e.g. the body
 * of OPCM_32_RC_MUL) are not visible here and have not been
 * reconstructed.
 */
static void decode_32Bit_opc(DisasContext *ctx)
    int32_t address, const16;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;

    /* ABS-format loads/stores (18-bit absolute address). */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(ctx);
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(ctx);
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(ctx);
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(ctx);
    case OPCM_32_ABS_STORE:
        decode_abs_store(ctx);
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(ctx);
    case OPC1_32_ABS_STOREQ:
        /* ST.Q: store the upper halfword of D[r1] to an absolute address. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();
        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
    case OPC1_32_ABS_LD_Q:
        /* LD.Q: load a halfword into the upper half of D[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
    case OPC1_32_ABS_LEA:
        /* LEA: load the absolute effective address into A[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
    case OPC1_32_ABSB_ST_T:
        /* ST.T: read-modify-write a single bit `bpos` of a byte in memory. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
    /* B-format: calls with a sign-extended 24-bit displacement. */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
    /* BIT-format: bit-logical operations. */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(ctx);
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(ctx);
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(ctx);
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(ctx);
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(ctx);
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(ctx);
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(ctx);
    /* BO-format: base+offset addressing-mode loads/stores. */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(ctx);
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(ctx);
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(ctx);
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(ctx);
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(ctx);
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(ctx);
    /* BOL-format: base + long (16-bit) offset loads/stores and LEA. */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(ctx, op1);
    /* BRC-format: compare-register-against-const4 branches. */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
    /* BRN-format: JNZ.T single-bit test-and-branch. */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
    /* BRR-format: compare-two-registers branches and LOOP. */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
    /* RC-format: register + constant ALU operations. */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(ctx);
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(ctx);
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(ctx);
    case OPCM_32_RC_MUL:
        /* NOTE(review): the decode call for this case is elided in this
         * excerpt. */
    /* RCPW-format mask/insert. */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(ctx);
    /* RCRR-format INSERT: width and pos come from the E[r3] pair. */
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
    /* RCRW / RCR formats. */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(ctx);
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(ctx);
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(ctx);
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(ctx);
    /* RLC-format: register + long constant. */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(ctx, op1);
    /* RR-format: register-register operations. */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(ctx);
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(ctx);
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(ctx);
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(ctx);
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(ctx);
    /* RR1/RR2 multiply formats. */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(ctx);
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(ctx);
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(ctx);
    /* RRPW-format extract/insert and DEXTR. */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(ctx);
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);

        /* NOTE(review): the trailing argument(s) of this call are elided
         * in this excerpt. */
        tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
    /* RRR formats. */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(ctx);
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(ctx);
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(ctx);
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(ctx);
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(ctx);
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(ctx);
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(ctx);
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(ctx);
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(ctx);
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(ctx);
    /* RRRR / RRRW extract-insert formats. */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(ctx);
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(ctx);
    /* SYS-format system instructions. */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(ctx);
    case OPC1_32_SYS_RSTV:
        /* RSTV clears V and propagates the zero into SV, AV and SAV. */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
    /* Unknown major opcode: illegal-opcode trap (switch default). */
    generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
/*
 * A TriCore instruction is 16 bits wide iff bit 0 of its first
 * halfword is clear; otherwise it is a 32-bit instruction.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return !(insn & 0x1);
}
/*
 * TranslatorOps hook: set up the per-TB DisasContext before translation
 * starts — MMU index, hflags copied from the TB flags, and the CPU
 * feature bitmap used by has_feature().
 *
 * NOTE(review): sampled excerpt — the second parameter line of the
 * signature (presumably `CPUState *cs`, which the body dereferences)
 * and the braces are elided here.
 */
static void tricore_tr_init_disas_context(DisasContextBase *dcbase,
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cs->env_ptr;
    ctx->mem_idx = cpu_mmu_index(env, false);
    /* hflags are baked into the TB flags at code-gen time. */
    ctx->hflags = (uint32_t)ctx->base.tb->flags;
    ctx->features = env->features;
/*
 * TranslatorOps hook invoked at the start of each TB.
 *
 * NOTE(review): sampled excerpt — no body statements are visible here;
 * presumably this hook is a no-op for TriCore, but confirm against the
 * full file.
 */
static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
8272 static void tricore_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8274 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8276 tcg_gen_insn_start(ctx
->base
.pc_next
);
8279 static bool insn_crosses_page(CPUTriCoreState
*env
, DisasContext
*ctx
)
8282 * Return true if the insn at ctx->base.pc_next might cross a page boundary.
8283 * (False positives are OK, false negatives are not.)
8284 * Our caller ensures we are only called if dc->base.pc_next is less than
8285 * 4 bytes from the page boundary, so we cross the page if the first
8286 * 16 bits indicate that this is a 32 bit insn.
8288 uint16_t insn
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8290 return !tricore_insn_is_16bit(insn
);
/*
 * TranslatorOps hook: fetch and translate one instruction.  Reads the
 * first halfword, dispatches to the 16- or 32-bit decoder based on bit 0,
 * advances pc_next, and ends the TB early if the next insn could cross a
 * page boundary.
 *
 * NOTE(review): sampled excerpt — the declarations of insn_lo/is_16bit,
 * the `if (is_16bit) { ... } else { ... }` framing around the two decode
 * paths, and some closing braces are elided here.
 */
static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cpu->env_ptr;

    insn_lo = cpu_lduw_code(env, ctx->base.pc_next);
    is_16bit = tricore_insn_is_16bit(insn_lo);
        /* 16-bit path: the opcode is the single halfword. */
        ctx->opcode = insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 2;
        decode_16Bit_opc(ctx);
        /* 32-bit path: fetch the second halfword and combine. */
        uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2);
        ctx->opcode = insn_hi << 16 | insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_32Bit_opc(ctx);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        /*
         * Stop the TB if we ran off the page, or if we are within 3
         * bytes of the boundary and the next insn would cross it.
         */
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
            || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
/*
 * TranslatorOps hook: emit the TB epilogue.  DISAS_TOO_MANY chains to
 * the next TB at pc_next; DISAS_NORETURN needs nothing (the insn already
 * ended the TB); anything else is a translator bug.
 *
 * NOTE(review): sampled excerpt — `break;` statements and the `default:`
 * label before g_assert_not_reached() are elided here.
 */
static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
    case DISAS_NORETURN:
        g_assert_not_reached();
8342 static void tricore_tr_disas_log(const DisasContextBase
*dcbase
,
8343 CPUState
*cpu
, FILE *logfile
)
8345 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
8346 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/*
 * Hook table handed to translator_loop(); each member is one stage of
 * the generic translation loop.
 *
 * NOTE(review): sampled excerpt — the closing `};` of this initializer
 * is elided here.
 */
static const TranslatorOps tricore_tr_ops = {
    .init_disas_context = tricore_tr_init_disas_context,
    .tb_start = tricore_tr_tb_start,
    .insn_start = tricore_tr_insn_start,
    .translate_insn = tricore_tr_translate_insn,
    .tb_stop = tricore_tr_tb_stop,
    .disas_log = tricore_tr_disas_log,
/*
 * Entry point called by the generic code generator: run the common
 * translator loop over this TB with the TriCore hook table.
 *
 * NOTE(review): sampled excerpt — the local `DisasContext ctx;`
 * declaration (whose .base is passed below) and the braces are elided
 * here.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &tricore_tr_ops, &ctx.base);
/*
 * Reset the CPU register state to its architectural default.
 *
 * NOTE(review): sampled excerpt — the body statements are elided here;
 * only the original comment below is visible.
 */
void cpu_state_reset(CPUTriCoreState *env)
    /* Reset Regs to Default Value */
/*
 * Create TCG globals backed by the core special-function registers in
 * CPUTriCoreState: PCXI, PSW, PC and ICR.
 *
 * NOTE(review): sampled excerpt — the surrounding braces are elided
 * here; the four statements themselves are complete.
 */
static void tricore_tcg_init_csfr(void)
    cpu_PCXI = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(cpu_env,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
8392 void tricore_tcg_init(void)
8397 for (i
= 0 ; i
< 16 ; i
++) {
8398 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8399 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8402 for (i
= 0 ; i
< 16 ; i
++) {
8403 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8404 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8407 tricore_tcg_init_csfr();
8408 /* init PSW flag cache */
8409 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8410 offsetof(CPUTriCoreState
, PSW_USB_C
),
8412 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8413 offsetof(CPUTriCoreState
, PSW_USB_V
),
8415 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8416 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8418 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8419 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8421 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8422 offsetof(CPUTriCoreState
, PSW_USB_SAV
),