2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
44 static TCGv cpu_gpr_a
[16];
45 static TCGv cpu_gpr_d
[16];
47 static TCGv cpu_PSW_C
;
48 static TCGv cpu_PSW_V
;
49 static TCGv cpu_PSW_SV
;
50 static TCGv cpu_PSW_AV
;
51 static TCGv cpu_PSW_SAV
;
53 #include "exec/gen-icount.h"
/* Printable names of the address registers; a10 doubles as the stack pointer. */
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};
/* Printable names of the data registers. */
static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
67 typedef struct DisasContext
{
68 DisasContextBase base
;
69 target_ulong pc_succ_insn
;
71 /* Routine used to access memory */
73 uint32_t hflags
, saved_hflags
;
77 static int has_feature(DisasContext
*ctx
, int feature
)
79 return (ctx
->features
& (1ULL << feature
)) != 0;
89 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
91 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
92 CPUTriCoreState
*env
= &cpu
->env
;
98 qemu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
99 qemu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
100 qemu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
101 qemu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
102 qemu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
103 qemu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
105 for (i
= 0; i
< 16; ++i
) {
107 qemu_fprintf(f
, "\nGPR A%02d:", i
);
109 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
111 for (i
= 0; i
< 16; ++i
) {
113 qemu_fprintf(f
, "\nGPR D%02d:", i
);
115 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
117 qemu_fprintf(f
, "\n");
121 * Functions to generate micro-ops
124 /* Makros for generating helpers */
126 #define gen_helper_1arg(name, arg) do { \
127 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
128 gen_helper_##name(cpu_env, helper_tmp); \
131 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
132 TCGv arg00 = tcg_temp_new(); \
133 TCGv arg01 = tcg_temp_new(); \
134 TCGv arg11 = tcg_temp_new(); \
135 tcg_gen_sari_tl(arg00, arg0, 16); \
136 tcg_gen_ext16s_tl(arg01, arg0); \
137 tcg_gen_ext16s_tl(arg11, arg1); \
138 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
141 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
142 TCGv arg00 = tcg_temp_new(); \
143 TCGv arg01 = tcg_temp_new(); \
144 TCGv arg10 = tcg_temp_new(); \
145 TCGv arg11 = tcg_temp_new(); \
146 tcg_gen_sari_tl(arg00, arg0, 16); \
147 tcg_gen_ext16s_tl(arg01, arg0); \
148 tcg_gen_sari_tl(arg11, arg1, 16); \
149 tcg_gen_ext16s_tl(arg10, arg1); \
150 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
153 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
154 TCGv arg00 = tcg_temp_new(); \
155 TCGv arg01 = tcg_temp_new(); \
156 TCGv arg10 = tcg_temp_new(); \
157 TCGv arg11 = tcg_temp_new(); \
158 tcg_gen_sari_tl(arg00, arg0, 16); \
159 tcg_gen_ext16s_tl(arg01, arg0); \
160 tcg_gen_sari_tl(arg10, arg1, 16); \
161 tcg_gen_ext16s_tl(arg11, arg1); \
162 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
165 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
166 TCGv arg00 = tcg_temp_new(); \
167 TCGv arg01 = tcg_temp_new(); \
168 TCGv arg11 = tcg_temp_new(); \
169 tcg_gen_sari_tl(arg01, arg0, 16); \
170 tcg_gen_ext16s_tl(arg00, arg0); \
171 tcg_gen_sari_tl(arg11, arg1, 16); \
172 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
175 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
176 TCGv_i64 ret = tcg_temp_new_i64(); \
177 TCGv_i64 arg1 = tcg_temp_new_i64(); \
179 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
180 gen_helper_##name(ret, arg1, arg2); \
181 tcg_gen_extr_i64_i32(rl, rh, ret); \
184 #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
185 TCGv_i64 ret = tcg_temp_new_i64(); \
187 gen_helper_##name(ret, cpu_env, arg1, arg2); \
188 tcg_gen_extr_i64_i32(rl, rh, ret); \
191 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
192 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
193 ((offset & 0x0fffff) << 1))
195 /* For two 32-bit registers used a 64-bit register, the first
196 registernumber needs to be even. Otherwise we trap. */
197 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
198 #define CHECK_REG_PAIR(reg) do { \
200 generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
204 /* Functions for load/save to/from memory */
206 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
207 int16_t con
, MemOp mop
)
209 TCGv temp
= tcg_temp_new();
210 tcg_gen_addi_tl(temp
, r2
, con
);
211 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
214 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
215 int16_t con
, MemOp mop
)
217 TCGv temp
= tcg_temp_new();
218 tcg_gen_addi_tl(temp
, r2
, con
);
219 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
222 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
224 TCGv_i64 temp
= tcg_temp_new_i64();
226 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
227 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
230 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
233 TCGv temp
= tcg_temp_new();
234 tcg_gen_addi_tl(temp
, base
, con
);
235 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
238 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
240 TCGv_i64 temp
= tcg_temp_new_i64();
242 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
243 /* write back to two 32 bit regs */
244 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
247 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
250 TCGv temp
= tcg_temp_new();
251 tcg_gen_addi_tl(temp
, base
, con
);
252 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
255 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
258 TCGv temp
= tcg_temp_new();
259 tcg_gen_addi_tl(temp
, r2
, off
);
260 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
261 tcg_gen_mov_tl(r2
, temp
);
264 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
267 TCGv temp
= tcg_temp_new();
268 tcg_gen_addi_tl(temp
, r2
, off
);
269 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
270 tcg_gen_mov_tl(r2
, temp
);
273 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
274 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
276 TCGv temp
= tcg_temp_new();
277 TCGv temp2
= tcg_temp_new();
279 CHECK_REG_PAIR(ereg
);
280 /* temp = (M(EA, word) */
281 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
282 /* temp = temp & ~E[a][63:32]) */
283 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
284 /* temp2 = (E[a][31:0] & E[a][63:32]); */
285 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
286 /* temp = temp | temp2; */
287 tcg_gen_or_tl(temp
, temp
, temp2
);
288 /* M(EA, word) = temp; */
289 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
292 /* tmp = M(EA, word);
295 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
297 TCGv temp
= tcg_temp_new();
299 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
300 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
301 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
304 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
306 TCGv temp
= tcg_temp_new();
307 TCGv temp2
= tcg_temp_new();
308 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
309 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
310 cpu_gpr_d
[reg
], temp
);
311 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
312 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
315 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
317 TCGv temp
= tcg_temp_new();
318 TCGv temp2
= tcg_temp_new();
319 TCGv temp3
= tcg_temp_new();
321 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
322 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
323 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
324 tcg_gen_or_tl(temp2
, temp2
, temp3
);
325 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
326 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
330 /* We generate loads and store to core special function register (csfr) through
331 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
332 makros R, A and E, which allow read-only, all and endinit protected access.
333 These makros also specify in which ISA version the csfr was introduced. */
334 #define R(ADDRESS, REG, FEATURE) \
336 if (has_feature(ctx, FEATURE)) { \
337 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
340 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
341 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
342 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
344 /* since we're caching PSW make this a special case */
345 if (offset
== 0xfe04) {
346 gen_helper_psw_read(ret
, cpu_env
);
349 #include "csfr.h.inc"
357 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
358 since no execption occurs */
359 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
361 if (has_feature(ctx, FEATURE)) { \
362 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
365 /* Endinit protected registers
366 TODO: Since the endinit bit is in a register of a not yet implemented
367 watchdog device, we handle endinit protected registers like
368 all-access registers for now. */
369 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
370 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
373 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
374 /* since we're caching PSW make this a special case */
375 if (offset
== 0xfe04) {
376 gen_helper_psw_write(cpu_env
, r1
);
379 #include "csfr.h.inc"
383 /* generate privilege trap */
387 /* Functions for arithmetic instructions */
389 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
391 TCGv t0
= tcg_temp_new_i32();
392 TCGv result
= tcg_temp_new_i32();
393 /* Addition and set V/SV bits */
394 tcg_gen_add_tl(result
, r1
, r2
);
396 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
397 tcg_gen_xor_tl(t0
, r1
, r2
);
398 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
400 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
401 /* Calc AV/SAV bits */
402 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
403 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
405 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
406 /* write back result */
407 tcg_gen_mov_tl(ret
, result
);
411 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
413 TCGv temp
= tcg_temp_new();
414 TCGv_i64 t0
= tcg_temp_new_i64();
415 TCGv_i64 t1
= tcg_temp_new_i64();
416 TCGv_i64 result
= tcg_temp_new_i64();
418 tcg_gen_add_i64(result
, r1
, r2
);
420 tcg_gen_xor_i64(t1
, result
, r1
);
421 tcg_gen_xor_i64(t0
, r1
, r2
);
422 tcg_gen_andc_i64(t1
, t1
, t0
);
423 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
425 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
426 /* calc AV/SAV bits */
427 tcg_gen_extrh_i64_i32(temp
, result
);
428 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
429 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
431 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
432 /* write back result */
433 tcg_gen_mov_i64(ret
, result
);
437 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
438 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
439 void(*op2
)(TCGv
, TCGv
, TCGv
))
441 TCGv temp
= tcg_temp_new();
442 TCGv temp2
= tcg_temp_new();
443 TCGv temp3
= tcg_temp_new();
444 TCGv temp4
= tcg_temp_new();
446 (*op1
)(temp
, r1_low
, r2
);
448 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
449 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
450 if (op1
== tcg_gen_add_tl
) {
451 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
453 tcg_gen_and_tl(temp2
, temp2
, temp3
);
456 (*op2
)(temp3
, r1_high
, r3
);
458 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
459 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
460 if (op2
== tcg_gen_add_tl
) {
461 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
465 /* combine V0/V1 bits */
466 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
468 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
470 tcg_gen_mov_tl(ret_low
, temp
);
471 tcg_gen_mov_tl(ret_high
, temp3
);
473 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
474 tcg_gen_xor_tl(temp
, temp
, ret_low
);
475 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
476 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
477 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 /* ret = r2 + (r1 * r3); */
483 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
485 TCGv_i64 t1
= tcg_temp_new_i64();
486 TCGv_i64 t2
= tcg_temp_new_i64();
487 TCGv_i64 t3
= tcg_temp_new_i64();
489 tcg_gen_ext_i32_i64(t1
, r1
);
490 tcg_gen_ext_i32_i64(t2
, r2
);
491 tcg_gen_ext_i32_i64(t3
, r3
);
493 tcg_gen_mul_i64(t1
, t1
, t3
);
494 tcg_gen_add_i64(t1
, t2
, t1
);
496 tcg_gen_extrl_i64_i32(ret
, t1
);
499 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
500 /* t1 < -0x80000000 */
501 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
502 tcg_gen_or_i64(t2
, t2
, t3
);
503 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
504 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
506 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
507 /* Calc AV/SAV bits */
508 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
509 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
511 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
514 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
516 TCGv temp
= tcg_const_i32(con
);
517 gen_madd32_d(ret
, r1
, r2
, temp
);
521 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
524 TCGv t1
= tcg_temp_new();
525 TCGv t2
= tcg_temp_new();
526 TCGv t3
= tcg_temp_new();
527 TCGv t4
= tcg_temp_new();
529 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
530 /* only the add can overflow */
531 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
533 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
534 tcg_gen_xor_tl(t1
, r2_high
, t2
);
535 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
537 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
538 /* Calc AV/SAV bits */
539 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
540 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
542 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
543 /* write back the result */
544 tcg_gen_mov_tl(ret_low
, t3
);
545 tcg_gen_mov_tl(ret_high
, t4
);
549 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
552 TCGv_i64 t1
= tcg_temp_new_i64();
553 TCGv_i64 t2
= tcg_temp_new_i64();
554 TCGv_i64 t3
= tcg_temp_new_i64();
556 tcg_gen_extu_i32_i64(t1
, r1
);
557 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
558 tcg_gen_extu_i32_i64(t3
, r3
);
560 tcg_gen_mul_i64(t1
, t1
, t3
);
561 tcg_gen_add_i64(t2
, t2
, t1
);
562 /* write back result */
563 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
564 /* only the add overflows, if t2 < t1
566 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
567 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
568 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
570 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
571 /* Calc AV/SAV bits */
572 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
573 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
575 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
579 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
582 TCGv temp
= tcg_const_i32(con
);
583 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
587 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
590 TCGv temp
= tcg_const_i32(con
);
591 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
595 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
596 TCGv r3
, uint32_t n
, uint32_t mode
)
598 TCGv temp
= tcg_const_i32(n
);
599 TCGv temp2
= tcg_temp_new();
600 TCGv_i64 temp64
= tcg_temp_new_i64();
603 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
606 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
609 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
612 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
615 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
616 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
617 tcg_gen_add_tl
, tcg_gen_add_tl
);
621 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
622 TCGv r3
, uint32_t n
, uint32_t mode
)
624 TCGv temp
= tcg_const_i32(n
);
625 TCGv temp2
= tcg_temp_new();
626 TCGv_i64 temp64
= tcg_temp_new_i64();
629 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
632 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
635 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
638 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
641 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
642 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
643 tcg_gen_sub_tl
, tcg_gen_add_tl
);
647 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
648 TCGv r3
, uint32_t n
, uint32_t mode
)
650 TCGv temp
= tcg_const_i32(n
);
651 TCGv_i64 temp64
= tcg_temp_new_i64();
652 TCGv_i64 temp64_2
= tcg_temp_new_i64();
653 TCGv_i64 temp64_3
= tcg_temp_new_i64();
656 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
659 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
662 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
665 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
668 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
669 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
670 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
671 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
672 tcg_gen_shli_i64(temp64
, temp64
, 16);
674 gen_add64_d(temp64_2
, temp64_3
, temp64
);
675 /* write back result */
676 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
679 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
682 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
683 TCGv r3
, uint32_t n
, uint32_t mode
)
685 TCGv temp
= tcg_const_i32(n
);
686 TCGv temp2
= tcg_temp_new();
687 TCGv temp3
= tcg_temp_new();
688 TCGv_i64 temp64
= tcg_temp_new_i64();
692 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
695 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
698 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
701 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
704 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
705 gen_adds(ret_low
, r1_low
, temp
);
706 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
707 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
708 gen_adds(ret_high
, r1_high
, temp2
);
710 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
711 /* combine av bits */
712 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
715 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
718 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
719 TCGv r3
, uint32_t n
, uint32_t mode
)
721 TCGv temp
= tcg_const_i32(n
);
722 TCGv temp2
= tcg_temp_new();
723 TCGv temp3
= tcg_temp_new();
724 TCGv_i64 temp64
= tcg_temp_new_i64();
728 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
731 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
734 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
737 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
740 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
741 gen_subs(ret_low
, r1_low
, temp
);
742 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
743 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
744 gen_adds(ret_high
, r1_high
, temp2
);
746 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
747 /* combine av bits */
748 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
752 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
753 TCGv r3
, uint32_t n
, uint32_t mode
)
755 TCGv temp
= tcg_const_i32(n
);
756 TCGv_i64 temp64
= tcg_temp_new_i64();
757 TCGv_i64 temp64_2
= tcg_temp_new_i64();
761 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
764 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
767 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
770 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
773 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
774 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
775 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
776 tcg_gen_shli_i64(temp64
, temp64
, 16);
777 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
779 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
780 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
785 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
786 TCGv r3
, uint32_t n
, uint32_t mode
)
788 TCGv temp
= tcg_const_i32(n
);
789 TCGv_i64 temp64
= tcg_temp_new_i64();
790 TCGv_i64 temp64_2
= tcg_temp_new_i64();
791 TCGv_i64 temp64_3
= tcg_temp_new_i64();
794 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
797 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
800 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
803 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
806 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
807 gen_add64_d(temp64_3
, temp64_2
, temp64
);
808 /* write back result */
809 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
813 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
814 TCGv r3
, uint32_t n
, uint32_t mode
)
816 TCGv temp
= tcg_const_i32(n
);
817 TCGv_i64 temp64
= tcg_temp_new_i64();
818 TCGv_i64 temp64_2
= tcg_temp_new_i64();
821 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
824 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
827 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
830 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
833 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
834 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
835 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
839 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
842 TCGv temp
= tcg_const_i32(n
);
843 TCGv_i64 temp64
= tcg_temp_new_i64();
846 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
849 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
852 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
855 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
858 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
862 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
864 TCGv temp
= tcg_temp_new();
865 TCGv temp2
= tcg_temp_new();
867 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
868 tcg_gen_shli_tl(temp
, r1
, 16);
869 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
873 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
875 TCGv temp
= tcg_const_i32(n
);
876 TCGv temp2
= tcg_temp_new();
877 TCGv_i64 temp64
= tcg_temp_new_i64();
880 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
883 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
886 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
889 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
892 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
893 tcg_gen_shli_tl(temp
, r1
, 16);
894 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
899 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
900 uint32_t n
, uint32_t mode
)
902 TCGv temp
= tcg_const_i32(n
);
903 TCGv_i64 temp64
= tcg_temp_new_i64();
906 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
909 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
912 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
915 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
918 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
922 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
924 TCGv temp
= tcg_temp_new();
925 TCGv temp2
= tcg_temp_new();
927 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
928 tcg_gen_shli_tl(temp
, r1
, 16);
929 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
933 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
935 TCGv temp
= tcg_const_i32(n
);
936 TCGv temp2
= tcg_temp_new();
937 TCGv_i64 temp64
= tcg_temp_new_i64();
940 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
943 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
946 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
949 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
952 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
953 tcg_gen_shli_tl(temp
, r1
, 16);
954 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
958 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
960 TCGv temp
= tcg_const_i32(n
);
961 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
965 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
967 TCGv temp
= tcg_const_i32(n
);
968 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
972 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
975 TCGv temp
= tcg_temp_new();
976 TCGv temp2
= tcg_temp_new();
977 TCGv temp3
= tcg_temp_new();
978 TCGv_i64 t1
= tcg_temp_new_i64();
979 TCGv_i64 t2
= tcg_temp_new_i64();
980 TCGv_i64 t3
= tcg_temp_new_i64();
982 tcg_gen_ext_i32_i64(t2
, arg2
);
983 tcg_gen_ext_i32_i64(t3
, arg3
);
985 tcg_gen_mul_i64(t2
, t2
, t3
);
986 tcg_gen_shli_i64(t2
, t2
, n
);
988 tcg_gen_ext_i32_i64(t1
, arg1
);
989 tcg_gen_sari_i64(t2
, t2
, up_shift
);
991 tcg_gen_add_i64(t3
, t1
, t2
);
992 tcg_gen_extrl_i64_i32(temp3
, t3
);
994 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
995 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
996 tcg_gen_or_i64(t1
, t1
, t2
);
997 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
998 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
999 /* We produce an overflow on the host if the mul before was
1000 (0x80000000 * 0x80000000) << 1). If this is the
1001 case, we negate the ovf. */
1003 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1004 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1005 tcg_gen_and_tl(temp
, temp
, temp2
);
1006 tcg_gen_shli_tl(temp
, temp
, 31);
1007 /* negate v bit, if special condition */
1008 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1011 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1012 /* Calc AV/SAV bits */
1013 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1014 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1016 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1017 /* write back result */
1018 tcg_gen_mov_tl(ret
, temp3
);
1022 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1024 TCGv temp
= tcg_temp_new();
1025 TCGv temp2
= tcg_temp_new();
1027 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1028 } else { /* n is expected to be 1 */
1029 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1030 tcg_gen_shli_tl(temp
, temp
, 1);
1031 /* catch special case r1 = r2 = 0x8000 */
1032 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1033 tcg_gen_sub_tl(temp
, temp
, temp2
);
1035 gen_add_d(ret
, arg1
, temp
);
1039 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1041 TCGv temp
= tcg_temp_new();
1042 TCGv temp2
= tcg_temp_new();
1044 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1045 } else { /* n is expected to be 1 */
1046 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1047 tcg_gen_shli_tl(temp
, temp
, 1);
1048 /* catch special case r1 = r2 = 0x8000 */
1049 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1050 tcg_gen_sub_tl(temp
, temp
, temp2
);
1052 gen_adds(ret
, arg1
, temp
);
1056 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1057 TCGv arg3
, uint32_t n
)
1059 TCGv temp
= tcg_temp_new();
1060 TCGv temp2
= tcg_temp_new();
1061 TCGv_i64 t1
= tcg_temp_new_i64();
1062 TCGv_i64 t2
= tcg_temp_new_i64();
1063 TCGv_i64 t3
= tcg_temp_new_i64();
1066 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1067 } else { /* n is expected to be 1 */
1068 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1069 tcg_gen_shli_tl(temp
, temp
, 1);
1070 /* catch special case r1 = r2 = 0x8000 */
1071 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1072 tcg_gen_sub_tl(temp
, temp
, temp2
);
1074 tcg_gen_ext_i32_i64(t2
, temp
);
1075 tcg_gen_shli_i64(t2
, t2
, 16);
1076 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1077 gen_add64_d(t3
, t1
, t2
);
1078 /* write back result */
1079 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1083 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1084 TCGv arg3
, uint32_t n
)
1086 TCGv temp
= tcg_temp_new();
1087 TCGv temp2
= tcg_temp_new();
1088 TCGv_i64 t1
= tcg_temp_new_i64();
1089 TCGv_i64 t2
= tcg_temp_new_i64();
1092 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1093 } else { /* n is expected to be 1 */
1094 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1095 tcg_gen_shli_tl(temp
, temp
, 1);
1096 /* catch special case r1 = r2 = 0x8000 */
1097 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1098 tcg_gen_sub_tl(temp
, temp
, temp2
);
1100 tcg_gen_ext_i32_i64(t2
, temp
);
1101 tcg_gen_shli_i64(t2
, t2
, 16);
1102 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1104 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1105 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1109 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1110 TCGv arg3
, uint32_t n
)
1112 TCGv_i64 t1
= tcg_temp_new_i64();
1113 TCGv_i64 t2
= tcg_temp_new_i64();
1114 TCGv_i64 t3
= tcg_temp_new_i64();
1115 TCGv_i64 t4
= tcg_temp_new_i64();
1118 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1119 tcg_gen_ext_i32_i64(t2
, arg2
);
1120 tcg_gen_ext_i32_i64(t3
, arg3
);
1122 tcg_gen_mul_i64(t2
, t2
, t3
);
1124 tcg_gen_shli_i64(t2
, t2
, 1);
1126 tcg_gen_add_i64(t4
, t1
, t2
);
1128 tcg_gen_xor_i64(t3
, t4
, t1
);
1129 tcg_gen_xor_i64(t2
, t1
, t2
);
1130 tcg_gen_andc_i64(t3
, t3
, t2
);
1131 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1132 /* We produce an overflow on the host if the mul before was
1133 (0x80000000 * 0x80000000) << 1). If this is the
1134 case, we negate the ovf. */
1136 temp
= tcg_temp_new();
1137 temp2
= tcg_temp_new();
1138 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1139 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1140 tcg_gen_and_tl(temp
, temp
, temp2
);
1141 tcg_gen_shli_tl(temp
, temp
, 31);
1142 /* negate v bit, if special condition */
1143 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1145 /* write back result */
1146 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1148 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1149 /* Calc AV/SAV bits */
1150 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1151 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1153 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1157 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1160 TCGv_i64 t1
= tcg_temp_new_i64();
1161 TCGv_i64 t2
= tcg_temp_new_i64();
1162 TCGv_i64 t3
= tcg_temp_new_i64();
1164 tcg_gen_ext_i32_i64(t1
, arg1
);
1165 tcg_gen_ext_i32_i64(t2
, arg2
);
1166 tcg_gen_ext_i32_i64(t3
, arg3
);
1168 tcg_gen_mul_i64(t2
, t2
, t3
);
1169 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1171 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1175 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1176 TCGv arg3
, uint32_t n
)
1178 TCGv_i64 r1
= tcg_temp_new_i64();
1179 TCGv temp
= tcg_const_i32(n
);
1181 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1182 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1183 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1186 /* ret = r2 - (r1 * r3); */
1187 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1189 TCGv_i64 t1
= tcg_temp_new_i64();
1190 TCGv_i64 t2
= tcg_temp_new_i64();
1191 TCGv_i64 t3
= tcg_temp_new_i64();
1193 tcg_gen_ext_i32_i64(t1
, r1
);
1194 tcg_gen_ext_i32_i64(t2
, r2
);
1195 tcg_gen_ext_i32_i64(t3
, r3
);
1197 tcg_gen_mul_i64(t1
, t1
, t3
);
1198 tcg_gen_sub_i64(t1
, t2
, t1
);
1200 tcg_gen_extrl_i64_i32(ret
, t1
);
1203 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1204 /* result < -0x80000000 */
1205 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1206 tcg_gen_or_i64(t2
, t2
, t3
);
1207 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1208 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1211 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1212 /* Calc AV/SAV bits */
1213 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1214 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1216 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1219 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1221 TCGv temp
= tcg_const_i32(con
);
1222 gen_msub32_d(ret
, r1
, r2
, temp
);
1226 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1229 TCGv t1
= tcg_temp_new();
1230 TCGv t2
= tcg_temp_new();
1231 TCGv t3
= tcg_temp_new();
1232 TCGv t4
= tcg_temp_new();
1234 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1235 /* only the sub can overflow */
1236 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1238 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1239 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1240 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1242 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1243 /* Calc AV/SAV bits */
1244 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1245 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1247 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1248 /* write back the result */
1249 tcg_gen_mov_tl(ret_low
, t3
);
1250 tcg_gen_mov_tl(ret_high
, t4
);
1254 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1257 TCGv temp
= tcg_const_i32(con
);
1258 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1262 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1265 TCGv_i64 t1
= tcg_temp_new_i64();
1266 TCGv_i64 t2
= tcg_temp_new_i64();
1267 TCGv_i64 t3
= tcg_temp_new_i64();
1269 tcg_gen_extu_i32_i64(t1
, r1
);
1270 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1271 tcg_gen_extu_i32_i64(t3
, r3
);
1273 tcg_gen_mul_i64(t1
, t1
, t3
);
1274 tcg_gen_sub_i64(t3
, t2
, t1
);
1275 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1276 /* calc V bit, only the sub can overflow, if t1 > t2 */
1277 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1278 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1279 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1281 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1282 /* Calc AV/SAV bits */
1283 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1284 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1286 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1290 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1293 TCGv temp
= tcg_const_i32(con
);
1294 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1297 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1299 TCGv temp
= tcg_const_i32(r2
);
1300 gen_add_d(ret
, r1
, temp
);
1303 /* calculate the carry bit too */
1304 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1306 TCGv t0
= tcg_temp_new_i32();
1307 TCGv result
= tcg_temp_new_i32();
1309 tcg_gen_movi_tl(t0
, 0);
1310 /* Addition and set C/V/SV bits */
1311 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1313 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1314 tcg_gen_xor_tl(t0
, r1
, r2
);
1315 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1317 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1318 /* Calc AV/SAV bits */
1319 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1320 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1322 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1323 /* write back result */
1324 tcg_gen_mov_tl(ret
, result
);
1327 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1329 TCGv temp
= tcg_const_i32(con
);
1330 gen_add_CC(ret
, r1
, temp
);
1333 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1335 TCGv carry
= tcg_temp_new_i32();
1336 TCGv t0
= tcg_temp_new_i32();
1337 TCGv result
= tcg_temp_new_i32();
1339 tcg_gen_movi_tl(t0
, 0);
1340 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1341 /* Addition, carry and set C/V/SV bits */
1342 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1343 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1345 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1346 tcg_gen_xor_tl(t0
, r1
, r2
);
1347 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1349 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1350 /* Calc AV/SAV bits */
1351 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1352 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1354 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1355 /* write back result */
1356 tcg_gen_mov_tl(ret
, result
);
1359 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1361 TCGv temp
= tcg_const_i32(con
);
1362 gen_addc_CC(ret
, r1
, temp
);
1365 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1368 TCGv temp
= tcg_temp_new();
1369 TCGv temp2
= tcg_temp_new();
1370 TCGv result
= tcg_temp_new();
1371 TCGv mask
= tcg_temp_new();
1372 TCGv t0
= tcg_const_i32(0);
1374 /* create mask for sticky bits */
1375 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1376 tcg_gen_shli_tl(mask
, mask
, 31);
1378 tcg_gen_add_tl(result
, r1
, r2
);
1380 tcg_gen_xor_tl(temp
, result
, r1
);
1381 tcg_gen_xor_tl(temp2
, r1
, r2
);
1382 tcg_gen_andc_tl(temp
, temp
, temp2
);
1383 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1385 tcg_gen_and_tl(temp
, temp
, mask
);
1386 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1388 tcg_gen_add_tl(temp
, result
, result
);
1389 tcg_gen_xor_tl(temp
, temp
, result
);
1390 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1392 tcg_gen_and_tl(temp
, temp
, mask
);
1393 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1394 /* write back result */
1395 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1398 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1401 TCGv temp
= tcg_const_i32(r2
);
1402 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1405 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1407 TCGv temp
= tcg_temp_new_i32();
1408 TCGv result
= tcg_temp_new_i32();
1410 tcg_gen_sub_tl(result
, r1
, r2
);
1412 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1413 tcg_gen_xor_tl(temp
, r1
, r2
);
1414 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1416 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1418 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1419 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1421 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1422 /* write back result */
1423 tcg_gen_mov_tl(ret
, result
);
1427 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1429 TCGv temp
= tcg_temp_new();
1430 TCGv_i64 t0
= tcg_temp_new_i64();
1431 TCGv_i64 t1
= tcg_temp_new_i64();
1432 TCGv_i64 result
= tcg_temp_new_i64();
1434 tcg_gen_sub_i64(result
, r1
, r2
);
1436 tcg_gen_xor_i64(t1
, result
, r1
);
1437 tcg_gen_xor_i64(t0
, r1
, r2
);
1438 tcg_gen_and_i64(t1
, t1
, t0
);
1439 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1441 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1442 /* calc AV/SAV bits */
1443 tcg_gen_extrh_i64_i32(temp
, result
);
1444 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1445 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1447 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1448 /* write back result */
1449 tcg_gen_mov_i64(ret
, result
);
1452 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1454 TCGv result
= tcg_temp_new();
1455 TCGv temp
= tcg_temp_new();
1457 tcg_gen_sub_tl(result
, r1
, r2
);
1459 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1461 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1462 tcg_gen_xor_tl(temp
, r1
, r2
);
1463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1465 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1467 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1468 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1470 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1471 /* write back result */
1472 tcg_gen_mov_tl(ret
, result
);
1475 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1477 TCGv temp
= tcg_temp_new();
1478 tcg_gen_not_tl(temp
, r2
);
1479 gen_addc_CC(ret
, r1
, temp
);
1482 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1485 TCGv temp
= tcg_temp_new();
1486 TCGv temp2
= tcg_temp_new();
1487 TCGv result
= tcg_temp_new();
1488 TCGv mask
= tcg_temp_new();
1489 TCGv t0
= tcg_const_i32(0);
1491 /* create mask for sticky bits */
1492 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1493 tcg_gen_shli_tl(mask
, mask
, 31);
1495 tcg_gen_sub_tl(result
, r1
, r2
);
1497 tcg_gen_xor_tl(temp
, result
, r1
);
1498 tcg_gen_xor_tl(temp2
, r1
, r2
);
1499 tcg_gen_and_tl(temp
, temp
, temp2
);
1500 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1502 tcg_gen_and_tl(temp
, temp
, mask
);
1503 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1505 tcg_gen_add_tl(temp
, result
, result
);
1506 tcg_gen_xor_tl(temp
, temp
, result
);
1507 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1509 tcg_gen_and_tl(temp
, temp
, mask
);
1510 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1511 /* write back result */
1512 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1516 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1517 TCGv r3
, uint32_t n
, uint32_t mode
)
1519 TCGv temp
= tcg_const_i32(n
);
1520 TCGv temp2
= tcg_temp_new();
1521 TCGv_i64 temp64
= tcg_temp_new_i64();
1524 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1527 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1530 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1533 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1536 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1537 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1538 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1542 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1543 TCGv r3
, uint32_t n
, uint32_t mode
)
1545 TCGv temp
= tcg_const_i32(n
);
1546 TCGv temp2
= tcg_temp_new();
1547 TCGv temp3
= tcg_temp_new();
1548 TCGv_i64 temp64
= tcg_temp_new_i64();
1552 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1555 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1558 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1561 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1564 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1565 gen_subs(ret_low
, r1_low
, temp
);
1566 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1567 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1568 gen_subs(ret_high
, r1_high
, temp2
);
1569 /* combine v bits */
1570 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1571 /* combine av bits */
1572 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1576 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1577 TCGv r3
, uint32_t n
, uint32_t mode
)
1579 TCGv temp
= tcg_const_i32(n
);
1580 TCGv_i64 temp64
= tcg_temp_new_i64();
1581 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1582 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1585 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1588 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1591 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1594 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1597 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1598 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1599 /* write back result */
1600 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1604 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1605 TCGv r3
, uint32_t n
, uint32_t mode
)
1607 TCGv temp
= tcg_const_i32(n
);
1608 TCGv_i64 temp64
= tcg_temp_new_i64();
1609 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1612 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1615 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1618 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1621 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1624 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1625 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1626 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1630 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1633 TCGv temp
= tcg_const_i32(n
);
1634 TCGv_i64 temp64
= tcg_temp_new_i64();
1637 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1640 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1643 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1646 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1649 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1653 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1655 TCGv temp
= tcg_temp_new();
1656 TCGv temp2
= tcg_temp_new();
1658 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1659 tcg_gen_shli_tl(temp
, r1
, 16);
1660 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1664 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1665 uint32_t n
, uint32_t mode
)
1667 TCGv temp
= tcg_const_i32(n
);
1668 TCGv_i64 temp64
= tcg_temp_new_i64();
1671 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1674 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1677 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1680 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1683 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1687 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1689 TCGv temp
= tcg_temp_new();
1690 TCGv temp2
= tcg_temp_new();
1692 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1693 tcg_gen_shli_tl(temp
, r1
, 16);
1694 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1698 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1700 TCGv temp
= tcg_const_i32(n
);
1701 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1705 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1707 TCGv temp
= tcg_const_i32(n
);
1708 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1712 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1715 TCGv temp3
= tcg_temp_new();
1716 TCGv_i64 t1
= tcg_temp_new_i64();
1717 TCGv_i64 t2
= tcg_temp_new_i64();
1718 TCGv_i64 t3
= tcg_temp_new_i64();
1719 TCGv_i64 t4
= tcg_temp_new_i64();
1721 tcg_gen_ext_i32_i64(t2
, arg2
);
1722 tcg_gen_ext_i32_i64(t3
, arg3
);
1724 tcg_gen_mul_i64(t2
, t2
, t3
);
1726 tcg_gen_ext_i32_i64(t1
, arg1
);
1727 /* if we shift part of the fraction out, we need to round up */
1728 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1729 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1730 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1731 tcg_gen_add_i64(t2
, t2
, t4
);
1733 tcg_gen_sub_i64(t3
, t1
, t2
);
1734 tcg_gen_extrl_i64_i32(temp3
, t3
);
1736 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1737 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1738 tcg_gen_or_i64(t1
, t1
, t2
);
1739 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1740 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1742 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1743 /* Calc AV/SAV bits */
1744 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1745 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1747 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1748 /* write back result */
1749 tcg_gen_mov_tl(ret
, temp3
);
1753 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1755 TCGv temp
= tcg_temp_new();
1756 TCGv temp2
= tcg_temp_new();
1758 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1759 } else { /* n is expected to be 1 */
1760 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1761 tcg_gen_shli_tl(temp
, temp
, 1);
1762 /* catch special case r1 = r2 = 0x8000 */
1763 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1764 tcg_gen_sub_tl(temp
, temp
, temp2
);
1766 gen_sub_d(ret
, arg1
, temp
);
1770 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1772 TCGv temp
= tcg_temp_new();
1773 TCGv temp2
= tcg_temp_new();
1775 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1776 } else { /* n is expected to be 1 */
1777 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1778 tcg_gen_shli_tl(temp
, temp
, 1);
1779 /* catch special case r1 = r2 = 0x8000 */
1780 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1781 tcg_gen_sub_tl(temp
, temp
, temp2
);
1783 gen_subs(ret
, arg1
, temp
);
1787 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1788 TCGv arg3
, uint32_t n
)
1790 TCGv temp
= tcg_temp_new();
1791 TCGv temp2
= tcg_temp_new();
1792 TCGv_i64 t1
= tcg_temp_new_i64();
1793 TCGv_i64 t2
= tcg_temp_new_i64();
1794 TCGv_i64 t3
= tcg_temp_new_i64();
1797 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1798 } else { /* n is expected to be 1 */
1799 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1800 tcg_gen_shli_tl(temp
, temp
, 1);
1801 /* catch special case r1 = r2 = 0x8000 */
1802 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1803 tcg_gen_sub_tl(temp
, temp
, temp2
);
1805 tcg_gen_ext_i32_i64(t2
, temp
);
1806 tcg_gen_shli_i64(t2
, t2
, 16);
1807 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1808 gen_sub64_d(t3
, t1
, t2
);
1809 /* write back result */
1810 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1814 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1815 TCGv arg3
, uint32_t n
)
1817 TCGv temp
= tcg_temp_new();
1818 TCGv temp2
= tcg_temp_new();
1819 TCGv_i64 t1
= tcg_temp_new_i64();
1820 TCGv_i64 t2
= tcg_temp_new_i64();
1823 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1824 } else { /* n is expected to be 1 */
1825 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1826 tcg_gen_shli_tl(temp
, temp
, 1);
1827 /* catch special case r1 = r2 = 0x8000 */
1828 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1829 tcg_gen_sub_tl(temp
, temp
, temp2
);
1831 tcg_gen_ext_i32_i64(t2
, temp
);
1832 tcg_gen_shli_i64(t2
, t2
, 16);
1833 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1835 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
1836 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1840 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1841 TCGv arg3
, uint32_t n
)
1843 TCGv_i64 t1
= tcg_temp_new_i64();
1844 TCGv_i64 t2
= tcg_temp_new_i64();
1845 TCGv_i64 t3
= tcg_temp_new_i64();
1846 TCGv_i64 t4
= tcg_temp_new_i64();
1849 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1850 tcg_gen_ext_i32_i64(t2
, arg2
);
1851 tcg_gen_ext_i32_i64(t3
, arg3
);
1853 tcg_gen_mul_i64(t2
, t2
, t3
);
1855 tcg_gen_shli_i64(t2
, t2
, 1);
1857 tcg_gen_sub_i64(t4
, t1
, t2
);
1859 tcg_gen_xor_i64(t3
, t4
, t1
);
1860 tcg_gen_xor_i64(t2
, t1
, t2
);
1861 tcg_gen_and_i64(t3
, t3
, t2
);
1862 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1863 /* We produce an overflow on the host if the mul before was
1864 (0x80000000 * 0x80000000) << 1). If this is the
1865 case, we negate the ovf. */
1867 temp
= tcg_temp_new();
1868 temp2
= tcg_temp_new();
1869 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1870 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1871 tcg_gen_and_tl(temp
, temp
, temp2
);
1872 tcg_gen_shli_tl(temp
, temp
, 31);
1873 /* negate v bit, if special condition */
1874 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1876 /* write back result */
1877 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1879 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1880 /* Calc AV/SAV bits */
1881 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1882 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1884 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1888 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1891 TCGv_i64 t1
= tcg_temp_new_i64();
1892 TCGv_i64 t2
= tcg_temp_new_i64();
1893 TCGv_i64 t3
= tcg_temp_new_i64();
1894 TCGv_i64 t4
= tcg_temp_new_i64();
1896 tcg_gen_ext_i32_i64(t1
, arg1
);
1897 tcg_gen_ext_i32_i64(t2
, arg2
);
1898 tcg_gen_ext_i32_i64(t3
, arg3
);
1900 tcg_gen_mul_i64(t2
, t2
, t3
);
1901 /* if we shift part of the fraction out, we need to round up */
1902 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1903 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1904 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
1905 tcg_gen_add_i64(t3
, t3
, t4
);
1907 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
1911 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1912 TCGv arg3
, uint32_t n
)
1914 TCGv_i64 r1
= tcg_temp_new_i64();
1915 TCGv temp
= tcg_const_i32(n
);
1917 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1918 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1919 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1923 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1924 TCGv r3
, uint32_t n
, uint32_t mode
)
1926 TCGv temp
= tcg_const_i32(n
);
1927 TCGv temp2
= tcg_temp_new();
1928 TCGv_i64 temp64
= tcg_temp_new_i64();
1931 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1934 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1937 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1940 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1943 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1944 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1945 tcg_gen_add_tl
, tcg_gen_sub_tl
);
1949 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1950 TCGv r3
, uint32_t n
, uint32_t mode
)
1952 TCGv temp
= tcg_const_i32(n
);
1953 TCGv_i64 temp64
= tcg_temp_new_i64();
1954 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1955 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1958 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1961 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1964 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1967 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1970 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
1971 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
1972 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
1973 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
1974 tcg_gen_shli_i64(temp64
, temp64
, 16);
1976 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
1977 /* write back result */
1978 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
1982 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1984 TCGv temp
= tcg_const_i32(n
);
1985 TCGv temp2
= tcg_temp_new();
1986 TCGv_i64 temp64
= tcg_temp_new_i64();
1989 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1992 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1995 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1998 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2001 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2002 tcg_gen_shli_tl(temp
, r1
, 16);
2003 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2007 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2008 TCGv r3
, uint32_t n
, uint32_t mode
)
2010 TCGv temp
= tcg_const_i32(n
);
2011 TCGv temp2
= tcg_temp_new();
2012 TCGv temp3
= tcg_temp_new();
2013 TCGv_i64 temp64
= tcg_temp_new_i64();
2017 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2020 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2023 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2026 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2029 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2030 gen_adds(ret_low
, r1_low
, temp
);
2031 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2032 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2033 gen_subs(ret_high
, r1_high
, temp2
);
2034 /* combine v bits */
2035 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2036 /* combine av bits */
2037 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2041 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2042 TCGv r3
, uint32_t n
, uint32_t mode
)
2044 TCGv temp
= tcg_const_i32(n
);
2045 TCGv_i64 temp64
= tcg_temp_new_i64();
2046 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2050 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2053 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2056 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2059 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2062 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2063 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2064 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2065 tcg_gen_shli_i64(temp64
, temp64
, 16);
2066 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2068 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2069 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2073 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2075 TCGv temp
= tcg_const_i32(n
);
2076 TCGv temp2
= tcg_temp_new();
2077 TCGv_i64 temp64
= tcg_temp_new_i64();
2080 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2083 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2086 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2089 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2092 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2093 tcg_gen_shli_tl(temp
, r1
, 16);
2094 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2097 static inline void gen_abs(TCGv ret
, TCGv r1
)
2099 tcg_gen_abs_tl(ret
, r1
);
2100 /* overflow can only happen, if r1 = 0x80000000 */
2101 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2102 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2104 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2106 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2107 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2109 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2112 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2114 TCGv temp
= tcg_temp_new_i32();
2115 TCGv result
= tcg_temp_new_i32();
2117 tcg_gen_sub_tl(result
, r1
, r2
);
2118 tcg_gen_sub_tl(temp
, r2
, r1
);
2119 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2122 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2123 tcg_gen_xor_tl(temp
, result
, r2
);
2124 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2125 tcg_gen_xor_tl(temp
, r1
, r2
);
2126 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2128 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2130 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2131 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2133 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2134 /* write back result */
2135 tcg_gen_mov_tl(ret
, result
);
2138 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2140 TCGv temp
= tcg_const_i32(con
);
2141 gen_absdif(ret
, r1
, temp
);
2144 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2146 TCGv temp
= tcg_const_i32(con
);
2147 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2150 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2152 TCGv high
= tcg_temp_new();
2153 TCGv low
= tcg_temp_new();
2155 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2156 tcg_gen_mov_tl(ret
, low
);
2158 tcg_gen_sari_tl(low
, low
, 31);
2159 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2160 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2162 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2164 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2165 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2167 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2170 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2172 TCGv temp
= tcg_const_i32(con
);
2173 gen_mul_i32s(ret
, r1
, temp
);
2176 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2178 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2180 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2182 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2184 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2185 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2187 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2190 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2193 TCGv temp
= tcg_const_i32(con
);
2194 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2197 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2199 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2201 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2203 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2205 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2206 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2208 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2211 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2214 TCGv temp
= tcg_const_i32(con
);
2215 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2218 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2220 TCGv temp
= tcg_const_i32(con
);
2221 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2224 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2226 TCGv temp
= tcg_const_i32(con
);
2227 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2229 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2230 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2232 TCGv temp
= tcg_const_i32(con
);
2233 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2236 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2238 TCGv temp
= tcg_const_i32(con
);
2239 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2243 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2245 TCGv_i64 temp_64
= tcg_temp_new_i64();
2246 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2249 if (up_shift
== 32) {
2250 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2251 } else if (up_shift
== 16) {
2252 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2253 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2255 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2256 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2257 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2259 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2262 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2263 } else { /* n is expected to be 1 */
2264 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2265 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2267 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2269 if (up_shift
== 0) {
2270 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2272 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2274 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2275 /* overflow only occurs if r1 = r2 = 0x8000 */
2276 if (up_shift
== 0) {/* result is 64 bit */
2277 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2279 } else { /* result is 32 bit */
2280 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2283 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2284 /* calc sv overflow bit */
2285 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2287 /* calc av overflow bit */
2288 if (up_shift
== 0) {
2289 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2290 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2292 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2293 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2295 /* calc sav overflow bit */
2296 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2300 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2302 TCGv temp
= tcg_temp_new();
2304 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2305 } else { /* n is expected to be 1 */
2306 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2307 tcg_gen_shli_tl(ret
, ret
, 1);
2308 /* catch special case r1 = r2 = 0x8000 */
2309 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2310 tcg_gen_sub_tl(ret
, ret
, temp
);
2313 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2314 /* calc av overflow bit */
2315 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2316 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2317 /* calc sav overflow bit */
2318 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2321 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2323 TCGv temp
= tcg_temp_new();
2325 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2326 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2328 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2329 tcg_gen_shli_tl(ret
, ret
, 1);
2330 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2331 /* catch special case r1 = r2 = 0x8000 */
2332 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2333 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2334 tcg_gen_sub_tl(ret
, ret
, temp
);
2337 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2338 /* calc av overflow bit */
2339 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2340 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2341 /* calc sav overflow bit */
2342 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2343 /* cut halfword off */
2344 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2348 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2351 TCGv_i64 temp64
= tcg_temp_new_i64();
2352 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2353 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2354 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2358 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2361 TCGv temp
= tcg_const_i32(con
);
2362 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2366 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2369 TCGv_i64 temp64
= tcg_temp_new_i64();
2370 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2371 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2372 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2376 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2379 TCGv temp
= tcg_const_i32(con
);
2380 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2383 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2385 TCGv temp
= tcg_const_i32(con
);
2386 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2389 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2391 TCGv temp
= tcg_const_i32(con
);
2392 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2396 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2399 TCGv_i64 temp64
= tcg_temp_new_i64();
2400 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2401 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2402 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2406 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2409 TCGv temp
= tcg_const_i32(con
);
2410 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2414 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2417 TCGv_i64 temp64
= tcg_temp_new_i64();
2418 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2419 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2420 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2424 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2427 TCGv temp
= tcg_const_i32(con
);
2428 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2431 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2433 TCGv sat_neg
= tcg_const_i32(low
);
2434 TCGv temp
= tcg_const_i32(up
);
2436 /* sat_neg = (arg < low ) ? low : arg; */
2437 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2439 /* ret = (sat_neg > up ) ? up : sat_neg; */
2440 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2443 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2445 TCGv temp
= tcg_const_i32(up
);
2446 /* sat_neg = (arg > up ) ? up : arg; */
2447 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2450 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2452 if (shift_count
== -32) {
2453 tcg_gen_movi_tl(ret
, 0);
2454 } else if (shift_count
>= 0) {
2455 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2457 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2461 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2463 TCGv temp_low
, temp_high
;
2465 if (shiftcount
== -16) {
2466 tcg_gen_movi_tl(ret
, 0);
2468 temp_high
= tcg_temp_new();
2469 temp_low
= tcg_temp_new();
2471 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2472 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2473 gen_shi(temp_low
, temp_low
, shiftcount
);
2474 gen_shi(ret
, temp_high
, shiftcount
);
2475 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2479 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2481 uint32_t msk
, msk_start
;
2482 TCGv temp
= tcg_temp_new();
2483 TCGv temp2
= tcg_temp_new();
2485 if (shift_count
== 0) {
2486 /* Clear PSW.C and PSW.V */
2487 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2488 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2489 tcg_gen_mov_tl(ret
, r1
);
2490 } else if (shift_count
== -32) {
2492 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2493 /* fill ret completely with sign bit */
2494 tcg_gen_sari_tl(ret
, r1
, 31);
2496 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2497 } else if (shift_count
> 0) {
2498 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2499 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2502 msk_start
= 32 - shift_count
;
2503 msk
= ((1 << shift_count
) - 1) << msk_start
;
2504 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2505 /* calc v/sv bits */
2506 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2507 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2508 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2509 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2513 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2516 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2518 msk
= (1 << -shift_count
) - 1;
2519 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2521 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2523 /* calc av overflow bit */
2524 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2525 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2526 /* calc sav overflow bit */
2527 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2530 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2532 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2535 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2537 TCGv temp
= tcg_const_i32(con
);
2538 gen_shas(ret
, r1
, temp
);
2541 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2545 if (shift_count
== 0) {
2546 tcg_gen_mov_tl(ret
, r1
);
2547 } else if (shift_count
> 0) {
2548 low
= tcg_temp_new();
2549 high
= tcg_temp_new();
2551 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2552 tcg_gen_shli_tl(low
, r1
, shift_count
);
2553 tcg_gen_shli_tl(ret
, high
, shift_count
);
2554 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2556 low
= tcg_temp_new();
2557 high
= tcg_temp_new();
2559 tcg_gen_ext16s_tl(low
, r1
);
2560 tcg_gen_sari_tl(low
, low
, -shift_count
);
2561 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2562 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2566 /* ret = {ret[30:0], (r1 cond r2)}; */
2567 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2569 TCGv temp
= tcg_temp_new();
2570 TCGv temp2
= tcg_temp_new();
2572 tcg_gen_shli_tl(temp
, ret
, 1);
2573 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2574 tcg_gen_or_tl(ret
, temp
, temp2
);
2577 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2579 TCGv temp
= tcg_const_i32(con
);
2580 gen_sh_cond(cond
, ret
, r1
, temp
);
2583 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2585 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2588 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2590 TCGv temp
= tcg_const_i32(con
);
2591 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2594 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2596 TCGv temp
= tcg_const_i32(con
);
2597 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2600 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2602 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2605 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2607 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2610 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2612 void(*op1
)(TCGv
, TCGv
, TCGv
),
2613 void(*op2
)(TCGv
, TCGv
, TCGv
))
2617 temp1
= tcg_temp_new();
2618 temp2
= tcg_temp_new();
2620 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2621 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2623 (*op1
)(temp1
, temp1
, temp2
);
2624 (*op2
)(temp1
, ret
, temp1
);
2626 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2629 /* ret = r1[pos1] op1 r2[pos2]; */
2630 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2632 void(*op1
)(TCGv
, TCGv
, TCGv
))
2636 temp1
= tcg_temp_new();
2637 temp2
= tcg_temp_new();
2639 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2640 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2642 (*op1
)(ret
, temp1
, temp2
);
2644 tcg_gen_andi_tl(ret
, ret
, 0x1);
2647 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2648 void(*op
)(TCGv
, TCGv
, TCGv
))
2650 TCGv temp
= tcg_temp_new();
2651 TCGv temp2
= tcg_temp_new();
2652 /* temp = (arg1 cond arg2 )*/
2653 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2655 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2656 /* temp = temp insn temp2 */
2657 (*op
)(temp
, temp
, temp2
);
2658 /* ret = {ret[31:1], temp} */
2659 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2663 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2664 void(*op
)(TCGv
, TCGv
, TCGv
))
2666 TCGv temp
= tcg_const_i32(con
);
2667 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2670 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2671 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2673 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2674 tcg_gen_neg_tl(ret
, ret
);
2677 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2679 TCGv b0
= tcg_temp_new();
2680 TCGv b1
= tcg_temp_new();
2681 TCGv b2
= tcg_temp_new();
2682 TCGv b3
= tcg_temp_new();
2685 tcg_gen_andi_tl(b0
, r1
, 0xff);
2686 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2689 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2690 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2693 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2694 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2697 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2698 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2701 tcg_gen_or_tl(ret
, b0
, b1
);
2702 tcg_gen_or_tl(ret
, ret
, b2
);
2703 tcg_gen_or_tl(ret
, ret
, b3
);
2706 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2708 TCGv h0
= tcg_temp_new();
2709 TCGv h1
= tcg_temp_new();
2712 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2713 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2716 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2717 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2720 tcg_gen_or_tl(ret
, h0
, h1
);
2723 /* mask = ((1 << width) -1) << pos;
2724 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2725 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2727 TCGv mask
= tcg_temp_new();
2728 TCGv temp
= tcg_temp_new();
2729 TCGv temp2
= tcg_temp_new();
2731 tcg_gen_movi_tl(mask
, 1);
2732 tcg_gen_shl_tl(mask
, mask
, width
);
2733 tcg_gen_subi_tl(mask
, mask
, 1);
2734 tcg_gen_shl_tl(mask
, mask
, pos
);
2736 tcg_gen_shl_tl(temp
, r2
, pos
);
2737 tcg_gen_and_tl(temp
, temp
, mask
);
2738 tcg_gen_andc_tl(temp2
, r1
, mask
);
2739 tcg_gen_or_tl(ret
, temp
, temp2
);
2742 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2744 TCGv_i64 temp
= tcg_temp_new_i64();
2746 gen_helper_bsplit(temp
, r1
);
2747 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2750 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2752 TCGv_i64 temp
= tcg_temp_new_i64();
2754 gen_helper_unpack(temp
, r1
);
2755 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2759 gen_dvinit_b(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2761 TCGv_i64 ret
= tcg_temp_new_i64();
2763 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2764 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2766 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2768 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2772 gen_dvinit_h(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2774 TCGv_i64 ret
= tcg_temp_new_i64();
2776 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2777 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2779 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2781 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2784 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2786 TCGv temp
= tcg_temp_new();
2788 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2789 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2790 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2791 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2792 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2794 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2795 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2798 static void gen_calc_usb_mulr_h(TCGv arg
)
2800 TCGv temp
= tcg_temp_new();
2802 tcg_gen_add_tl(temp
, arg
, arg
);
2803 tcg_gen_xor_tl(temp
, temp
, arg
);
2804 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2805 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2807 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2809 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2812 /* helpers for generating program flow micro-ops */
2814 static inline void gen_save_pc(target_ulong pc
)
2816 tcg_gen_movi_tl(cpu_PC
, pc
);
2819 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2821 if (translator_use_goto_tb(&ctx
->base
, dest
)) {
2824 tcg_gen_exit_tb(ctx
->base
.tb
, n
);
2827 tcg_gen_lookup_and_goto_ptr();
2831 static void generate_trap(DisasContext
*ctx
, int class, int tin
)
2833 TCGv_i32 classtemp
= tcg_const_i32(class);
2834 TCGv_i32 tintemp
= tcg_const_i32(tin
);
2836 gen_save_pc(ctx
->base
.pc_next
);
2837 gen_helper_raise_exception_sync(cpu_env
, classtemp
, tintemp
);
2838 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2841 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2842 TCGv r2
, int16_t address
)
2844 TCGLabel
*jumpLabel
= gen_new_label();
2845 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2847 gen_goto_tb(ctx
, 1, ctx
->pc_succ_insn
);
2849 gen_set_label(jumpLabel
);
2850 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ address
* 2);
2853 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2854 int r2
, int16_t address
)
2856 TCGv temp
= tcg_const_i32(r2
);
2857 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2860 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2862 TCGLabel
*l1
= gen_new_label();
2864 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2865 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2866 gen_goto_tb(ctx
, 1, ctx
->base
.pc_next
+ offset
);
2868 gen_goto_tb(ctx
, 0, ctx
->pc_succ_insn
);
2871 static void gen_fcall_save_ctx(DisasContext
*ctx
)
2873 TCGv temp
= tcg_temp_new();
2875 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
2876 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
2877 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
2878 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
2881 static void gen_fret(DisasContext
*ctx
)
2883 TCGv temp
= tcg_temp_new();
2885 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
2886 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
2887 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
2888 tcg_gen_mov_tl(cpu_PC
, temp
);
2889 tcg_gen_exit_tb(NULL
, 0);
2890 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2893 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2894 int r2
, int32_t constant
, int32_t offset
)
2900 /* SB-format jumps */
2903 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2905 case OPC1_32_B_CALL
:
2906 case OPC1_16_SB_CALL
:
2907 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2908 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2911 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2913 case OPC1_16_SB_JNZ
:
2914 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2916 /* SBC-format jumps */
2917 case OPC1_16_SBC_JEQ
:
2918 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2920 case OPC1_16_SBC_JEQ2
:
2921 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2924 case OPC1_16_SBC_JNE
:
2925 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2927 case OPC1_16_SBC_JNE2
:
2928 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2929 constant
, offset
+ 16);
2931 /* SBRN-format jumps */
2932 case OPC1_16_SBRN_JZ_T
:
2933 temp
= tcg_temp_new();
2934 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2935 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2937 case OPC1_16_SBRN_JNZ_T
:
2938 temp
= tcg_temp_new();
2939 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2940 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2942 /* SBR-format jumps */
2943 case OPC1_16_SBR_JEQ
:
2944 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2947 case OPC1_16_SBR_JEQ2
:
2948 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2951 case OPC1_16_SBR_JNE
:
2952 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2955 case OPC1_16_SBR_JNE2
:
2956 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2959 case OPC1_16_SBR_JNZ
:
2960 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2962 case OPC1_16_SBR_JNZ_A
:
2963 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2965 case OPC1_16_SBR_JGEZ
:
2966 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2968 case OPC1_16_SBR_JGTZ
:
2969 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2971 case OPC1_16_SBR_JLEZ
:
2972 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2974 case OPC1_16_SBR_JLTZ
:
2975 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2977 case OPC1_16_SBR_JZ
:
2978 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2980 case OPC1_16_SBR_JZ_A
:
2981 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2983 case OPC1_16_SBR_LOOP
:
2984 gen_loop(ctx
, r1
, offset
* 2 - 32);
2986 /* SR-format jumps */
2988 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2989 tcg_gen_exit_tb(NULL
, 0);
2991 case OPC2_32_SYS_RET
:
2992 case OPC2_16_SR_RET
:
2993 gen_helper_ret(cpu_env
);
2994 tcg_gen_exit_tb(NULL
, 0);
2997 case OPC1_32_B_CALLA
:
2998 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2999 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3001 case OPC1_32_B_FCALL
:
3002 gen_fcall_save_ctx(ctx
);
3003 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3005 case OPC1_32_B_FCALLA
:
3006 gen_fcall_save_ctx(ctx
);
3007 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3010 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3013 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3016 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3017 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3020 case OPCM_32_BRC_EQ_NEQ
:
3021 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3022 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3024 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3027 case OPCM_32_BRC_GE
:
3028 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3029 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3031 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3032 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3036 case OPCM_32_BRC_JLT
:
3037 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3038 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3040 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3041 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3045 case OPCM_32_BRC_JNE
:
3046 temp
= tcg_temp_new();
3047 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3048 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3049 /* subi is unconditional */
3050 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3051 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3053 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3054 /* addi is unconditional */
3055 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3056 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3060 case OPCM_32_BRN_JTT
:
3061 n
= MASK_OP_BRN_N(ctx
->opcode
);
3063 temp
= tcg_temp_new();
3064 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3066 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3067 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3069 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3073 case OPCM_32_BRR_EQ_NEQ
:
3074 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3075 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3078 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3082 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3083 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3084 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3087 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3091 case OPCM_32_BRR_GE
:
3092 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3093 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3096 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3100 case OPCM_32_BRR_JLT
:
3101 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3102 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3105 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3109 case OPCM_32_BRR_LOOP
:
3110 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3111 gen_loop(ctx
, r2
, offset
* 2);
3113 /* OPC2_32_BRR_LOOPU */
3114 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3117 case OPCM_32_BRR_JNE
:
3118 temp
= tcg_temp_new();
3119 temp2
= tcg_temp_new();
3120 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3121 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3122 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3123 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3124 /* subi is unconditional */
3125 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3126 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3128 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3129 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3130 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3131 /* addi is unconditional */
3132 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3133 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3136 case OPCM_32_BRR_JNZ
:
3137 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3138 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3140 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3144 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3146 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3151 * Functions for decoding instructions
3154 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3160 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3161 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3164 case OPC1_16_SRC_ADD
:
3165 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3167 case OPC1_16_SRC_ADD_A15
:
3168 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3170 case OPC1_16_SRC_ADD_15A
:
3171 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3173 case OPC1_16_SRC_ADD_A
:
3174 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3176 case OPC1_16_SRC_CADD
:
3177 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3180 case OPC1_16_SRC_CADDN
:
3181 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3184 case OPC1_16_SRC_CMOV
:
3185 temp
= tcg_const_tl(0);
3186 temp2
= tcg_const_tl(const4
);
3187 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3188 temp2
, cpu_gpr_d
[r1
]);
3190 case OPC1_16_SRC_CMOVN
:
3191 temp
= tcg_const_tl(0);
3192 temp2
= tcg_const_tl(const4
);
3193 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3194 temp2
, cpu_gpr_d
[r1
]);
3196 case OPC1_16_SRC_EQ
:
3197 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3200 case OPC1_16_SRC_LT
:
3201 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3204 case OPC1_16_SRC_MOV
:
3205 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3207 case OPC1_16_SRC_MOV_A
:
3208 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3209 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3211 case OPC1_16_SRC_MOV_E
:
3212 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3213 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3214 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3216 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3219 case OPC1_16_SRC_SH
:
3220 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3222 case OPC1_16_SRC_SHA
:
3223 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3226 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3230 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3235 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3236 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3239 case OPC1_16_SRR_ADD
:
3240 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3242 case OPC1_16_SRR_ADD_A15
:
3243 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3245 case OPC1_16_SRR_ADD_15A
:
3246 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3248 case OPC1_16_SRR_ADD_A
:
3249 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3251 case OPC1_16_SRR_ADDS
:
3252 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3254 case OPC1_16_SRR_AND
:
3255 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3257 case OPC1_16_SRR_CMOV
:
3258 temp
= tcg_const_tl(0);
3259 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3260 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3262 case OPC1_16_SRR_CMOVN
:
3263 temp
= tcg_const_tl(0);
3264 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3265 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3267 case OPC1_16_SRR_EQ
:
3268 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3271 case OPC1_16_SRR_LT
:
3272 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3275 case OPC1_16_SRR_MOV
:
3276 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3278 case OPC1_16_SRR_MOV_A
:
3279 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3281 case OPC1_16_SRR_MOV_AA
:
3282 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3284 case OPC1_16_SRR_MOV_D
:
3285 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3287 case OPC1_16_SRR_MUL
:
3288 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3290 case OPC1_16_SRR_OR
:
3291 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3293 case OPC1_16_SRR_SUB
:
3294 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3296 case OPC1_16_SRR_SUB_A15B
:
3297 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3299 case OPC1_16_SRR_SUB_15AB
:
3300 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3302 case OPC1_16_SRR_SUBS
:
3303 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3305 case OPC1_16_SRR_XOR
:
3306 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3309 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3313 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3317 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3318 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3321 case OPC1_16_SSR_ST_A
:
3322 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3324 case OPC1_16_SSR_ST_A_POSTINC
:
3325 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3326 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3328 case OPC1_16_SSR_ST_B
:
3329 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3331 case OPC1_16_SSR_ST_B_POSTINC
:
3332 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3333 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3335 case OPC1_16_SSR_ST_H
:
3336 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3338 case OPC1_16_SSR_ST_H_POSTINC
:
3339 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3340 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3342 case OPC1_16_SSR_ST_W
:
3343 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3345 case OPC1_16_SSR_ST_W_POSTINC
:
3346 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3347 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3350 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3354 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3358 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3361 case OPC1_16_SC_AND
:
3362 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3364 case OPC1_16_SC_BISR
:
3365 gen_helper_1arg(bisr
, const16
& 0xff);
3367 case OPC1_16_SC_LD_A
:
3368 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3370 case OPC1_16_SC_LD_W
:
3371 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3373 case OPC1_16_SC_MOV
:
3374 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3377 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3379 case OPC1_16_SC_ST_A
:
3380 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3382 case OPC1_16_SC_ST_W
:
3383 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3385 case OPC1_16_SC_SUB_A
:
3386 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3389 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3393 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3397 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3398 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3402 case OPC1_16_SLR_LD_A
:
3403 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3405 case OPC1_16_SLR_LD_A_POSTINC
:
3406 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3407 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3409 case OPC1_16_SLR_LD_BU
:
3410 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3412 case OPC1_16_SLR_LD_BU_POSTINC
:
3413 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3414 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3416 case OPC1_16_SLR_LD_H
:
3417 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3419 case OPC1_16_SLR_LD_H_POSTINC
:
3420 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3421 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3423 case OPC1_16_SLR_LD_W
:
3424 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3426 case OPC1_16_SLR_LD_W_POSTINC
:
3427 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3428 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3431 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3435 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3440 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3441 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3445 case OPC1_16_SRO_LD_A
:
3446 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3448 case OPC1_16_SRO_LD_BU
:
3449 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3451 case OPC1_16_SRO_LD_H
:
3452 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3454 case OPC1_16_SRO_LD_W
:
3455 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3457 case OPC1_16_SRO_ST_A
:
3458 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3460 case OPC1_16_SRO_ST_B
:
3461 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3463 case OPC1_16_SRO_ST_H
:
3464 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3466 case OPC1_16_SRO_ST_W
:
3467 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3470 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3474 static void decode_sr_system(DisasContext
*ctx
)
3477 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3480 case OPC2_16_SR_NOP
:
3482 case OPC2_16_SR_RET
:
3483 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3485 case OPC2_16_SR_RFE
:
3486 gen_helper_rfe(cpu_env
);
3487 tcg_gen_exit_tb(NULL
, 0);
3488 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3490 case OPC2_16_SR_DEBUG
:
3491 /* raise EXCP_DEBUG */
3493 case OPC2_16_SR_FRET
:
3497 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3501 static void decode_sr_accu(DisasContext
*ctx
)
3507 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3508 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3511 case OPC2_16_SR_RSUB
:
3512 /* overflow only if r1 = -0x80000000 */
3513 temp
= tcg_const_i32(-0x80000000);
3515 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3516 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3518 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3520 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3522 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3523 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3525 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3527 case OPC2_16_SR_SAT_B
:
3528 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3530 case OPC2_16_SR_SAT_BU
:
3531 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3533 case OPC2_16_SR_SAT_H
:
3534 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3536 case OPC2_16_SR_SAT_HU
:
3537 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3540 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3544 static void decode_16Bit_opc(DisasContext
*ctx
)
3552 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3554 /* handle ADDSC.A opcode only being 6 bit long */
3555 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3556 op1
= OPC1_16_SRRS_ADDSC_A
;
3560 case OPC1_16_SRC_ADD
:
3561 case OPC1_16_SRC_ADD_A15
:
3562 case OPC1_16_SRC_ADD_15A
:
3563 case OPC1_16_SRC_ADD_A
:
3564 case OPC1_16_SRC_CADD
:
3565 case OPC1_16_SRC_CADDN
:
3566 case OPC1_16_SRC_CMOV
:
3567 case OPC1_16_SRC_CMOVN
:
3568 case OPC1_16_SRC_EQ
:
3569 case OPC1_16_SRC_LT
:
3570 case OPC1_16_SRC_MOV
:
3571 case OPC1_16_SRC_MOV_A
:
3572 case OPC1_16_SRC_MOV_E
:
3573 case OPC1_16_SRC_SH
:
3574 case OPC1_16_SRC_SHA
:
3575 decode_src_opc(ctx
, op1
);
3578 case OPC1_16_SRR_ADD
:
3579 case OPC1_16_SRR_ADD_A15
:
3580 case OPC1_16_SRR_ADD_15A
:
3581 case OPC1_16_SRR_ADD_A
:
3582 case OPC1_16_SRR_ADDS
:
3583 case OPC1_16_SRR_AND
:
3584 case OPC1_16_SRR_CMOV
:
3585 case OPC1_16_SRR_CMOVN
:
3586 case OPC1_16_SRR_EQ
:
3587 case OPC1_16_SRR_LT
:
3588 case OPC1_16_SRR_MOV
:
3589 case OPC1_16_SRR_MOV_A
:
3590 case OPC1_16_SRR_MOV_AA
:
3591 case OPC1_16_SRR_MOV_D
:
3592 case OPC1_16_SRR_MUL
:
3593 case OPC1_16_SRR_OR
:
3594 case OPC1_16_SRR_SUB
:
3595 case OPC1_16_SRR_SUB_A15B
:
3596 case OPC1_16_SRR_SUB_15AB
:
3597 case OPC1_16_SRR_SUBS
:
3598 case OPC1_16_SRR_XOR
:
3599 decode_srr_opc(ctx
, op1
);
3602 case OPC1_16_SSR_ST_A
:
3603 case OPC1_16_SSR_ST_A_POSTINC
:
3604 case OPC1_16_SSR_ST_B
:
3605 case OPC1_16_SSR_ST_B_POSTINC
:
3606 case OPC1_16_SSR_ST_H
:
3607 case OPC1_16_SSR_ST_H_POSTINC
:
3608 case OPC1_16_SSR_ST_W
:
3609 case OPC1_16_SSR_ST_W_POSTINC
:
3610 decode_ssr_opc(ctx
, op1
);
3613 case OPC1_16_SRRS_ADDSC_A
:
3614 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3615 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3616 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3617 temp
= tcg_temp_new();
3618 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3619 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3622 case OPC1_16_SLRO_LD_A
:
3623 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3624 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3625 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3627 case OPC1_16_SLRO_LD_BU
:
3628 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3629 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3630 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3632 case OPC1_16_SLRO_LD_H
:
3633 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3634 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3635 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3637 case OPC1_16_SLRO_LD_W
:
3638 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3639 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3640 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3643 case OPC1_16_SB_CALL
:
3645 case OPC1_16_SB_JNZ
:
3647 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3648 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3651 case OPC1_16_SBC_JEQ
:
3652 case OPC1_16_SBC_JNE
:
3653 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3654 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3655 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3657 case OPC1_16_SBC_JEQ2
:
3658 case OPC1_16_SBC_JNE2
:
3659 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3660 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3661 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3662 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3664 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3668 case OPC1_16_SBRN_JNZ_T
:
3669 case OPC1_16_SBRN_JZ_T
:
3670 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3671 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3672 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3675 case OPC1_16_SBR_JEQ2
:
3676 case OPC1_16_SBR_JNE2
:
3677 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3678 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3679 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3680 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3682 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3685 case OPC1_16_SBR_JEQ
:
3686 case OPC1_16_SBR_JGEZ
:
3687 case OPC1_16_SBR_JGTZ
:
3688 case OPC1_16_SBR_JLEZ
:
3689 case OPC1_16_SBR_JLTZ
:
3690 case OPC1_16_SBR_JNE
:
3691 case OPC1_16_SBR_JNZ
:
3692 case OPC1_16_SBR_JNZ_A
:
3693 case OPC1_16_SBR_JZ
:
3694 case OPC1_16_SBR_JZ_A
:
3695 case OPC1_16_SBR_LOOP
:
3696 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3697 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3698 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3701 case OPC1_16_SC_AND
:
3702 case OPC1_16_SC_BISR
:
3703 case OPC1_16_SC_LD_A
:
3704 case OPC1_16_SC_LD_W
:
3705 case OPC1_16_SC_MOV
:
3707 case OPC1_16_SC_ST_A
:
3708 case OPC1_16_SC_ST_W
:
3709 case OPC1_16_SC_SUB_A
:
3710 decode_sc_opc(ctx
, op1
);
3713 case OPC1_16_SLR_LD_A
:
3714 case OPC1_16_SLR_LD_A_POSTINC
:
3715 case OPC1_16_SLR_LD_BU
:
3716 case OPC1_16_SLR_LD_BU_POSTINC
:
3717 case OPC1_16_SLR_LD_H
:
3718 case OPC1_16_SLR_LD_H_POSTINC
:
3719 case OPC1_16_SLR_LD_W
:
3720 case OPC1_16_SLR_LD_W_POSTINC
:
3721 decode_slr_opc(ctx
, op1
);
3724 case OPC1_16_SRO_LD_A
:
3725 case OPC1_16_SRO_LD_BU
:
3726 case OPC1_16_SRO_LD_H
:
3727 case OPC1_16_SRO_LD_W
:
3728 case OPC1_16_SRO_ST_A
:
3729 case OPC1_16_SRO_ST_B
:
3730 case OPC1_16_SRO_ST_H
:
3731 case OPC1_16_SRO_ST_W
:
3732 decode_sro_opc(ctx
, op1
);
3735 case OPC1_16_SSRO_ST_A
:
3736 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3737 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3738 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3740 case OPC1_16_SSRO_ST_B
:
3741 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3742 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3743 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3745 case OPC1_16_SSRO_ST_H
:
3746 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3747 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3748 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3750 case OPC1_16_SSRO_ST_W
:
3751 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3752 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3753 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3756 case OPCM_16_SR_SYSTEM
:
3757 decode_sr_system(ctx
);
3759 case OPCM_16_SR_ACCU
:
3760 decode_sr_accu(ctx
);
3763 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3764 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3766 case OPC1_16_SR_NOT
:
3767 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3768 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3771 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3776 * 32 bit instructions
3780 static void decode_abs_ldw(DisasContext
*ctx
)
3787 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3788 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3789 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3791 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3794 case OPC2_32_ABS_LD_A
:
3795 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3797 case OPC2_32_ABS_LD_D
:
3799 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3801 case OPC2_32_ABS_LD_DA
:
3803 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3805 case OPC2_32_ABS_LD_W
:
3806 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3809 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3813 static void decode_abs_ldb(DisasContext
*ctx
)
3820 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3821 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3822 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3824 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3827 case OPC2_32_ABS_LD_B
:
3828 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3830 case OPC2_32_ABS_LD_BU
:
3831 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3833 case OPC2_32_ABS_LD_H
:
3834 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3836 case OPC2_32_ABS_LD_HU
:
3837 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3840 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3844 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3851 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3852 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3853 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3855 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3858 case OPC2_32_ABS_LDMST
:
3859 gen_ldmst(ctx
, r1
, temp
);
3861 case OPC2_32_ABS_SWAP_W
:
3862 gen_swap(ctx
, r1
, temp
);
3865 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3869 static void decode_abs_ldst_context(DisasContext
*ctx
)
3874 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3875 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3878 case OPC2_32_ABS_LDLCX
:
3879 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3881 case OPC2_32_ABS_LDUCX
:
3882 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3884 case OPC2_32_ABS_STLCX
:
3885 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3887 case OPC2_32_ABS_STUCX
:
3888 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3891 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3895 static void decode_abs_store(DisasContext
*ctx
)
3902 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3903 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3904 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3906 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3909 case OPC2_32_ABS_ST_A
:
3910 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3912 case OPC2_32_ABS_ST_D
:
3914 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3916 case OPC2_32_ABS_ST_DA
:
3918 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3920 case OPC2_32_ABS_ST_W
:
3921 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3924 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3928 static void decode_abs_storeb_h(DisasContext
*ctx
)
3935 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3936 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3937 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3939 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3942 case OPC2_32_ABS_ST_B
:
3943 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3945 case OPC2_32_ABS_ST_H
:
3946 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3949 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3955 static void decode_bit_andacc(DisasContext
*ctx
)
3961 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3962 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3963 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3964 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3965 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3966 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3970 case OPC2_32_BIT_AND_AND_T
:
3971 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3972 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3974 case OPC2_32_BIT_AND_ANDN_T
:
3975 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3976 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3978 case OPC2_32_BIT_AND_NOR_T
:
3979 if (TCG_TARGET_HAS_andc_i32
) {
3980 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3981 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3983 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3984 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3987 case OPC2_32_BIT_AND_OR_T
:
3988 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3989 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3992 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3996 static void decode_bit_logical_t(DisasContext
*ctx
)
4001 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4002 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4003 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4004 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4005 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4006 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4009 case OPC2_32_BIT_AND_T
:
4010 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4011 pos1
, pos2
, &tcg_gen_and_tl
);
4013 case OPC2_32_BIT_ANDN_T
:
4014 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4015 pos1
, pos2
, &tcg_gen_andc_tl
);
4017 case OPC2_32_BIT_NOR_T
:
4018 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4019 pos1
, pos2
, &tcg_gen_nor_tl
);
4021 case OPC2_32_BIT_OR_T
:
4022 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4023 pos1
, pos2
, &tcg_gen_or_tl
);
4026 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4030 static void decode_bit_insert(DisasContext
*ctx
)
4036 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4037 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4038 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4039 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4040 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4041 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4043 temp
= tcg_temp_new();
4045 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4046 if (op2
== OPC2_32_BIT_INSN_T
) {
4047 tcg_gen_not_tl(temp
, temp
);
4049 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4052 static void decode_bit_logical_t2(DisasContext
*ctx
)
4059 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4060 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4061 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4062 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4063 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4064 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4067 case OPC2_32_BIT_NAND_T
:
4068 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4069 pos1
, pos2
, &tcg_gen_nand_tl
);
4071 case OPC2_32_BIT_ORN_T
:
4072 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4073 pos1
, pos2
, &tcg_gen_orc_tl
);
4075 case OPC2_32_BIT_XNOR_T
:
4076 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4077 pos1
, pos2
, &tcg_gen_eqv_tl
);
4079 case OPC2_32_BIT_XOR_T
:
4080 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4081 pos1
, pos2
, &tcg_gen_xor_tl
);
4084 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4088 static void decode_bit_orand(DisasContext
*ctx
)
4095 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4096 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4097 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4098 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4099 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4100 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4103 case OPC2_32_BIT_OR_AND_T
:
4104 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4105 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4107 case OPC2_32_BIT_OR_ANDN_T
:
4108 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4109 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4111 case OPC2_32_BIT_OR_NOR_T
:
4112 if (TCG_TARGET_HAS_orc_i32
) {
4113 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4114 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4116 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4117 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4120 case OPC2_32_BIT_OR_OR_T
:
4121 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4122 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4125 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4129 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4136 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4137 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4138 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4139 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4140 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4141 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4143 temp
= tcg_temp_new();
4146 case OPC2_32_BIT_SH_AND_T
:
4147 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4148 pos1
, pos2
, &tcg_gen_and_tl
);
4150 case OPC2_32_BIT_SH_ANDN_T
:
4151 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4152 pos1
, pos2
, &tcg_gen_andc_tl
);
4154 case OPC2_32_BIT_SH_NOR_T
:
4155 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4156 pos1
, pos2
, &tcg_gen_nor_tl
);
4158 case OPC2_32_BIT_SH_OR_T
:
4159 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4160 pos1
, pos2
, &tcg_gen_or_tl
);
4163 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4165 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4166 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4169 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4176 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4177 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4178 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4179 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4180 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4181 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4183 temp
= tcg_temp_new();
4186 case OPC2_32_BIT_SH_NAND_T
:
4187 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4188 pos1
, pos2
, &tcg_gen_nand_tl
);
4190 case OPC2_32_BIT_SH_ORN_T
:
4191 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4192 pos1
, pos2
, &tcg_gen_orc_tl
);
4194 case OPC2_32_BIT_SH_XNOR_T
:
4195 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4196 pos1
, pos2
, &tcg_gen_eqv_tl
);
4198 case OPC2_32_BIT_SH_XOR_T
:
4199 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4200 pos1
, pos2
, &tcg_gen_xor_tl
);
4203 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4205 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4206 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4212 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4219 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4220 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4221 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4222 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4225 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4226 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4227 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4228 /* instruction to access the cache */
4230 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4231 case OPC2_32_BO_CACHEA_W_POSTINC
:
4232 case OPC2_32_BO_CACHEA_I_POSTINC
:
4233 /* instruction to access the cache, but we still need to handle
4234 the addressing mode */
4235 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4237 case OPC2_32_BO_CACHEA_WI_PREINC
:
4238 case OPC2_32_BO_CACHEA_W_PREINC
:
4239 case OPC2_32_BO_CACHEA_I_PREINC
:
4240 /* instruction to access the cache, but we still need to handle
4241 the addressing mode */
4242 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4244 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4245 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4246 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4247 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4250 case OPC2_32_BO_CACHEI_W_POSTINC
:
4251 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4252 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4253 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4255 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4258 case OPC2_32_BO_CACHEI_W_PREINC
:
4259 case OPC2_32_BO_CACHEI_WI_PREINC
:
4260 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4261 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4263 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4266 case OPC2_32_BO_ST_A_SHORTOFF
:
4267 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4269 case OPC2_32_BO_ST_A_POSTINC
:
4270 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4272 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4274 case OPC2_32_BO_ST_A_PREINC
:
4275 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4277 case OPC2_32_BO_ST_B_SHORTOFF
:
4278 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4280 case OPC2_32_BO_ST_B_POSTINC
:
4281 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4283 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4285 case OPC2_32_BO_ST_B_PREINC
:
4286 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4288 case OPC2_32_BO_ST_D_SHORTOFF
:
4290 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4293 case OPC2_32_BO_ST_D_POSTINC
:
4295 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4296 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4298 case OPC2_32_BO_ST_D_PREINC
:
4300 temp
= tcg_temp_new();
4301 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4302 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4303 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4305 case OPC2_32_BO_ST_DA_SHORTOFF
:
4307 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4310 case OPC2_32_BO_ST_DA_POSTINC
:
4312 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4313 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4315 case OPC2_32_BO_ST_DA_PREINC
:
4317 temp
= tcg_temp_new();
4318 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4319 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4320 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4322 case OPC2_32_BO_ST_H_SHORTOFF
:
4323 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4325 case OPC2_32_BO_ST_H_POSTINC
:
4326 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4328 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4330 case OPC2_32_BO_ST_H_PREINC
:
4331 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4333 case OPC2_32_BO_ST_Q_SHORTOFF
:
4334 temp
= tcg_temp_new();
4335 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4336 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4338 case OPC2_32_BO_ST_Q_POSTINC
:
4339 temp
= tcg_temp_new();
4340 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4341 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4343 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4345 case OPC2_32_BO_ST_Q_PREINC
:
4346 temp
= tcg_temp_new();
4347 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4348 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4350 case OPC2_32_BO_ST_W_SHORTOFF
:
4351 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4353 case OPC2_32_BO_ST_W_POSTINC
:
4354 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4356 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4358 case OPC2_32_BO_ST_W_PREINC
:
4359 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4362 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4366 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4371 TCGv temp
, temp2
, temp3
;
4373 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4374 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4375 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4376 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4378 temp
= tcg_temp_new();
4379 temp2
= tcg_temp_new();
4380 temp3
= tcg_const_i32(off10
);
4382 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4383 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4386 case OPC2_32_BO_CACHEA_WI_BR
:
4387 case OPC2_32_BO_CACHEA_W_BR
:
4388 case OPC2_32_BO_CACHEA_I_BR
:
4389 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4391 case OPC2_32_BO_CACHEA_WI_CIRC
:
4392 case OPC2_32_BO_CACHEA_W_CIRC
:
4393 case OPC2_32_BO_CACHEA_I_CIRC
:
4394 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4396 case OPC2_32_BO_ST_A_BR
:
4397 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4398 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4400 case OPC2_32_BO_ST_A_CIRC
:
4401 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4402 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4404 case OPC2_32_BO_ST_B_BR
:
4405 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4406 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4408 case OPC2_32_BO_ST_B_CIRC
:
4409 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4410 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4412 case OPC2_32_BO_ST_D_BR
:
4414 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4415 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4417 case OPC2_32_BO_ST_D_CIRC
:
4419 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4420 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4421 tcg_gen_addi_tl(temp
, temp
, 4);
4422 tcg_gen_rem_tl(temp
, temp
, temp2
);
4423 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4424 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4425 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4427 case OPC2_32_BO_ST_DA_BR
:
4429 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4430 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4432 case OPC2_32_BO_ST_DA_CIRC
:
4434 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4435 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4436 tcg_gen_addi_tl(temp
, temp
, 4);
4437 tcg_gen_rem_tl(temp
, temp
, temp2
);
4438 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4439 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4440 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4442 case OPC2_32_BO_ST_H_BR
:
4443 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4444 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4446 case OPC2_32_BO_ST_H_CIRC
:
4447 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4448 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4450 case OPC2_32_BO_ST_Q_BR
:
4451 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4452 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4453 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4455 case OPC2_32_BO_ST_Q_CIRC
:
4456 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4457 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4458 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4460 case OPC2_32_BO_ST_W_BR
:
4461 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4462 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4464 case OPC2_32_BO_ST_W_CIRC
:
4465 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4466 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4469 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4473 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4480 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4481 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4482 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4483 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4486 case OPC2_32_BO_LD_A_SHORTOFF
:
4487 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4489 case OPC2_32_BO_LD_A_POSTINC
:
4490 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4492 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4494 case OPC2_32_BO_LD_A_PREINC
:
4495 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4497 case OPC2_32_BO_LD_B_SHORTOFF
:
4498 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4500 case OPC2_32_BO_LD_B_POSTINC
:
4501 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4503 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4505 case OPC2_32_BO_LD_B_PREINC
:
4506 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4508 case OPC2_32_BO_LD_BU_SHORTOFF
:
4509 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4511 case OPC2_32_BO_LD_BU_POSTINC
:
4512 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4514 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4516 case OPC2_32_BO_LD_BU_PREINC
:
4517 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4519 case OPC2_32_BO_LD_D_SHORTOFF
:
4521 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4524 case OPC2_32_BO_LD_D_POSTINC
:
4526 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4527 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4529 case OPC2_32_BO_LD_D_PREINC
:
4531 temp
= tcg_temp_new();
4532 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4533 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4534 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4536 case OPC2_32_BO_LD_DA_SHORTOFF
:
4538 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4541 case OPC2_32_BO_LD_DA_POSTINC
:
4543 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4544 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4546 case OPC2_32_BO_LD_DA_PREINC
:
4548 temp
= tcg_temp_new();
4549 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4550 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4551 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4553 case OPC2_32_BO_LD_H_SHORTOFF
:
4554 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4556 case OPC2_32_BO_LD_H_POSTINC
:
4557 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4559 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4561 case OPC2_32_BO_LD_H_PREINC
:
4562 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4564 case OPC2_32_BO_LD_HU_SHORTOFF
:
4565 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4567 case OPC2_32_BO_LD_HU_POSTINC
:
4568 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4570 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4572 case OPC2_32_BO_LD_HU_PREINC
:
4573 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4575 case OPC2_32_BO_LD_Q_SHORTOFF
:
4576 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4577 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4579 case OPC2_32_BO_LD_Q_POSTINC
:
4580 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4582 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4583 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4585 case OPC2_32_BO_LD_Q_PREINC
:
4586 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4587 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4589 case OPC2_32_BO_LD_W_SHORTOFF
:
4590 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4592 case OPC2_32_BO_LD_W_POSTINC
:
4593 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4595 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4597 case OPC2_32_BO_LD_W_PREINC
:
4598 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4601 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4605 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4611 TCGv temp
, temp2
, temp3
;
4613 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4614 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4615 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4616 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4618 temp
= tcg_temp_new();
4619 temp2
= tcg_temp_new();
4620 temp3
= tcg_const_i32(off10
);
4622 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4623 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4627 case OPC2_32_BO_LD_A_BR
:
4628 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4629 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4631 case OPC2_32_BO_LD_A_CIRC
:
4632 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4633 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4635 case OPC2_32_BO_LD_B_BR
:
4636 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4637 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4639 case OPC2_32_BO_LD_B_CIRC
:
4640 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4641 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4643 case OPC2_32_BO_LD_BU_BR
:
4644 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4645 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4647 case OPC2_32_BO_LD_BU_CIRC
:
4648 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4649 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4651 case OPC2_32_BO_LD_D_BR
:
4653 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4654 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4656 case OPC2_32_BO_LD_D_CIRC
:
4658 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4659 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4660 tcg_gen_addi_tl(temp
, temp
, 4);
4661 tcg_gen_rem_tl(temp
, temp
, temp2
);
4662 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4663 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4664 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4666 case OPC2_32_BO_LD_DA_BR
:
4668 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4669 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4671 case OPC2_32_BO_LD_DA_CIRC
:
4673 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4674 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4675 tcg_gen_addi_tl(temp
, temp
, 4);
4676 tcg_gen_rem_tl(temp
, temp
, temp2
);
4677 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4678 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4679 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4681 case OPC2_32_BO_LD_H_BR
:
4682 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4683 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4685 case OPC2_32_BO_LD_H_CIRC
:
4686 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4687 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4689 case OPC2_32_BO_LD_HU_BR
:
4690 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4691 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4693 case OPC2_32_BO_LD_HU_CIRC
:
4694 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4695 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4697 case OPC2_32_BO_LD_Q_BR
:
4698 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4699 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4700 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4702 case OPC2_32_BO_LD_Q_CIRC
:
4703 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4704 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4705 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4707 case OPC2_32_BO_LD_W_BR
:
4708 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4709 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4711 case OPC2_32_BO_LD_W_CIRC
:
4712 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4713 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4716 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4720 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4728 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4729 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4730 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4731 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4734 temp
= tcg_temp_new();
4737 case OPC2_32_BO_LDLCX_SHORTOFF
:
4738 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4739 gen_helper_ldlcx(cpu_env
, temp
);
4741 case OPC2_32_BO_LDMST_SHORTOFF
:
4742 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4743 gen_ldmst(ctx
, r1
, temp
);
4745 case OPC2_32_BO_LDMST_POSTINC
:
4746 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4747 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4749 case OPC2_32_BO_LDMST_PREINC
:
4750 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4751 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4753 case OPC2_32_BO_LDUCX_SHORTOFF
:
4754 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4755 gen_helper_lducx(cpu_env
, temp
);
4757 case OPC2_32_BO_LEA_SHORTOFF
:
4758 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4760 case OPC2_32_BO_STLCX_SHORTOFF
:
4761 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4762 gen_helper_stlcx(cpu_env
, temp
);
4764 case OPC2_32_BO_STUCX_SHORTOFF
:
4765 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4766 gen_helper_stucx(cpu_env
, temp
);
4768 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4769 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4770 gen_swap(ctx
, r1
, temp
);
4772 case OPC2_32_BO_SWAP_W_POSTINC
:
4773 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4774 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4776 case OPC2_32_BO_SWAP_W_PREINC
:
4777 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4778 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4780 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4781 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4782 gen_cmpswap(ctx
, r1
, temp
);
4784 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4785 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4786 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4788 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4789 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4790 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4792 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4793 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4794 gen_swapmsk(ctx
, r1
, temp
);
4796 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4797 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4798 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4800 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4801 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4802 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4805 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4809 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4815 TCGv temp
, temp2
, temp3
;
4817 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4818 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4819 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4820 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4822 temp
= tcg_temp_new();
4823 temp2
= tcg_temp_new();
4824 temp3
= tcg_const_i32(off10
);
4826 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4827 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4830 case OPC2_32_BO_LDMST_BR
:
4831 gen_ldmst(ctx
, r1
, temp2
);
4832 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4834 case OPC2_32_BO_LDMST_CIRC
:
4835 gen_ldmst(ctx
, r1
, temp2
);
4836 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4838 case OPC2_32_BO_SWAP_W_BR
:
4839 gen_swap(ctx
, r1
, temp2
);
4840 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4842 case OPC2_32_BO_SWAP_W_CIRC
:
4843 gen_swap(ctx
, r1
, temp2
);
4844 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4846 case OPC2_32_BO_CMPSWAP_W_BR
:
4847 gen_cmpswap(ctx
, r1
, temp2
);
4848 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4850 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4851 gen_cmpswap(ctx
, r1
, temp2
);
4852 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4854 case OPC2_32_BO_SWAPMSK_W_BR
:
4855 gen_swapmsk(ctx
, r1
, temp2
);
4856 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4858 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4859 gen_swapmsk(ctx
, r1
, temp2
);
4860 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4863 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4867 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4873 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4874 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4875 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4878 case OPC1_32_BOL_LD_A_LONGOFF
:
4879 temp
= tcg_temp_new();
4880 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4881 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4883 case OPC1_32_BOL_LD_W_LONGOFF
:
4884 temp
= tcg_temp_new();
4885 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4886 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4888 case OPC1_32_BOL_LEA_LONGOFF
:
4889 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4891 case OPC1_32_BOL_ST_A_LONGOFF
:
4892 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4893 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4895 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4898 case OPC1_32_BOL_ST_W_LONGOFF
:
4899 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4901 case OPC1_32_BOL_LD_B_LONGOFF
:
4902 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4903 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4905 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4908 case OPC1_32_BOL_LD_BU_LONGOFF
:
4909 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4910 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4912 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4915 case OPC1_32_BOL_LD_H_LONGOFF
:
4916 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4917 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4919 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4922 case OPC1_32_BOL_LD_HU_LONGOFF
:
4923 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4924 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4926 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4929 case OPC1_32_BOL_ST_B_LONGOFF
:
4930 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4931 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4933 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4936 case OPC1_32_BOL_ST_H_LONGOFF
:
4937 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4938 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4940 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4944 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4949 static void decode_rc_logical_shift(DisasContext
*ctx
)
4956 r2
= MASK_OP_RC_D(ctx
->opcode
);
4957 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4958 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4959 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4961 temp
= tcg_temp_new();
4964 case OPC2_32_RC_AND
:
4965 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4967 case OPC2_32_RC_ANDN
:
4968 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4970 case OPC2_32_RC_NAND
:
4971 tcg_gen_movi_tl(temp
, const9
);
4972 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4974 case OPC2_32_RC_NOR
:
4975 tcg_gen_movi_tl(temp
, const9
);
4976 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4979 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4981 case OPC2_32_RC_ORN
:
4982 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4985 const9
= sextract32(const9
, 0, 6);
4986 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4988 case OPC2_32_RC_SH_H
:
4989 const9
= sextract32(const9
, 0, 5);
4990 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4992 case OPC2_32_RC_SHA
:
4993 const9
= sextract32(const9
, 0, 6);
4994 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4996 case OPC2_32_RC_SHA_H
:
4997 const9
= sextract32(const9
, 0, 5);
4998 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5000 case OPC2_32_RC_SHAS
:
5001 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5003 case OPC2_32_RC_XNOR
:
5004 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5005 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5007 case OPC2_32_RC_XOR
:
5008 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5011 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5015 static void decode_rc_accumulator(DisasContext
*ctx
)
5023 r2
= MASK_OP_RC_D(ctx
->opcode
);
5024 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5025 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5027 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5029 temp
= tcg_temp_new();
5032 case OPC2_32_RC_ABSDIF
:
5033 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5035 case OPC2_32_RC_ABSDIFS
:
5036 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5038 case OPC2_32_RC_ADD
:
5039 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5041 case OPC2_32_RC_ADDC
:
5042 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5044 case OPC2_32_RC_ADDS
:
5045 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5047 case OPC2_32_RC_ADDS_U
:
5048 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5050 case OPC2_32_RC_ADDX
:
5051 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5053 case OPC2_32_RC_AND_EQ
:
5054 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5055 const9
, &tcg_gen_and_tl
);
5057 case OPC2_32_RC_AND_GE
:
5058 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5059 const9
, &tcg_gen_and_tl
);
5061 case OPC2_32_RC_AND_GE_U
:
5062 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5063 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5064 const9
, &tcg_gen_and_tl
);
5066 case OPC2_32_RC_AND_LT
:
5067 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5068 const9
, &tcg_gen_and_tl
);
5070 case OPC2_32_RC_AND_LT_U
:
5071 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5072 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5073 const9
, &tcg_gen_and_tl
);
5075 case OPC2_32_RC_AND_NE
:
5076 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5077 const9
, &tcg_gen_and_tl
);
5080 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5082 case OPC2_32_RC_EQANY_B
:
5083 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5085 case OPC2_32_RC_EQANY_H
:
5086 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5089 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5091 case OPC2_32_RC_GE_U
:
5092 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5093 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5096 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5098 case OPC2_32_RC_LT_U
:
5099 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5100 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5102 case OPC2_32_RC_MAX
:
5103 tcg_gen_movi_tl(temp
, const9
);
5104 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5105 cpu_gpr_d
[r1
], temp
);
5107 case OPC2_32_RC_MAX_U
:
5108 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5109 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5110 cpu_gpr_d
[r1
], temp
);
5112 case OPC2_32_RC_MIN
:
5113 tcg_gen_movi_tl(temp
, const9
);
5114 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5115 cpu_gpr_d
[r1
], temp
);
5117 case OPC2_32_RC_MIN_U
:
5118 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5119 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5120 cpu_gpr_d
[r1
], temp
);
5123 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5125 case OPC2_32_RC_OR_EQ
:
5126 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5127 const9
, &tcg_gen_or_tl
);
5129 case OPC2_32_RC_OR_GE
:
5130 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5131 const9
, &tcg_gen_or_tl
);
5133 case OPC2_32_RC_OR_GE_U
:
5134 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5135 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5136 const9
, &tcg_gen_or_tl
);
5138 case OPC2_32_RC_OR_LT
:
5139 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5140 const9
, &tcg_gen_or_tl
);
5142 case OPC2_32_RC_OR_LT_U
:
5143 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5144 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5145 const9
, &tcg_gen_or_tl
);
5147 case OPC2_32_RC_OR_NE
:
5148 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5149 const9
, &tcg_gen_or_tl
);
5151 case OPC2_32_RC_RSUB
:
5152 tcg_gen_movi_tl(temp
, const9
);
5153 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5155 case OPC2_32_RC_RSUBS
:
5156 tcg_gen_movi_tl(temp
, const9
);
5157 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5159 case OPC2_32_RC_RSUBS_U
:
5160 tcg_gen_movi_tl(temp
, const9
);
5161 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5163 case OPC2_32_RC_SH_EQ
:
5164 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5166 case OPC2_32_RC_SH_GE
:
5167 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5169 case OPC2_32_RC_SH_GE_U
:
5170 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5171 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5173 case OPC2_32_RC_SH_LT
:
5174 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5176 case OPC2_32_RC_SH_LT_U
:
5177 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5178 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5180 case OPC2_32_RC_SH_NE
:
5181 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5183 case OPC2_32_RC_XOR_EQ
:
5184 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5185 const9
, &tcg_gen_xor_tl
);
5187 case OPC2_32_RC_XOR_GE
:
5188 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5189 const9
, &tcg_gen_xor_tl
);
5191 case OPC2_32_RC_XOR_GE_U
:
5192 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5193 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5194 const9
, &tcg_gen_xor_tl
);
5196 case OPC2_32_RC_XOR_LT
:
5197 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5198 const9
, &tcg_gen_xor_tl
);
5200 case OPC2_32_RC_XOR_LT_U
:
5201 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5202 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5203 const9
, &tcg_gen_xor_tl
);
5205 case OPC2_32_RC_XOR_NE
:
5206 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5207 const9
, &tcg_gen_xor_tl
);
5210 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5214 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5219 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5220 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5223 case OPC2_32_RC_BISR
:
5224 gen_helper_1arg(bisr
, const9
);
5226 case OPC2_32_RC_SYSCALL
:
5227 /* TODO: Add exception generation */
5230 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5234 static void decode_rc_mul(DisasContext
*ctx
)
5240 r2
= MASK_OP_RC_D(ctx
->opcode
);
5241 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5242 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5244 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5247 case OPC2_32_RC_MUL_32
:
5248 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5250 case OPC2_32_RC_MUL_64
:
5252 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5254 case OPC2_32_RC_MULS_32
:
5255 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5257 case OPC2_32_RC_MUL_U_64
:
5258 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5260 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5262 case OPC2_32_RC_MULS_U_32
:
5263 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5264 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5267 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5272 static void decode_rcpw_insert(DisasContext
*ctx
)
5276 int32_t pos
, width
, const4
;
5280 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5281 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5282 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5283 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5284 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5285 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5288 case OPC2_32_RCPW_IMASK
:
5290 /* if pos + width > 32 undefined result */
5291 if (pos
+ width
<= 32) {
5292 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5293 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5296 case OPC2_32_RCPW_INSERT
:
5297 /* if pos + width > 32 undefined result */
5298 if (pos
+ width
<= 32) {
5299 temp
= tcg_const_i32(const4
);
5300 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5304 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5310 static void decode_rcrw_insert(DisasContext
*ctx
)
5314 int32_t width
, const4
;
5316 TCGv temp
, temp2
, temp3
;
5318 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5319 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5320 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5321 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5322 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5323 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5325 temp
= tcg_temp_new();
5326 temp2
= tcg_temp_new();
5329 case OPC2_32_RCRW_IMASK
:
5330 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5331 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5332 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5333 tcg_gen_movi_tl(temp2
, const4
);
5334 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5336 case OPC2_32_RCRW_INSERT
:
5337 temp3
= tcg_temp_new();
5339 tcg_gen_movi_tl(temp
, width
);
5340 tcg_gen_movi_tl(temp2
, const4
);
5341 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5342 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5345 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5351 static void decode_rcr_cond_select(DisasContext
*ctx
)
5359 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5360 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5361 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5362 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5363 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5366 case OPC2_32_RCR_CADD
:
5367 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5370 case OPC2_32_RCR_CADDN
:
5371 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5374 case OPC2_32_RCR_SEL
:
5375 temp
= tcg_const_i32(0);
5376 temp2
= tcg_const_i32(const9
);
5377 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5378 cpu_gpr_d
[r1
], temp2
);
5380 case OPC2_32_RCR_SELN
:
5381 temp
= tcg_const_i32(0);
5382 temp2
= tcg_const_i32(const9
);
5383 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5384 cpu_gpr_d
[r1
], temp2
);
5387 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5391 static void decode_rcr_madd(DisasContext
*ctx
)
5398 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5399 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5400 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5401 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5402 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5405 case OPC2_32_RCR_MADD_32
:
5406 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5408 case OPC2_32_RCR_MADD_64
:
5411 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5412 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5414 case OPC2_32_RCR_MADDS_32
:
5415 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5417 case OPC2_32_RCR_MADDS_64
:
5420 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5421 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5423 case OPC2_32_RCR_MADD_U_64
:
5426 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5427 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5428 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5430 case OPC2_32_RCR_MADDS_U_32
:
5431 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5432 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5434 case OPC2_32_RCR_MADDS_U_64
:
5437 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5438 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5439 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5442 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5446 static void decode_rcr_msub(DisasContext
*ctx
)
5453 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5454 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5455 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5456 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5457 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5460 case OPC2_32_RCR_MSUB_32
:
5461 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5463 case OPC2_32_RCR_MSUB_64
:
5466 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5467 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5469 case OPC2_32_RCR_MSUBS_32
:
5470 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5472 case OPC2_32_RCR_MSUBS_64
:
5475 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5476 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5478 case OPC2_32_RCR_MSUB_U_64
:
5481 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5482 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5483 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5485 case OPC2_32_RCR_MSUBS_U_32
:
5486 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5487 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5489 case OPC2_32_RCR_MSUBS_U_64
:
5492 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5493 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5494 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5497 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5503 static void decode_rlc_opc(DisasContext
*ctx
,
5509 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5510 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5511 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5514 case OPC1_32_RLC_ADDI
:
5515 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5517 case OPC1_32_RLC_ADDIH
:
5518 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5520 case OPC1_32_RLC_ADDIH_A
:
5521 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5523 case OPC1_32_RLC_MFCR
:
5524 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5525 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5527 case OPC1_32_RLC_MOV
:
5528 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5530 case OPC1_32_RLC_MOV_64
:
5531 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5533 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5534 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5536 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5539 case OPC1_32_RLC_MOV_U
:
5540 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5541 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5543 case OPC1_32_RLC_MOV_H
:
5544 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5546 case OPC1_32_RLC_MOVH_A
:
5547 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5549 case OPC1_32_RLC_MTCR
:
5550 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5551 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5554 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5559 static void decode_rr_accumulator(DisasContext
*ctx
)
5566 r3
= MASK_OP_RR_D(ctx
->opcode
);
5567 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5568 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5569 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5572 case OPC2_32_RR_ABS
:
5573 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5575 case OPC2_32_RR_ABS_B
:
5576 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5578 case OPC2_32_RR_ABS_H
:
5579 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5581 case OPC2_32_RR_ABSDIF
:
5582 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5584 case OPC2_32_RR_ABSDIF_B
:
5585 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5588 case OPC2_32_RR_ABSDIF_H
:
5589 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5592 case OPC2_32_RR_ABSDIFS
:
5593 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5596 case OPC2_32_RR_ABSDIFS_H
:
5597 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5600 case OPC2_32_RR_ABSS
:
5601 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5603 case OPC2_32_RR_ABSS_H
:
5604 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5606 case OPC2_32_RR_ADD
:
5607 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5609 case OPC2_32_RR_ADD_B
:
5610 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5612 case OPC2_32_RR_ADD_H
:
5613 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5615 case OPC2_32_RR_ADDC
:
5616 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5618 case OPC2_32_RR_ADDS
:
5619 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5621 case OPC2_32_RR_ADDS_H
:
5622 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5625 case OPC2_32_RR_ADDS_HU
:
5626 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5629 case OPC2_32_RR_ADDS_U
:
5630 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5633 case OPC2_32_RR_ADDX
:
5634 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5636 case OPC2_32_RR_AND_EQ
:
5637 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5638 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5640 case OPC2_32_RR_AND_GE
:
5641 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5642 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5644 case OPC2_32_RR_AND_GE_U
:
5645 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5646 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5648 case OPC2_32_RR_AND_LT
:
5649 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5650 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5652 case OPC2_32_RR_AND_LT_U
:
5653 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5654 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5656 case OPC2_32_RR_AND_NE
:
5657 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5658 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5661 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5664 case OPC2_32_RR_EQ_B
:
5665 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5667 case OPC2_32_RR_EQ_H
:
5668 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5670 case OPC2_32_RR_EQ_W
:
5671 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5673 case OPC2_32_RR_EQANY_B
:
5674 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5676 case OPC2_32_RR_EQANY_H
:
5677 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5680 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5683 case OPC2_32_RR_GE_U
:
5684 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5688 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5691 case OPC2_32_RR_LT_U
:
5692 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5695 case OPC2_32_RR_LT_B
:
5696 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5698 case OPC2_32_RR_LT_BU
:
5699 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5701 case OPC2_32_RR_LT_H
:
5702 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5704 case OPC2_32_RR_LT_HU
:
5705 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5707 case OPC2_32_RR_LT_W
:
5708 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5710 case OPC2_32_RR_LT_WU
:
5711 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5713 case OPC2_32_RR_MAX
:
5714 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5715 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5717 case OPC2_32_RR_MAX_U
:
5718 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5719 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5721 case OPC2_32_RR_MAX_B
:
5722 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5724 case OPC2_32_RR_MAX_BU
:
5725 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5727 case OPC2_32_RR_MAX_H
:
5728 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5730 case OPC2_32_RR_MAX_HU
:
5731 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5733 case OPC2_32_RR_MIN
:
5734 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5735 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5737 case OPC2_32_RR_MIN_U
:
5738 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5739 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5741 case OPC2_32_RR_MIN_B
:
5742 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5744 case OPC2_32_RR_MIN_BU
:
5745 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5747 case OPC2_32_RR_MIN_H
:
5748 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5750 case OPC2_32_RR_MIN_HU
:
5751 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5753 case OPC2_32_RR_MOV
:
5754 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5756 case OPC2_32_RR_MOV_64
:
5757 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5758 temp
= tcg_temp_new();
5761 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5762 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5763 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5765 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5768 case OPC2_32_RR_MOVS_64
:
5769 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5771 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5772 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5774 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5778 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5781 case OPC2_32_RR_OR_EQ
:
5782 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5783 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5785 case OPC2_32_RR_OR_GE
:
5786 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5787 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5789 case OPC2_32_RR_OR_GE_U
:
5790 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5791 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5793 case OPC2_32_RR_OR_LT
:
5794 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5795 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5797 case OPC2_32_RR_OR_LT_U
:
5798 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5799 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5801 case OPC2_32_RR_OR_NE
:
5802 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5803 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5805 case OPC2_32_RR_SAT_B
:
5806 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5808 case OPC2_32_RR_SAT_BU
:
5809 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5811 case OPC2_32_RR_SAT_H
:
5812 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5814 case OPC2_32_RR_SAT_HU
:
5815 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5817 case OPC2_32_RR_SH_EQ
:
5818 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5821 case OPC2_32_RR_SH_GE
:
5822 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5825 case OPC2_32_RR_SH_GE_U
:
5826 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5829 case OPC2_32_RR_SH_LT
:
5830 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5833 case OPC2_32_RR_SH_LT_U
:
5834 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5837 case OPC2_32_RR_SH_NE
:
5838 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5841 case OPC2_32_RR_SUB
:
5842 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5844 case OPC2_32_RR_SUB_B
:
5845 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5847 case OPC2_32_RR_SUB_H
:
5848 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5850 case OPC2_32_RR_SUBC
:
5851 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5853 case OPC2_32_RR_SUBS
:
5854 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5856 case OPC2_32_RR_SUBS_U
:
5857 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5859 case OPC2_32_RR_SUBS_H
:
5860 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5863 case OPC2_32_RR_SUBS_HU
:
5864 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5867 case OPC2_32_RR_SUBX
:
5868 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5870 case OPC2_32_RR_XOR_EQ
:
5871 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5872 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5874 case OPC2_32_RR_XOR_GE
:
5875 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5876 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5878 case OPC2_32_RR_XOR_GE_U
:
5879 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5880 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5882 case OPC2_32_RR_XOR_LT
:
5883 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5884 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5886 case OPC2_32_RR_XOR_LT_U
:
5887 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5888 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5890 case OPC2_32_RR_XOR_NE
:
5891 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5892 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5895 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5899 static void decode_rr_logical_shift(DisasContext
*ctx
)
5904 r3
= MASK_OP_RR_D(ctx
->opcode
);
5905 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5906 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5907 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5910 case OPC2_32_RR_AND
:
5911 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5913 case OPC2_32_RR_ANDN
:
5914 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5916 case OPC2_32_RR_CLO
:
5917 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5918 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5920 case OPC2_32_RR_CLO_H
:
5921 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5923 case OPC2_32_RR_CLS
:
5924 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5926 case OPC2_32_RR_CLS_H
:
5927 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5929 case OPC2_32_RR_CLZ
:
5930 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5932 case OPC2_32_RR_CLZ_H
:
5933 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5935 case OPC2_32_RR_NAND
:
5936 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5938 case OPC2_32_RR_NOR
:
5939 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5942 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5944 case OPC2_32_RR_ORN
:
5945 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5948 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5950 case OPC2_32_RR_SH_H
:
5951 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5953 case OPC2_32_RR_SHA
:
5954 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5956 case OPC2_32_RR_SHA_H
:
5957 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5959 case OPC2_32_RR_SHAS
:
5960 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5962 case OPC2_32_RR_XNOR
:
5963 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5965 case OPC2_32_RR_XOR
:
5966 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5969 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5973 static void decode_rr_address(DisasContext
*ctx
)
5979 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5980 r3
= MASK_OP_RR_D(ctx
->opcode
);
5981 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5982 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5983 n
= MASK_OP_RR_N(ctx
->opcode
);
5986 case OPC2_32_RR_ADD_A
:
5987 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5989 case OPC2_32_RR_ADDSC_A
:
5990 temp
= tcg_temp_new();
5991 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5992 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
5994 case OPC2_32_RR_ADDSC_AT
:
5995 temp
= tcg_temp_new();
5996 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
5997 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
5998 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6000 case OPC2_32_RR_EQ_A
:
6001 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6004 case OPC2_32_RR_EQZ
:
6005 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6007 case OPC2_32_RR_GE_A
:
6008 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6011 case OPC2_32_RR_LT_A
:
6012 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6015 case OPC2_32_RR_MOV_A
:
6016 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6018 case OPC2_32_RR_MOV_AA
:
6019 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6021 case OPC2_32_RR_MOV_D
:
6022 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6024 case OPC2_32_RR_NE_A
:
6025 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6028 case OPC2_32_RR_NEZ_A
:
6029 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6031 case OPC2_32_RR_SUB_A
:
6032 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6035 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6039 static void decode_rr_idirect(DisasContext
*ctx
)
6044 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6045 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6049 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6051 case OPC2_32_RR_JLI
:
6052 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6053 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6055 case OPC2_32_RR_CALLI
:
6056 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6057 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6059 case OPC2_32_RR_FCALLI
:
6060 gen_fcall_save_ctx(ctx
);
6061 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6064 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6066 tcg_gen_exit_tb(NULL
, 0);
6067 ctx
->base
.is_jmp
= DISAS_NORETURN
;
6070 static void decode_rr_divide(DisasContext
*ctx
)
6075 TCGv temp
, temp2
, temp3
;
6077 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6078 r3
= MASK_OP_RR_D(ctx
->opcode
);
6079 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6080 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6083 case OPC2_32_RR_BMERGE
:
6084 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6086 case OPC2_32_RR_BSPLIT
:
6088 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6090 case OPC2_32_RR_DVINIT_B
:
6092 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6095 case OPC2_32_RR_DVINIT_BU
:
6096 temp
= tcg_temp_new();
6097 temp2
= tcg_temp_new();
6098 temp3
= tcg_temp_new();
6100 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6102 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6103 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6104 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6105 tcg_gen_abs_tl(temp
, temp3
);
6106 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6107 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6109 /* overflow = (D[b] == 0) */
6110 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6112 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6114 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6116 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6117 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6119 case OPC2_32_RR_DVINIT_H
:
6121 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6124 case OPC2_32_RR_DVINIT_HU
:
6125 temp
= tcg_temp_new();
6126 temp2
= tcg_temp_new();
6127 temp3
= tcg_temp_new();
6129 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6131 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6132 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6133 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6134 tcg_gen_abs_tl(temp
, temp3
);
6135 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6136 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6138 /* overflow = (D[b] == 0) */
6139 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6141 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6143 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6145 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6146 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6148 case OPC2_32_RR_DVINIT
:
6149 temp
= tcg_temp_new();
6150 temp2
= tcg_temp_new();
6152 /* overflow = ((D[b] == 0) ||
6153 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6154 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6155 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6156 tcg_gen_and_tl(temp
, temp
, temp2
);
6157 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6158 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6159 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6161 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6163 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6165 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6166 /* sign extend to high reg */
6167 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6169 case OPC2_32_RR_DVINIT_U
:
6170 /* overflow = (D[b] == 0) */
6171 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6172 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6174 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6176 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6178 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6179 /* zero extend to high reg*/
6180 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6182 case OPC2_32_RR_PARITY
:
6183 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6185 case OPC2_32_RR_UNPACK
:
6187 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6189 case OPC2_32_RR_CRC32
:
6190 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6191 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6193 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6196 case OPC2_32_RR_DIV
:
6197 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6198 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6201 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6204 case OPC2_32_RR_DIV_U
:
6205 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6206 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6207 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6209 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6212 case OPC2_32_RR_MUL_F
:
6213 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6215 case OPC2_32_RR_DIV_F
:
6216 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6218 case OPC2_32_RR_CMP_F
:
6219 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6221 case OPC2_32_RR_FTOI
:
6222 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6224 case OPC2_32_RR_ITOF
:
6225 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6227 case OPC2_32_RR_FTOUZ
:
6228 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6230 case OPC2_32_RR_UPDFL
:
6231 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6233 case OPC2_32_RR_UTOF
:
6234 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6236 case OPC2_32_RR_FTOIZ
:
6237 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6239 case OPC2_32_RR_QSEED_F
:
6240 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6243 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6248 static void decode_rr1_mul(DisasContext
*ctx
)
6256 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6257 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6258 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6259 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
6260 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6263 case OPC2_32_RR1_MUL_H_32_LL
:
6264 temp64
= tcg_temp_new_i64();
6266 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6267 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6268 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6270 case OPC2_32_RR1_MUL_H_32_LU
:
6271 temp64
= tcg_temp_new_i64();
6273 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6274 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6275 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6277 case OPC2_32_RR1_MUL_H_32_UL
:
6278 temp64
= tcg_temp_new_i64();
6280 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6281 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6282 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6284 case OPC2_32_RR1_MUL_H_32_UU
:
6285 temp64
= tcg_temp_new_i64();
6287 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6288 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6289 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6291 case OPC2_32_RR1_MULM_H_64_LL
:
6292 temp64
= tcg_temp_new_i64();
6294 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6295 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6297 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6299 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6301 case OPC2_32_RR1_MULM_H_64_LU
:
6302 temp64
= tcg_temp_new_i64();
6304 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6305 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6307 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6309 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6311 case OPC2_32_RR1_MULM_H_64_UL
:
6312 temp64
= tcg_temp_new_i64();
6314 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6315 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6317 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6319 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6321 case OPC2_32_RR1_MULM_H_64_UU
:
6322 temp64
= tcg_temp_new_i64();
6324 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6325 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6327 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6329 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6331 case OPC2_32_RR1_MULR_H_16_LL
:
6332 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6333 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6335 case OPC2_32_RR1_MULR_H_16_LU
:
6336 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6337 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6339 case OPC2_32_RR1_MULR_H_16_UL
:
6340 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6341 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6343 case OPC2_32_RR1_MULR_H_16_UU
:
6344 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6345 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6348 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6352 static void decode_rr1_mulq(DisasContext
*ctx
)
6360 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6361 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6362 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6363 n
= MASK_OP_RR1_N(ctx
->opcode
);
6364 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6366 temp
= tcg_temp_new();
6367 temp2
= tcg_temp_new();
6370 case OPC2_32_RR1_MUL_Q_32
:
6371 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6373 case OPC2_32_RR1_MUL_Q_64
:
6375 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6378 case OPC2_32_RR1_MUL_Q_32_L
:
6379 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6380 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6382 case OPC2_32_RR1_MUL_Q_64_L
:
6384 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6385 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6387 case OPC2_32_RR1_MUL_Q_32_U
:
6388 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6389 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6391 case OPC2_32_RR1_MUL_Q_64_U
:
6393 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6394 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6396 case OPC2_32_RR1_MUL_Q_32_LL
:
6397 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6398 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6399 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6401 case OPC2_32_RR1_MUL_Q_32_UU
:
6402 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6403 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6404 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6406 case OPC2_32_RR1_MULR_Q_32_L
:
6407 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6408 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6409 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6411 case OPC2_32_RR1_MULR_Q_32_U
:
6412 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6413 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6414 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6417 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6422 static void decode_rr2_mul(DisasContext
*ctx
)
6427 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6428 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6429 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6430 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6432 case OPC2_32_RR2_MUL_32
:
6433 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6435 case OPC2_32_RR2_MUL_64
:
6437 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6440 case OPC2_32_RR2_MULS_32
:
6441 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6444 case OPC2_32_RR2_MUL_U_64
:
6446 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6449 case OPC2_32_RR2_MULS_U_32
:
6450 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6454 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6459 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6466 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6467 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6468 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6469 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6470 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6471 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6474 case OPC2_32_RRPW_EXTR
:
6476 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6480 if (pos
+ width
<= 32) {
6481 /* optimize special cases */
6482 if ((pos
== 0) && (width
== 8)) {
6483 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6484 } else if ((pos
== 0) && (width
== 16)) {
6485 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6487 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6488 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6492 case OPC2_32_RRPW_EXTR_U
:
6494 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6496 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6497 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6500 case OPC2_32_RRPW_IMASK
:
6503 if (pos
+ width
<= 32) {
6504 temp
= tcg_temp_new();
6505 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6506 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6507 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6511 case OPC2_32_RRPW_INSERT
:
6512 if (pos
+ width
<= 32) {
6513 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6518 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6523 static void decode_rrr_cond_select(DisasContext
*ctx
)
6529 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6530 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6531 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6532 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6533 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6536 case OPC2_32_RRR_CADD
:
6537 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6538 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6540 case OPC2_32_RRR_CADDN
:
6541 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6544 case OPC2_32_RRR_CSUB
:
6545 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6548 case OPC2_32_RRR_CSUBN
:
6549 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6552 case OPC2_32_RRR_SEL
:
6553 temp
= tcg_const_i32(0);
6554 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6555 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6557 case OPC2_32_RRR_SELN
:
6558 temp
= tcg_const_i32(0);
6559 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6560 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6563 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6567 static void decode_rrr_divide(DisasContext
*ctx
)
6573 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6574 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6575 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6576 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6577 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6580 case OPC2_32_RRR_DVADJ
:
6583 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6584 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6586 case OPC2_32_RRR_DVSTEP
:
6589 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6590 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6592 case OPC2_32_RRR_DVSTEP_U
:
6595 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6596 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6598 case OPC2_32_RRR_IXMAX
:
6601 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6602 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6604 case OPC2_32_RRR_IXMAX_U
:
6607 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6608 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6610 case OPC2_32_RRR_IXMIN
:
6613 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6614 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6616 case OPC2_32_RRR_IXMIN_U
:
6619 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6620 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6622 case OPC2_32_RRR_PACK
:
6624 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6625 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6627 case OPC2_32_RRR_ADD_F
:
6628 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6630 case OPC2_32_RRR_SUB_F
:
6631 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6633 case OPC2_32_RRR_MADD_F
:
6634 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6635 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6637 case OPC2_32_RRR_MSUB_F
:
6638 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6639 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6642 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6647 static void decode_rrr2_madd(DisasContext
*ctx
)
6650 uint32_t r1
, r2
, r3
, r4
;
6652 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6653 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6654 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6655 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6656 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6658 case OPC2_32_RRR2_MADD_32
:
6659 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6662 case OPC2_32_RRR2_MADD_64
:
6665 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6666 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6668 case OPC2_32_RRR2_MADDS_32
:
6669 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6670 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6672 case OPC2_32_RRR2_MADDS_64
:
6675 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6676 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6678 case OPC2_32_RRR2_MADD_U_64
:
6681 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6682 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6684 case OPC2_32_RRR2_MADDS_U_32
:
6685 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6686 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6688 case OPC2_32_RRR2_MADDS_U_64
:
6691 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6692 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6695 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6699 static void decode_rrr2_msub(DisasContext
*ctx
)
6702 uint32_t r1
, r2
, r3
, r4
;
6704 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6705 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6706 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6707 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6708 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6711 case OPC2_32_RRR2_MSUB_32
:
6712 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6715 case OPC2_32_RRR2_MSUB_64
:
6718 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6719 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6721 case OPC2_32_RRR2_MSUBS_32
:
6722 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6723 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6725 case OPC2_32_RRR2_MSUBS_64
:
6728 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6729 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6731 case OPC2_32_RRR2_MSUB_U_64
:
6732 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6733 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6735 case OPC2_32_RRR2_MSUBS_U_32
:
6736 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6737 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6739 case OPC2_32_RRR2_MSUBS_U_64
:
6742 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6743 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6746 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6751 static void decode_rrr1_madd(DisasContext
*ctx
)
6754 uint32_t r1
, r2
, r3
, r4
, n
;
6756 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6757 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6758 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6759 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6760 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6761 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6764 case OPC2_32_RRR1_MADD_H_LL
:
6767 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6768 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6770 case OPC2_32_RRR1_MADD_H_LU
:
6773 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6774 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6776 case OPC2_32_RRR1_MADD_H_UL
:
6779 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6780 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6782 case OPC2_32_RRR1_MADD_H_UU
:
6785 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6786 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6788 case OPC2_32_RRR1_MADDS_H_LL
:
6791 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6792 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6794 case OPC2_32_RRR1_MADDS_H_LU
:
6797 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6798 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6800 case OPC2_32_RRR1_MADDS_H_UL
:
6803 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6804 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6806 case OPC2_32_RRR1_MADDS_H_UU
:
6809 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6810 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6812 case OPC2_32_RRR1_MADDM_H_LL
:
6815 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6816 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6818 case OPC2_32_RRR1_MADDM_H_LU
:
6821 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6822 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6824 case OPC2_32_RRR1_MADDM_H_UL
:
6827 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6828 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6830 case OPC2_32_RRR1_MADDM_H_UU
:
6833 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6834 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6836 case OPC2_32_RRR1_MADDMS_H_LL
:
6839 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6840 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6842 case OPC2_32_RRR1_MADDMS_H_LU
:
6845 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6846 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6848 case OPC2_32_RRR1_MADDMS_H_UL
:
6851 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6852 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6854 case OPC2_32_RRR1_MADDMS_H_UU
:
6857 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6858 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6860 case OPC2_32_RRR1_MADDR_H_LL
:
6861 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6862 cpu_gpr_d
[r2
], n
, MODE_LL
);
6864 case OPC2_32_RRR1_MADDR_H_LU
:
6865 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6866 cpu_gpr_d
[r2
], n
, MODE_LU
);
6868 case OPC2_32_RRR1_MADDR_H_UL
:
6869 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6870 cpu_gpr_d
[r2
], n
, MODE_UL
);
6872 case OPC2_32_RRR1_MADDR_H_UU
:
6873 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6874 cpu_gpr_d
[r2
], n
, MODE_UU
);
6876 case OPC2_32_RRR1_MADDRS_H_LL
:
6877 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6878 cpu_gpr_d
[r2
], n
, MODE_LL
);
6880 case OPC2_32_RRR1_MADDRS_H_LU
:
6881 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6882 cpu_gpr_d
[r2
], n
, MODE_LU
);
6884 case OPC2_32_RRR1_MADDRS_H_UL
:
6885 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6886 cpu_gpr_d
[r2
], n
, MODE_UL
);
6888 case OPC2_32_RRR1_MADDRS_H_UU
:
6889 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6890 cpu_gpr_d
[r2
], n
, MODE_UU
);
6893 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6897 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6900 uint32_t r1
, r2
, r3
, r4
, n
;
6903 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6904 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6905 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6906 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6907 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6908 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6910 temp
= tcg_const_i32(n
);
6911 temp2
= tcg_temp_new();
6914 case OPC2_32_RRR1_MADD_Q_32
:
6915 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6916 cpu_gpr_d
[r2
], n
, 32);
6918 case OPC2_32_RRR1_MADD_Q_64
:
6921 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6922 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6925 case OPC2_32_RRR1_MADD_Q_32_L
:
6926 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6927 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6930 case OPC2_32_RRR1_MADD_Q_64_L
:
6933 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6934 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6935 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6938 case OPC2_32_RRR1_MADD_Q_32_U
:
6939 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6940 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6943 case OPC2_32_RRR1_MADD_Q_64_U
:
6946 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6947 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6948 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6951 case OPC2_32_RRR1_MADD_Q_32_LL
:
6952 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6953 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6954 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6956 case OPC2_32_RRR1_MADD_Q_64_LL
:
6959 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6960 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6961 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6962 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6964 case OPC2_32_RRR1_MADD_Q_32_UU
:
6965 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6966 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6967 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6969 case OPC2_32_RRR1_MADD_Q_64_UU
:
6972 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6973 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6974 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6975 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6977 case OPC2_32_RRR1_MADDS_Q_32
:
6978 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6979 cpu_gpr_d
[r2
], n
, 32);
6981 case OPC2_32_RRR1_MADDS_Q_64
:
6984 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6985 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6988 case OPC2_32_RRR1_MADDS_Q_32_L
:
6989 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6990 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6993 case OPC2_32_RRR1_MADDS_Q_64_L
:
6996 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6997 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6998 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7001 case OPC2_32_RRR1_MADDS_Q_32_U
:
7002 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7003 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7006 case OPC2_32_RRR1_MADDS_Q_64_U
:
7009 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7010 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7011 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7014 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7015 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7016 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7017 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7019 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7022 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7023 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7024 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7025 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7027 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7028 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7029 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7030 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7032 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7035 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7036 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7037 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7038 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7040 case OPC2_32_RRR1_MADDR_H_64_UL
:
7042 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7043 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7045 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7047 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7048 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7050 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7051 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7052 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7053 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7055 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7056 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7057 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7058 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7060 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7061 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7062 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7063 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7065 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7066 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7067 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7068 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7071 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7075 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7078 uint32_t r1
, r2
, r3
, r4
, n
;
7080 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7081 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7082 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7083 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7084 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7085 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7088 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7091 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7092 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7094 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7097 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7098 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7100 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7103 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7104 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7106 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7109 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7110 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7112 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7115 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7116 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7119 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7122 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7123 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7126 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7129 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7130 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7133 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7136 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7137 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7140 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7143 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7144 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7147 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7150 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7151 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7154 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7157 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7158 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7161 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7164 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7165 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7168 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7171 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7172 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7175 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7178 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7179 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7182 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7185 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7186 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7189 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7192 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7193 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7196 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7197 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7198 cpu_gpr_d
[r2
], n
, MODE_LL
);
7200 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7201 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7202 cpu_gpr_d
[r2
], n
, MODE_LU
);
7204 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7205 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7206 cpu_gpr_d
[r2
], n
, MODE_UL
);
7208 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7209 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7210 cpu_gpr_d
[r2
], n
, MODE_UU
);
7212 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7213 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7214 cpu_gpr_d
[r2
], n
, MODE_LL
);
7216 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7217 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7218 cpu_gpr_d
[r2
], n
, MODE_LU
);
7220 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7221 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7222 cpu_gpr_d
[r2
], n
, MODE_UL
);
7224 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7225 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7226 cpu_gpr_d
[r2
], n
, MODE_UU
);
7229 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7233 static void decode_rrr1_msub(DisasContext
*ctx
)
7236 uint32_t r1
, r2
, r3
, r4
, n
;
7238 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7239 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7240 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7241 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7242 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7243 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7246 case OPC2_32_RRR1_MSUB_H_LL
:
7249 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7250 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7252 case OPC2_32_RRR1_MSUB_H_LU
:
7255 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7256 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7258 case OPC2_32_RRR1_MSUB_H_UL
:
7261 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7262 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7264 case OPC2_32_RRR1_MSUB_H_UU
:
7267 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7268 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7270 case OPC2_32_RRR1_MSUBS_H_LL
:
7273 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7274 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7276 case OPC2_32_RRR1_MSUBS_H_LU
:
7279 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7280 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7282 case OPC2_32_RRR1_MSUBS_H_UL
:
7285 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7286 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7288 case OPC2_32_RRR1_MSUBS_H_UU
:
7291 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7292 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7294 case OPC2_32_RRR1_MSUBM_H_LL
:
7297 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7298 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7300 case OPC2_32_RRR1_MSUBM_H_LU
:
7303 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7304 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7306 case OPC2_32_RRR1_MSUBM_H_UL
:
7309 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7310 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7312 case OPC2_32_RRR1_MSUBM_H_UU
:
7315 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7316 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7318 case OPC2_32_RRR1_MSUBMS_H_LL
:
7321 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7322 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7324 case OPC2_32_RRR1_MSUBMS_H_LU
:
7327 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7328 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7330 case OPC2_32_RRR1_MSUBMS_H_UL
:
7333 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7334 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7336 case OPC2_32_RRR1_MSUBMS_H_UU
:
7339 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7340 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7342 case OPC2_32_RRR1_MSUBR_H_LL
:
7343 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7344 cpu_gpr_d
[r2
], n
, MODE_LL
);
7346 case OPC2_32_RRR1_MSUBR_H_LU
:
7347 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7348 cpu_gpr_d
[r2
], n
, MODE_LU
);
7350 case OPC2_32_RRR1_MSUBR_H_UL
:
7351 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7352 cpu_gpr_d
[r2
], n
, MODE_UL
);
7354 case OPC2_32_RRR1_MSUBR_H_UU
:
7355 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7356 cpu_gpr_d
[r2
], n
, MODE_UU
);
7358 case OPC2_32_RRR1_MSUBRS_H_LL
:
7359 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7360 cpu_gpr_d
[r2
], n
, MODE_LL
);
7362 case OPC2_32_RRR1_MSUBRS_H_LU
:
7363 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7364 cpu_gpr_d
[r2
], n
, MODE_LU
);
7366 case OPC2_32_RRR1_MSUBRS_H_UL
:
7367 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7368 cpu_gpr_d
[r2
], n
, MODE_UL
);
7370 case OPC2_32_RRR1_MSUBRS_H_UU
:
7371 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7372 cpu_gpr_d
[r2
], n
, MODE_UU
);
7375 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7379 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7382 uint32_t r1
, r2
, r3
, r4
, n
;
7385 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7386 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7387 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7388 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7389 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7390 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7392 temp
= tcg_const_i32(n
);
7393 temp2
= tcg_temp_new();
7396 case OPC2_32_RRR1_MSUB_Q_32
:
7397 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7398 cpu_gpr_d
[r2
], n
, 32);
7400 case OPC2_32_RRR1_MSUB_Q_64
:
7403 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7404 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7407 case OPC2_32_RRR1_MSUB_Q_32_L
:
7408 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7409 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7412 case OPC2_32_RRR1_MSUB_Q_64_L
:
7415 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7416 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7417 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7420 case OPC2_32_RRR1_MSUB_Q_32_U
:
7421 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7422 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7425 case OPC2_32_RRR1_MSUB_Q_64_U
:
7428 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7429 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7430 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7433 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7434 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7435 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7436 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7438 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7441 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7442 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7443 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7444 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7446 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7447 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7448 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7449 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7451 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7454 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7455 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7456 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7457 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7459 case OPC2_32_RRR1_MSUBS_Q_32
:
7460 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7461 cpu_gpr_d
[r2
], n
, 32);
7463 case OPC2_32_RRR1_MSUBS_Q_64
:
7466 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7467 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7470 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7471 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7472 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7475 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7478 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7479 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7480 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7483 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7484 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7485 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7488 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7491 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7492 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7493 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7496 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7497 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7498 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7499 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7501 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7504 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7505 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7506 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7507 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7509 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7510 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7511 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7512 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7514 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7517 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7518 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7519 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7520 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7522 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7524 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7525 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7527 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7529 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7530 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7532 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7533 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7534 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7535 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7537 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7538 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7539 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7540 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7542 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7543 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7544 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7545 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7547 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7548 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7549 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7550 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7553 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7557 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7560 uint32_t r1
, r2
, r3
, r4
, n
;
7562 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7563 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7564 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7565 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7566 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7567 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7570 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7573 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7574 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7576 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7579 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7580 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7582 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7585 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7586 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7588 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7591 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7592 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7594 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7597 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7598 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7601 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7604 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7605 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7608 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7611 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7612 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7615 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7618 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7619 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7622 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7625 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7626 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7629 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7632 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7633 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7636 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7639 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7640 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7643 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7646 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7650 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7653 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7654 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7657 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7660 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7661 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7664 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7667 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7668 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7671 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7674 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7675 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7678 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7679 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7680 cpu_gpr_d
[r2
], n
, MODE_LL
);
7682 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7683 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7684 cpu_gpr_d
[r2
], n
, MODE_LU
);
7686 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7687 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7688 cpu_gpr_d
[r2
], n
, MODE_UL
);
7690 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7691 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7692 cpu_gpr_d
[r2
], n
, MODE_UU
);
7694 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7695 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7696 cpu_gpr_d
[r2
], n
, MODE_LL
);
7698 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7699 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7700 cpu_gpr_d
[r2
], n
, MODE_LU
);
7702 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7703 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7704 cpu_gpr_d
[r2
], n
, MODE_UL
);
7706 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7707 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7708 cpu_gpr_d
[r2
], n
, MODE_UU
);
7711 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7716 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7720 TCGv tmp_width
, tmp_pos
;
7722 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7723 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7724 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7725 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7726 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7728 tmp_pos
= tcg_temp_new();
7729 tmp_width
= tcg_temp_new();
7732 case OPC2_32_RRRR_DEXTR
:
7733 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7735 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7737 TCGv msw
= tcg_temp_new();
7738 TCGv zero
= tcg_constant_tl(0);
7739 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7740 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7741 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7743 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7744 * behaviour. So check that case here and set the low bits to zero
7745 * which effectivly returns cpu_gpr_d[r1]
7747 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7748 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7751 case OPC2_32_RRRR_EXTR
:
7752 case OPC2_32_RRRR_EXTR_U
:
7754 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7755 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7756 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7757 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7758 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7759 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7760 if (op2
== OPC2_32_RRRR_EXTR
) {
7761 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7763 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7766 case OPC2_32_RRRR_INSERT
:
7768 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7769 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7770 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7774 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7779 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7787 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7788 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7789 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7790 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7791 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7792 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7794 temp
= tcg_temp_new();
7797 case OPC2_32_RRRW_EXTR
:
7798 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7799 tcg_gen_addi_tl(temp
, temp
, width
);
7800 tcg_gen_subfi_tl(temp
, 32, temp
);
7801 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7802 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7804 case OPC2_32_RRRW_EXTR_U
:
7806 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7808 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7809 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7810 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7813 case OPC2_32_RRRW_IMASK
:
7814 temp2
= tcg_temp_new();
7816 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7817 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7818 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7819 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7820 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7822 case OPC2_32_RRRW_INSERT
:
7823 temp2
= tcg_temp_new();
7825 tcg_gen_movi_tl(temp
, width
);
7826 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7827 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7830 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * Decode the 32 bit SYS-format opcodes (system and interrupt control
 * instructions).  The sub-opcode is selected via MASK_OP_SYS_OP2;
 * unknown sub-opcodes raise an illegal-opcode trap.
 */
static void decode_sys_interrupts(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        /* Clear ICR.IE: globally disable interrupts. */
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE_1_3);
        break;
    case OPC2_32_SYS_DSYNC:
        /* Memory barrier semantics are implicit under TCG — nothing to emit. */
        break;
    case OPC2_32_SYS_ENABLE:
        /* Set ICR.IE: globally enable interrupts. */
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE_1_3);
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        /* Return from exception; helper restores context, so end the TB. */
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    case OPC2_32_SYS_RFM:
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            /*
             * Only perform the return-from-monitor when DBGSR.DE == 1,
             * i.e. a debug event is pending; otherwise skip the helper.
             */
            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            tcg_gen_exit_tb(NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_RESTORE:
        /* RESTORE exists from TriCore 1.6 on and needs SM or UM1 privilege. */
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                /* Copy bit 0 of d[r1] into ICR.IE (bit 8). */
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_TRAPSV:
        /* Trap if the sticky overflow flag is set (flag cached in sign bit). */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
        gen_set_label(l1);
        break;
    case OPC2_32_SYS_TRAPV:
        /* Trap if the overflow flag is set (flag cached in sign bit). */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
        gen_set_label(l1);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
/*
 * Decode and translate one 32 bit instruction.  Most instruction formats
 * are dispatched to a per-format decode_* helper; a handful of simple
 * opcodes are handled inline.  Unknown opcodes raise an illegal-opcode
 * trap.
 */
static void decode_32Bit_opc(DisasContext *ctx)
{
    int op1;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        /* ST.Q stores the upper halfword of d[r1]. */
        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
        break;
    case OPC1_32_ABS_LD_Q:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        /* LD.Q loads a halfword into the upper half of d[r1]. */
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPC1_32_ABS_LEA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        /* Read-modify-write bit 'bpos' of the byte at the absolute EA. */
        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
        break;
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* BIT-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(ctx);
        break;
/* BOL-format */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(ctx, op1);
        break;
/* BRC-format */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN-format */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR-format */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC-format */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(ctx);
        break;
/* RCPW-format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(ctx);
        break;
/* RCRR-format */
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */

        CHECK_REG_PAIR(r3);

        /* width and pos come from the even/odd halves of the d[r3] pair. */
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
        break;
/* RCRW-format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(ctx);
        break;
/* RCR-format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(ctx);
        break;
/* RLC-format */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(ctx, op1);
        break;
/* RR-format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(ctx);
        break;
/* RR1-format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(ctx);
        break;
/* RR2-format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(ctx);
        break;
/* RRPW-format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);

        /* DEXTR: extract 32 bits from the {d[r1]:d[r2]} pair at 'pos'. */
        tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
                            32 - const16);
        break;
/* RRR-format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(ctx);
        break;
/* RRR2-format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(ctx);
        break;
/* RRR1-format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(ctx);
        break;
/* RRRR-format */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(ctx);
        break;
/* RRRW-format */
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(ctx);
        break;
/* SYS-format */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(ctx);
        break;
    case OPC1_32_SYS_RSTV:
        /* RSTV: clear the overflow flags V/SV/AV/SAV. */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
/*
 * TriCore encodes the instruction length in bit 0 of the first halfword:
 * a cleared bit 0 marks a 16 bit insn, a set bit 0 a 32 bit insn.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return !(insn & 0x1);
}
/*
 * Per-TB translator setup: cache the MMU index, the TB flags (hflags)
 * and the CPU feature bits in the DisasContext.
 */
static void tricore_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cs->env_ptr;
    ctx->mem_idx = cpu_mmu_index(env, false);
    /* tb->flags carries the privilege level (KUU) for this block. */
    ctx->hflags = (uint32_t)ctx->base.tb->flags;
    ctx->features = env->features;
}
static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    /* No per-TB startup code is needed for TriCore. */
}
static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Record the guest PC of the instruction about to be translated. */
    tcg_gen_insn_start(ctx->base.pc_next);
}
8272 static bool insn_crosses_page(CPUTriCoreState
*env
, DisasContext
*ctx
)
8275 * Return true if the insn at ctx->base.pc_next might cross a page boundary.
8276 * (False positives are OK, false negatives are not.)
8277 * Our caller ensures we are only called if dc->base.pc_next is less than
8278 * 4 bytes from the page boundary, so we cross the page if the first
8279 * 16 bits indicate that this is a 32 bit insn.
8281 uint16_t insn
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8283 return !tricore_insn_is_16bit(insn
);
/*
 * Fetch and translate one instruction.  TriCore insns are 2 or 4 bytes
 * long; bit 0 of the first halfword distinguishes the two encodings.
 * The TB is ended early (DISAS_TOO_MANY) when the next insn would lie
 * on, or might cross into, another guest page.
 */
static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cpu->env_ptr;
    uint16_t insn_lo;
    bool is_16bit;

    insn_lo = cpu_lduw_code(env, ctx->base.pc_next);
    is_16bit = tricore_insn_is_16bit(insn_lo);
    if (is_16bit) {
        ctx->opcode = insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 2;
        decode_16Bit_opc(ctx);
    } else {
        /* Second halfword forms the upper 16 bits of the 32 bit opcode. */
        uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2);
        ctx->opcode = insn_hi << 16 | insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_32Bit_opc(ctx);
    }
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        /*
         * Stop translation when the next insn is past the page, or is
         * within 4 bytes of the boundary and would cross it.
         */
        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
            || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
8320 static void tricore_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8322 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8324 switch (ctx
->base
.is_jmp
) {
8325 case DISAS_TOO_MANY
:
8326 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
);
8328 case DISAS_NORETURN
:
8331 g_assert_not_reached();
8335 static void tricore_tr_disas_log(const DisasContextBase
*dcbase
,
8336 CPUState
*cpu
, FILE *logfile
)
8338 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
8339 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/* Hooks plugged into the generic translator_loop() driver. */
static const TranslatorOps tricore_tr_ops = {
    .init_disas_context = tricore_tr_init_disas_context,
    .tb_start           = tricore_tr_tb_start,
    .insn_start         = tricore_tr_insn_start,
    .translate_insn     = tricore_tr_translate_insn,
    .tb_stop            = tricore_tr_tb_stop,
    .disas_log          = tricore_tr_disas_log,
};
/*
 * Entry point called by the core translator: run the generic
 * translator_loop() with the TriCore hooks over one translation block.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &tricore_tr_ops, &ctx.base);
}
/* Put the CPU register state into its architectural reset state. */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    /* NOTE(review): body reconstructed from upstream — confirm against tree. */
    env->PSW = 0xb80;
    fpu_set_state(env);
}
/* Map the core special function registers (CSFRs) onto TCG globals. */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, ICR), "ICR");
}
8385 void tricore_tcg_init(void)
8390 for (i
= 0 ; i
< 16 ; i
++) {
8391 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8392 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8395 for (i
= 0 ; i
< 16 ; i
++) {
8396 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8397 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8400 tricore_tcg_init_csfr();
8401 /* init PSW flag cache */
8402 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8403 offsetof(CPUTriCoreState
, PSW_USB_C
),
8405 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8406 offsetof(CPUTriCoreState
, PSW_USB_V
),
8408 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8409 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8411 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8412 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8414 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8415 offsetof(CPUTriCoreState
, PSW_USB_SAV
),