2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
49 static TCGv cpu_gpr_a
[16];
50 static TCGv cpu_gpr_d
[16];
52 static TCGv cpu_PSW_C
;
53 static TCGv cpu_PSW_V
;
54 static TCGv cpu_PSW_SV
;
55 static TCGv cpu_PSW_AV
;
56 static TCGv cpu_PSW_SAV
;
/* Printable names of the address registers (A10 is the stack pointer). */
static const char *regnames_a[] = {
    "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
    "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
    "a12" , "a13" , "a14" , "a15",
};
/* Printable names of the data registers. */
static const char *regnames_d[] = {
    "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
    "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
    "d12" , "d13" , "d14" , "d15",
};
70 typedef struct DisasContext
{
71 DisasContextBase base
;
72 target_ulong pc_succ_insn
;
74 /* Routine used to access memory */
76 uint32_t hflags
, saved_hflags
;
78 uint32_t icr_ie_mask
, icr_ie_offset
;
81 static int has_feature(DisasContext
*ctx
, int feature
)
83 return (ctx
->features
& (1ULL << feature
)) != 0;
93 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
95 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
96 CPUTriCoreState
*env
= &cpu
->env
;
102 qemu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
103 qemu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
104 qemu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
105 qemu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
106 qemu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
107 qemu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
109 for (i
= 0; i
< 16; ++i
) {
111 qemu_fprintf(f
, "\nGPR A%02d:", i
);
113 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
115 for (i
= 0; i
< 16; ++i
) {
117 qemu_fprintf(f
, "\nGPR D%02d:", i
);
119 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
121 qemu_fprintf(f
, "\n");
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

/* Call helper `name` with a single immediate argument. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_constant_i32(arg);                  \
    gen_helper_##name(cpu_env, helper_tmp);                       \
} while (0)
/* Call a packed-halfword helper with the LL halfword selection:
   low half of arg0 paired with low half of arg1 (low half duplicated). */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Call a packed-halfword helper with the LU halfword selection. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* Call a packed-halfword helper with the UL halfword selection. */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* Call a packed-halfword helper with the UU halfword selection
   (upper half of arg1 duplicated). */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Helper call taking a 64-bit accumulator built from the pair (al1, ah1);
   the 64-bit result is split back into (rl, rh). */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
} while (0)
/* Helper call with env; 64-bit result split into (rl, rh). */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {         \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
                                                             \
    gen_helper_##name(ret, cpu_env, arg1, arg2);             \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
} while (0)
/* Expand the 18-bit absolute-addressing constant into an effective address. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* Expand the 24-bit absolute branch displacement into a byte address. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
199 /* For two 32-bit registers used a 64-bit register, the first
200 registernumber needs to be even. Otherwise we trap. */
201 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
202 #define CHECK_REG_PAIR(reg) do { \
204 generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, MemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
218 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
219 int16_t con
, MemOp mop
)
221 TCGv temp
= tcg_temp_new();
222 tcg_gen_addi_tl(temp
, r2
, con
);
223 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
226 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
228 TCGv_i64 temp
= tcg_temp_new_i64();
230 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
231 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
234 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
237 TCGv temp
= tcg_temp_new();
238 tcg_gen_addi_tl(temp
, base
, con
);
239 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
242 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
244 TCGv_i64 temp
= tcg_temp_new_i64();
246 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
247 /* write back to two 32 bit regs */
248 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
251 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
254 TCGv temp
= tcg_temp_new();
255 tcg_gen_addi_tl(temp
, base
, con
);
256 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
259 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
262 TCGv temp
= tcg_temp_new();
263 tcg_gen_addi_tl(temp
, r2
, off
);
264 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
265 tcg_gen_mov_tl(r2
, temp
);
268 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
271 TCGv temp
= tcg_temp_new();
272 tcg_gen_addi_tl(temp
, r2
, off
);
273 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
274 tcg_gen_mov_tl(r2
, temp
);
277 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
278 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
280 TCGv temp
= tcg_temp_new();
281 TCGv temp2
= tcg_temp_new();
283 CHECK_REG_PAIR(ereg
);
284 /* temp = (M(EA, word) */
285 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
286 /* temp = temp & ~E[a][63:32]) */
287 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
288 /* temp2 = (E[a][31:0] & E[a][63:32]); */
289 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
290 /* temp = temp | temp2; */
291 tcg_gen_or_tl(temp
, temp
, temp2
);
292 /* M(EA, word) = temp; */
293 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
296 /* tmp = M(EA, word);
299 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
301 TCGv temp
= tcg_temp_new();
303 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
304 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
308 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
310 TCGv temp
= tcg_temp_new();
311 TCGv temp2
= tcg_temp_new();
313 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
314 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
315 cpu_gpr_d
[reg
], temp
);
316 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
317 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
320 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
322 TCGv temp
= tcg_temp_new();
323 TCGv temp2
= tcg_temp_new();
324 TCGv temp3
= tcg_temp_new();
326 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
327 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
328 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
329 tcg_gen_or_tl(temp2
, temp2
, temp3
);
330 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
331 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
335 /* We generate loads and store to core special function register (csfr) through
336 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
337 makros R, A and E, which allow read-only, all and endinit protected access.
338 These makros also specify in which ISA version the csfr was introduced. */
339 #define R(ADDRESS, REG, FEATURE) \
341 if (has_feature(ctx, FEATURE)) { \
342 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
345 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
346 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
347 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
349 /* since we're caching PSW make this a special case */
350 if (offset
== 0xfe04) {
351 gen_helper_psw_read(ret
, cpu_env
);
354 #include "csfr.h.inc"
362 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
363 since no execption occurs */
364 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
366 if (has_feature(ctx, FEATURE)) { \
367 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
370 /* Endinit protected registers
371 TODO: Since the endinit bit is in a register of a not yet implemented
372 watchdog device, we handle endinit protected registers like
373 all-access registers for now. */
374 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
375 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
378 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
379 /* since we're caching PSW make this a special case */
380 if (offset
== 0xfe04) {
381 gen_helper_psw_write(cpu_env
, r1
);
384 #include "csfr.h.inc"
388 /* generate privilege trap */
392 /* Functions for arithmetic instructions */
394 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
396 TCGv t0
= tcg_temp_new_i32();
397 TCGv result
= tcg_temp_new_i32();
398 /* Addition and set V/SV bits */
399 tcg_gen_add_tl(result
, r1
, r2
);
401 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
402 tcg_gen_xor_tl(t0
, r1
, r2
);
403 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
405 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
406 /* Calc AV/SAV bits */
407 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
408 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
410 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
411 /* write back result */
412 tcg_gen_mov_tl(ret
, result
);
416 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
418 TCGv temp
= tcg_temp_new();
419 TCGv_i64 t0
= tcg_temp_new_i64();
420 TCGv_i64 t1
= tcg_temp_new_i64();
421 TCGv_i64 result
= tcg_temp_new_i64();
423 tcg_gen_add_i64(result
, r1
, r2
);
425 tcg_gen_xor_i64(t1
, result
, r1
);
426 tcg_gen_xor_i64(t0
, r1
, r2
);
427 tcg_gen_andc_i64(t1
, t1
, t0
);
428 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
430 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
431 /* calc AV/SAV bits */
432 tcg_gen_extrh_i64_i32(temp
, result
);
433 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
434 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
436 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
437 /* write back result */
438 tcg_gen_mov_i64(ret
, result
);
442 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
443 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
444 void(*op2
)(TCGv
, TCGv
, TCGv
))
446 TCGv temp
= tcg_temp_new();
447 TCGv temp2
= tcg_temp_new();
448 TCGv temp3
= tcg_temp_new();
449 TCGv temp4
= tcg_temp_new();
451 (*op1
)(temp
, r1_low
, r2
);
453 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
454 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
455 if (op1
== tcg_gen_add_tl
) {
456 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
458 tcg_gen_and_tl(temp2
, temp2
, temp3
);
461 (*op2
)(temp3
, r1_high
, r3
);
463 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
464 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
465 if (op2
== tcg_gen_add_tl
) {
466 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
468 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
470 /* combine V0/V1 bits */
471 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
473 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
475 tcg_gen_mov_tl(ret_low
, temp
);
476 tcg_gen_mov_tl(ret_high
, temp3
);
478 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
479 tcg_gen_xor_tl(temp
, temp
, ret_low
);
480 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
481 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
482 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
484 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
487 /* ret = r2 + (r1 * r3); */
488 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
490 TCGv_i64 t1
= tcg_temp_new_i64();
491 TCGv_i64 t2
= tcg_temp_new_i64();
492 TCGv_i64 t3
= tcg_temp_new_i64();
494 tcg_gen_ext_i32_i64(t1
, r1
);
495 tcg_gen_ext_i32_i64(t2
, r2
);
496 tcg_gen_ext_i32_i64(t3
, r3
);
498 tcg_gen_mul_i64(t1
, t1
, t3
);
499 tcg_gen_add_i64(t1
, t2
, t1
);
501 tcg_gen_extrl_i64_i32(ret
, t1
);
504 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
505 /* t1 < -0x80000000 */
506 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
507 tcg_gen_or_i64(t2
, t2
, t3
);
508 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
509 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
512 /* Calc AV/SAV bits */
513 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
514 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
516 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
519 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
521 TCGv temp
= tcg_constant_i32(con
);
522 gen_madd32_d(ret
, r1
, r2
, temp
);
526 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
529 TCGv t1
= tcg_temp_new();
530 TCGv t2
= tcg_temp_new();
531 TCGv t3
= tcg_temp_new();
532 TCGv t4
= tcg_temp_new();
534 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
535 /* only the add can overflow */
536 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
538 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
539 tcg_gen_xor_tl(t1
, r2_high
, t2
);
540 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
542 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
543 /* Calc AV/SAV bits */
544 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
545 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
547 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
548 /* write back the result */
549 tcg_gen_mov_tl(ret_low
, t3
);
550 tcg_gen_mov_tl(ret_high
, t4
);
554 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
557 TCGv_i64 t1
= tcg_temp_new_i64();
558 TCGv_i64 t2
= tcg_temp_new_i64();
559 TCGv_i64 t3
= tcg_temp_new_i64();
561 tcg_gen_extu_i32_i64(t1
, r1
);
562 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
563 tcg_gen_extu_i32_i64(t3
, r3
);
565 tcg_gen_mul_i64(t1
, t1
, t3
);
566 tcg_gen_add_i64(t2
, t2
, t1
);
567 /* write back result */
568 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
569 /* only the add overflows, if t2 < t1
571 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
572 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
573 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
575 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
576 /* Calc AV/SAV bits */
577 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
578 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
580 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
584 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
587 TCGv temp
= tcg_constant_i32(con
);
588 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
592 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
595 TCGv temp
= tcg_constant_i32(con
);
596 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
600 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
601 TCGv r3
, uint32_t n
, uint32_t mode
)
603 TCGv t_n
= tcg_constant_i32(n
);
604 TCGv temp
= tcg_temp_new();
605 TCGv temp2
= tcg_temp_new();
606 TCGv_i64 temp64
= tcg_temp_new_i64();
609 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
612 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
615 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
618 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
621 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
622 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
623 tcg_gen_add_tl
, tcg_gen_add_tl
);
627 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
628 TCGv r3
, uint32_t n
, uint32_t mode
)
630 TCGv t_n
= tcg_constant_i32(n
);
631 TCGv temp
= tcg_temp_new();
632 TCGv temp2
= tcg_temp_new();
633 TCGv_i64 temp64
= tcg_temp_new_i64();
636 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
639 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
642 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
645 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
648 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
649 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
650 tcg_gen_sub_tl
, tcg_gen_add_tl
);
654 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
655 TCGv r3
, uint32_t n
, uint32_t mode
)
657 TCGv t_n
= tcg_constant_i32(n
);
658 TCGv_i64 temp64
= tcg_temp_new_i64();
659 TCGv_i64 temp64_2
= tcg_temp_new_i64();
660 TCGv_i64 temp64_3
= tcg_temp_new_i64();
663 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
666 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
669 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
672 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
675 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
676 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
677 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
678 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
679 tcg_gen_shli_i64(temp64
, temp64
, 16);
681 gen_add64_d(temp64_2
, temp64_3
, temp64
);
682 /* write back result */
683 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
686 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
689 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
690 TCGv r3
, uint32_t n
, uint32_t mode
)
692 TCGv t_n
= tcg_constant_i32(n
);
693 TCGv temp
= tcg_temp_new();
694 TCGv temp2
= tcg_temp_new();
695 TCGv temp3
= tcg_temp_new();
696 TCGv_i64 temp64
= tcg_temp_new_i64();
700 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
703 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
706 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
709 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
712 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
713 gen_adds(ret_low
, r1_low
, temp
);
714 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
715 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
716 gen_adds(ret_high
, r1_high
, temp2
);
718 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
719 /* combine av bits */
720 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
723 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
726 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
727 TCGv r3
, uint32_t n
, uint32_t mode
)
729 TCGv t_n
= tcg_constant_i32(n
);
730 TCGv temp
= tcg_temp_new();
731 TCGv temp2
= tcg_temp_new();
732 TCGv temp3
= tcg_temp_new();
733 TCGv_i64 temp64
= tcg_temp_new_i64();
737 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
740 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
743 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
746 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
749 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
750 gen_subs(ret_low
, r1_low
, temp
);
751 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
752 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
753 gen_adds(ret_high
, r1_high
, temp2
);
755 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
756 /* combine av bits */
757 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
761 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
762 TCGv r3
, uint32_t n
, uint32_t mode
)
764 TCGv t_n
= tcg_constant_i32(n
);
765 TCGv_i64 temp64
= tcg_temp_new_i64();
766 TCGv_i64 temp64_2
= tcg_temp_new_i64();
770 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
773 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
776 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
779 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
782 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
783 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
784 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
785 tcg_gen_shli_i64(temp64
, temp64
, 16);
786 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
788 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
789 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
794 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
795 TCGv r3
, uint32_t n
, uint32_t mode
)
797 TCGv t_n
= tcg_constant_i32(n
);
798 TCGv_i64 temp64
= tcg_temp_new_i64();
799 TCGv_i64 temp64_2
= tcg_temp_new_i64();
800 TCGv_i64 temp64_3
= tcg_temp_new_i64();
803 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
806 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
809 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
812 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
815 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
816 gen_add64_d(temp64_3
, temp64_2
, temp64
);
817 /* write back result */
818 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
822 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
823 TCGv r3
, uint32_t n
, uint32_t mode
)
825 TCGv t_n
= tcg_constant_i32(n
);
826 TCGv_i64 temp64
= tcg_temp_new_i64();
827 TCGv_i64 temp64_2
= tcg_temp_new_i64();
830 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
833 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
836 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
839 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
842 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
843 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
844 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
848 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
851 TCGv t_n
= tcg_constant_i32(n
);
852 TCGv_i64 temp64
= tcg_temp_new_i64();
855 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
858 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
861 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
864 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
867 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
871 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
873 TCGv temp
= tcg_temp_new();
874 TCGv temp2
= tcg_temp_new();
876 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
877 tcg_gen_shli_tl(temp
, r1
, 16);
878 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
882 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
884 TCGv t_n
= tcg_constant_i32(n
);
885 TCGv temp
= tcg_temp_new();
886 TCGv temp2
= tcg_temp_new();
887 TCGv_i64 temp64
= tcg_temp_new_i64();
890 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
893 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
896 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
899 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
902 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
903 tcg_gen_shli_tl(temp
, r1
, 16);
904 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
909 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
910 uint32_t n
, uint32_t mode
)
912 TCGv t_n
= tcg_constant_i32(n
);
913 TCGv_i64 temp64
= tcg_temp_new_i64();
916 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
919 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
922 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
925 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
928 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
932 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
934 TCGv temp
= tcg_temp_new();
935 TCGv temp2
= tcg_temp_new();
937 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
938 tcg_gen_shli_tl(temp
, r1
, 16);
939 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
943 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
945 TCGv t_n
= tcg_constant_i32(n
);
946 TCGv temp
= tcg_temp_new();
947 TCGv temp2
= tcg_temp_new();
948 TCGv_i64 temp64
= tcg_temp_new_i64();
951 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
954 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
957 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
960 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
963 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
964 tcg_gen_shli_tl(temp
, r1
, 16);
965 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
969 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
971 TCGv t_n
= tcg_constant_i32(n
);
972 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
976 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
978 TCGv t_n
= tcg_constant_i32(n
);
979 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
983 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
986 TCGv temp
= tcg_temp_new();
987 TCGv temp2
= tcg_temp_new();
988 TCGv temp3
= tcg_temp_new();
989 TCGv_i64 t1
= tcg_temp_new_i64();
990 TCGv_i64 t2
= tcg_temp_new_i64();
991 TCGv_i64 t3
= tcg_temp_new_i64();
993 tcg_gen_ext_i32_i64(t2
, arg2
);
994 tcg_gen_ext_i32_i64(t3
, arg3
);
996 tcg_gen_mul_i64(t2
, t2
, t3
);
997 tcg_gen_shli_i64(t2
, t2
, n
);
999 tcg_gen_ext_i32_i64(t1
, arg1
);
1000 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1002 tcg_gen_add_i64(t3
, t1
, t2
);
1003 tcg_gen_extrl_i64_i32(temp3
, t3
);
1005 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1006 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1007 tcg_gen_or_i64(t1
, t1
, t2
);
1008 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1009 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1010 /* We produce an overflow on the host if the mul before was
1011 (0x80000000 * 0x80000000) << 1). If this is the
1012 case, we negate the ovf. */
1014 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1015 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1016 tcg_gen_and_tl(temp
, temp
, temp2
);
1017 tcg_gen_shli_tl(temp
, temp
, 31);
1018 /* negate v bit, if special condition */
1019 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1022 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1023 /* Calc AV/SAV bits */
1024 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1025 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1027 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1028 /* write back result */
1029 tcg_gen_mov_tl(ret
, temp3
);
1033 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1035 TCGv temp
= tcg_temp_new();
1036 TCGv temp2
= tcg_temp_new();
1038 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1039 } else { /* n is expected to be 1 */
1040 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1041 tcg_gen_shli_tl(temp
, temp
, 1);
1042 /* catch special case r1 = r2 = 0x8000 */
1043 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1044 tcg_gen_sub_tl(temp
, temp
, temp2
);
1046 gen_add_d(ret
, arg1
, temp
);
1050 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1052 TCGv temp
= tcg_temp_new();
1053 TCGv temp2
= tcg_temp_new();
1055 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1056 } else { /* n is expected to be 1 */
1057 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1058 tcg_gen_shli_tl(temp
, temp
, 1);
1059 /* catch special case r1 = r2 = 0x8000 */
1060 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1061 tcg_gen_sub_tl(temp
, temp
, temp2
);
1063 gen_adds(ret
, arg1
, temp
);
1067 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1068 TCGv arg3
, uint32_t n
)
1070 TCGv temp
= tcg_temp_new();
1071 TCGv temp2
= tcg_temp_new();
1072 TCGv_i64 t1
= tcg_temp_new_i64();
1073 TCGv_i64 t2
= tcg_temp_new_i64();
1074 TCGv_i64 t3
= tcg_temp_new_i64();
1077 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1078 } else { /* n is expected to be 1 */
1079 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1080 tcg_gen_shli_tl(temp
, temp
, 1);
1081 /* catch special case r1 = r2 = 0x8000 */
1082 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1083 tcg_gen_sub_tl(temp
, temp
, temp2
);
1085 tcg_gen_ext_i32_i64(t2
, temp
);
1086 tcg_gen_shli_i64(t2
, t2
, 16);
1087 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1088 gen_add64_d(t3
, t1
, t2
);
1089 /* write back result */
1090 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1094 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1095 TCGv arg3
, uint32_t n
)
1097 TCGv temp
= tcg_temp_new();
1098 TCGv temp2
= tcg_temp_new();
1099 TCGv_i64 t1
= tcg_temp_new_i64();
1100 TCGv_i64 t2
= tcg_temp_new_i64();
1103 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1104 } else { /* n is expected to be 1 */
1105 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1106 tcg_gen_shli_tl(temp
, temp
, 1);
1107 /* catch special case r1 = r2 = 0x8000 */
1108 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1109 tcg_gen_sub_tl(temp
, temp
, temp2
);
1111 tcg_gen_ext_i32_i64(t2
, temp
);
1112 tcg_gen_shli_i64(t2
, t2
, 16);
1113 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1115 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1116 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
/*
 * 64-bit Q-format multiply-accumulate:
 *   {rh:rl} = {arg1_high:arg1_low} + (arg2 * arg3) << n
 * Computes PSW V/SV and AV/SAV status bits inline.
 */
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        /* Q-format: double the product for n == 1 */
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_add_i64(t4, t1, t2);
    /* calc v bit: signed-add overflow of t1 + t2 */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/*
 * 32-bit saturating Q-format multiply-accumulate; the saturating add
 * and PSW updates are done by the madd32_q_add_ssov helper.
 */
static inline void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* align the product to the accumulator's fixed-point position */
    tcg_gen_sari_i64(t2, t2, up_shift - n);

    gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
}
/*
 * 64-bit saturating Q-format multiply-accumulate; delegates the whole
 * operation (including PSW updates) to the madd64_q_ssov helper.
 */
static inline void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv t_n = tcg_constant_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, r1);
}
/* ret = r2 - (r1 * r3); */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    /* compute in 64 bit so the overflow check below is exact */
    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V bit: result > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
1230 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1232 TCGv temp
= tcg_constant_i32(con
);
1233 gen_msub32_d(ret
, r1
, r2
, temp
);
/*
 * 64-bit multiply-subtract:
 *   {ret_high:ret_low} = {r2_high:r2_low} - (r1 * r3)
 * Sets PSW V/SV from the 64-bit subtraction and AV/SAV from the high word.
 */
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}
1265 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1268 TCGv temp
= tcg_constant_i32(con
);
1269 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
/*
 * Unsigned 64-bit multiply-subtract:
 *   {ret_high:ret_low} = {r2_high:r2_low} - (r1 * r3), all unsigned.
 */
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
1301 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1304 TCGv temp
= tcg_constant_i32(con
);
1305 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1308 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1310 TCGv temp
= tcg_constant_i32(r2
);
1311 gen_add_d(ret
, r1
, temp
);
/* calculate the carry bit too */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    /* calc V bit: overflow iff operands agree in sign but result differs */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
1338 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1340 TCGv temp
= tcg_constant_i32(con
);
1341 gen_add_CC(ret
, r1
, temp
);
/* Add with carry-in from PSW_C; produces C/V/SV/AV/SAV. */
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* normalize PSW_C to 0/1 before feeding it into the add chain */
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
1370 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1372 TCGv temp
= tcg_constant_i32(con
);
1373 gen_addc_CC(ret
, r1
, temp
);
/*
 * Conditional add: if (r4 <cond> 0) r3 = r1 + r2 else r3 = r1.
 * PSW V/AV are only updated when the condition holds (movcond keeps the
 * old value otherwise); the sticky SV/SAV bits are masked the same way.
 */
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_constant_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    /* Calc PSW_V */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
}
1409 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1412 TCGv temp
= tcg_constant_i32(r2
);
1413 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
/* ret = r1 - r2 with V/SV/AV/SAV status bit computation. */
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit: overflow iff operands differ in sign and result
       differs from minuend */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
/* 64-bit subtract with PSW updates; AV/SAV come from the high word. */
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_sub_i64(result, r1, r2);
    /* calc v bit */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
}
/* ret = r1 - r2, also computing the carry (borrow) bit. */
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc C bit: C set when no borrow occurs (r1 >= r2 unsigned) */
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
1486 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1488 TCGv temp
= tcg_temp_new();
1489 tcg_gen_not_tl(temp
, r2
);
1490 gen_addc_CC(ret
, r1
, temp
);
/*
 * Conditional subtract: if (r4 <cond> 0) r3 = r1 - r2 else r3 = r1.
 * Mirrors gen_cond_add; flags are only committed when the condition holds.
 */
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_constant_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_sub_tl(result, r1, r2);
    /* Calc PSW_V */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
}
/*
 * Packed 16-bit multiply-subtract; half-word selection is chosen by
 * 'mode' (LL/LU/UL/UU helper variants), subtraction by gen_addsub64_h.
 */
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
}
/*
 * Saturating packed multiply-subtract; each half is saturated via
 * gen_subs, then the per-half V/AV bits are merged.
 */
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    /* stash the low-half flags before the second gen_subs clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
/*
 * Packed multiply-subtract with the "m" (mulm_h) product form; the
 * 64-bit subtraction and PSW updates are done by gen_sub64_d.
 */
static inline void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_sub64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
/* Saturating variant of gen_msubm_h; saturation via sub64_ssov helper. */
static inline void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Rounding packed multiply-subtract; subr_h helper does sub + round. */
static inline void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
              uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
}
/*
 * 32-bit rounding multiply-subtract: splits r1 into its two half-words
 * (low half shifted up to bit 16, high half masked) and reuses the
 * 64-bit routine.
 */
static inline void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
/* Saturating rounding multiply-subtract; subr_h_ssov saturates. */
static inline void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
}
/* Saturating 32-bit rounding multiply-subtract; see gen_msubr32_h. */
static inline void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
1711 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1713 TCGv temp
= tcg_constant_i32(n
);
1714 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1718 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1720 TCGv temp
= tcg_constant_i32(n
);
1721 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
/*
 * 32-bit Q-format multiply-subtract with rounding of the shifted-out
 * fraction bits; computes V/SV/AV/SAV inline.
 */
static inline void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift)
{
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);

    tcg_gen_ext_i32_i64(t1, arg1);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    tcg_gen_add_i64(t2, t2, t4);

    tcg_gen_sub_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit: 64-bit result outside the signed 32-bit range */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);
}
/* 16x16 Q-format multiply-subtract, 32-bit accumulator, no saturation. */
static inline void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_sub_d(ret, arg1, temp);
}
/* Saturating 16x16 Q-format multiply-subtract, 32-bit accumulator. */
static inline void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_subs(ret, arg1, temp);
}
/* 16x16 Q-format multiply-subtract with 64-bit accumulator. */
static inline void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    /* align product to the 64-bit accumulator's fraction position */
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_sub64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);
}
/* Saturating 16x16 Q-format multiply-subtract, 64-bit accumulator. */
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);
}
/*
 * 64-bit Q-format multiply-subtract:
 *   {rh:rl} = {arg1_high:arg1_low} - (arg2 * arg3) << n
 * Mirror image of gen_madd64_q, including the 0x8000_0000 * 0x8000_0000
 * special-case fixup of the V bit.
 */
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_sub_i64(t4, t1, t2);
    /* calc v bit: signed-sub overflow of t1 - t2 */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/*
 * Saturating 32-bit Q-format multiply-subtract; rounding is done here,
 * the saturating subtract and PSW updates in the helper.
 */
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);

    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
}
/* Saturating 64-bit Q-format multiply-subtract via msub64_q_ssov. */
static inline void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv t_n = tcg_constant_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, r1);
}
/*
 * Packed multiply-add/subtract: low half is added, high half subtracted
 * (note the add/sub op pair passed to gen_addsub64_h).
 */
static inline void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_sub_tl);
}
/*
 * Multiply-add/subtract with "m" accumulation: forms (high - low) << 16
 * of the packed products and subtracts it from the 64-bit accumulator.
 */
static inline void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_sub64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
/* Rounding multiply-add/subtract on the two half-words of r1. */
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    /* split r1 into its two half-words for the helper */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
}
/*
 * Saturating multiply-add/subtract: low half via gen_adds, high half via
 * gen_subs; per-half V/AV bits are merged afterwards.
 */
static inline void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    /* stash the low-half flags before gen_subs clobbers them */
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
/* Saturating variant of gen_msubadm_h; saturation via sub64_ssov. */
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Saturating rounding multiply-add/subtract; subadr_h_ssov saturates. */
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
}
/* ret = |r1| with V/SV/AV/SAV computation. */
static inline void gen_abs(TCGv ret, TCGv r1)
{
    tcg_gen_abs_tl(ret, r1);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/* ret = |r1 - r2|; picks the non-negative difference via movcond. */
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    /* calc V bit for whichever subtraction was selected */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
2155 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2157 TCGv temp
= tcg_constant_i32(con
);
2158 gen_absdif(ret
, r1
, temp
);
2161 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2163 TCGv temp
= tcg_constant_i32(con
);
2164 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
/* Signed 32x32->32 multiply; V set if the 64-bit product overflows 32 bits. */
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    /* calc V bit: high word must equal the sign extension of the low word */
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2187 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2189 TCGv temp
= tcg_constant_i32(con
);
2190 gen_mul_i32s(ret
, r1
, temp
);
/* Signed 32x32->64 multiply; a 64-bit result cannot overflow, so V = 0. */
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2207 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2210 TCGv temp
= tcg_constant_i32(con
);
2211 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
/* Unsigned 32x32->64 multiply; V = 0, same as the signed 64-bit case. */
static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2228 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2231 TCGv temp
= tcg_constant_i32(con
);
2232 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2235 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2237 TCGv temp
= tcg_constant_i32(con
);
2238 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2241 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2243 TCGv temp
= tcg_constant_i32(con
);
2244 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2247 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2248 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2250 TCGv temp
= tcg_constant_i32(con
);
2251 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2254 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2256 TCGv temp
= tcg_constant_i32(con
);
2257 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
/*
 * Q-format multiply producing a 64-bit {rh:rl} result; 'n' doubles the
 * product, 'up_shift' selects the fixed-point alignment (0/16/32).
 * V can only be set in the n == 1 path (0x8000 * 0x8000 case).
 */
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        /* reset v bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
2318 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2320 TCGv temp
= tcg_temp_new();
2322 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2323 } else { /* n is expected to be 1 */
2324 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2325 tcg_gen_shli_tl(ret
, ret
, 1);
2326 /* catch special case r1 = r2 = 0x8000 */
2327 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2328 tcg_gen_sub_tl(ret
, ret
, temp
);
2331 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2332 /* calc av overflow bit */
2333 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2334 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2335 /* calc sav overflow bit */
2336 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2339 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2341 TCGv temp
= tcg_temp_new();
2343 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2344 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2346 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2347 tcg_gen_shli_tl(ret
, ret
, 1);
2348 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2349 /* catch special case r1 = r2 = 0x8000 */
2350 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2351 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2352 tcg_gen_sub_tl(ret
, ret
, temp
);
2355 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2356 /* calc av overflow bit */
2357 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2358 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2359 /* calc sav overflow bit */
2360 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2361 /* cut halfword off */
2362 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2366 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2369 TCGv_i64 temp64
= tcg_temp_new_i64();
2370 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2371 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2372 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2376 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2379 TCGv temp
= tcg_constant_i32(con
);
2380 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2384 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2387 TCGv_i64 temp64
= tcg_temp_new_i64();
2388 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2389 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2390 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2394 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2397 TCGv temp
= tcg_constant_i32(con
);
2398 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2401 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2403 TCGv temp
= tcg_constant_i32(con
);
2404 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2407 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2409 TCGv temp
= tcg_constant_i32(con
);
2410 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2414 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2417 TCGv_i64 temp64
= tcg_temp_new_i64();
2418 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2419 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2420 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2424 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2427 TCGv temp
= tcg_constant_i32(con
);
2428 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2432 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2435 TCGv_i64 temp64
= tcg_temp_new_i64();
2436 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2437 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2438 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2442 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2445 TCGv temp
= tcg_constant_i32(con
);
2446 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2449 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2451 tcg_gen_smax_tl(ret
, arg
, tcg_constant_i32(low
));
2452 tcg_gen_smin_tl(ret
, ret
, tcg_constant_i32(up
));
2455 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2457 tcg_gen_umin_tl(ret
, arg
, tcg_constant_i32(up
));
2460 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2462 if (shift_count
== -32) {
2463 tcg_gen_movi_tl(ret
, 0);
2464 } else if (shift_count
>= 0) {
2465 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2467 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2471 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2473 TCGv temp_low
, temp_high
;
2475 if (shiftcount
== -16) {
2476 tcg_gen_movi_tl(ret
, 0);
2478 temp_high
= tcg_temp_new();
2479 temp_low
= tcg_temp_new();
2481 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2482 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2483 gen_shi(temp_low
, temp_low
, shiftcount
);
2484 gen_shi(ret
, temp_high
, shiftcount
);
2485 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2489 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2491 uint32_t msk
, msk_start
;
2492 TCGv temp
= tcg_temp_new();
2493 TCGv temp2
= tcg_temp_new();
2495 if (shift_count
== 0) {
2496 /* Clear PSW.C and PSW.V */
2497 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2498 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2499 tcg_gen_mov_tl(ret
, r1
);
2500 } else if (shift_count
== -32) {
2502 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2503 /* fill ret completely with sign bit */
2504 tcg_gen_sari_tl(ret
, r1
, 31);
2506 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2507 } else if (shift_count
> 0) {
2508 TCGv t_max
= tcg_constant_i32(0x7FFFFFFF >> shift_count
);
2509 TCGv t_min
= tcg_constant_i32(((int32_t) -0x80000000) >> shift_count
);
2512 msk_start
= 32 - shift_count
;
2513 msk
= ((1 << shift_count
) - 1) << msk_start
;
2514 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2515 /* calc v/sv bits */
2516 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2517 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2518 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2519 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2521 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2523 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2526 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2528 msk
= (1 << -shift_count
) - 1;
2529 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2531 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2533 /* calc av overflow bit */
2534 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2535 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2536 /* calc sav overflow bit */
2537 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2540 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2542 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2545 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2547 TCGv temp
= tcg_constant_i32(con
);
2548 gen_shas(ret
, r1
, temp
);
2551 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2555 if (shift_count
== 0) {
2556 tcg_gen_mov_tl(ret
, r1
);
2557 } else if (shift_count
> 0) {
2558 low
= tcg_temp_new();
2559 high
= tcg_temp_new();
2561 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2562 tcg_gen_shli_tl(low
, r1
, shift_count
);
2563 tcg_gen_shli_tl(ret
, high
, shift_count
);
2564 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2566 low
= tcg_temp_new();
2567 high
= tcg_temp_new();
2569 tcg_gen_ext16s_tl(low
, r1
);
2570 tcg_gen_sari_tl(low
, low
, -shift_count
);
2571 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2572 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2576 /* ret = {ret[30:0], (r1 cond r2)}; */
2577 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2579 TCGv temp
= tcg_temp_new();
2580 TCGv temp2
= tcg_temp_new();
2582 tcg_gen_shli_tl(temp
, ret
, 1);
2583 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2584 tcg_gen_or_tl(ret
, temp
, temp2
);
2587 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2589 TCGv temp
= tcg_constant_i32(con
);
2590 gen_sh_cond(cond
, ret
, r1
, temp
);
2593 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2595 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2598 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2600 TCGv temp
= tcg_constant_i32(con
);
2601 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2604 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2606 TCGv temp
= tcg_constant_i32(con
);
2607 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2610 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2612 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2615 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2617 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2620 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2622 void(*op1
)(TCGv
, TCGv
, TCGv
),
2623 void(*op2
)(TCGv
, TCGv
, TCGv
))
2627 temp1
= tcg_temp_new();
2628 temp2
= tcg_temp_new();
2630 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2631 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2633 (*op1
)(temp1
, temp1
, temp2
);
2634 (*op2
)(temp1
, ret
, temp1
);
2636 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2639 /* ret = r1[pos1] op1 r2[pos2]; */
2640 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2642 void(*op1
)(TCGv
, TCGv
, TCGv
))
2646 temp1
= tcg_temp_new();
2647 temp2
= tcg_temp_new();
2649 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2650 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2652 (*op1
)(ret
, temp1
, temp2
);
2654 tcg_gen_andi_tl(ret
, ret
, 0x1);
2657 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2658 void(*op
)(TCGv
, TCGv
, TCGv
))
2660 TCGv temp
= tcg_temp_new();
2661 TCGv temp2
= tcg_temp_new();
2662 /* temp = (arg1 cond arg2 )*/
2663 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2665 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2666 /* temp = temp insn temp2 */
2667 (*op
)(temp
, temp
, temp2
);
2668 /* ret = {ret[31:1], temp} */
2669 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2673 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2674 void(*op
)(TCGv
, TCGv
, TCGv
))
2676 TCGv temp
= tcg_constant_i32(con
);
2677 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2680 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2681 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2683 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2684 tcg_gen_neg_tl(ret
, ret
);
2687 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2689 TCGv b0
= tcg_temp_new();
2690 TCGv b1
= tcg_temp_new();
2691 TCGv b2
= tcg_temp_new();
2692 TCGv b3
= tcg_temp_new();
2695 tcg_gen_andi_tl(b0
, r1
, 0xff);
2696 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2699 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2700 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2703 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2704 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2707 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2708 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2711 tcg_gen_or_tl(ret
, b0
, b1
);
2712 tcg_gen_or_tl(ret
, ret
, b2
);
2713 tcg_gen_or_tl(ret
, ret
, b3
);
2716 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2718 TCGv h0
= tcg_temp_new();
2719 TCGv h1
= tcg_temp_new();
2722 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2723 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2726 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2727 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2730 tcg_gen_or_tl(ret
, h0
, h1
);
2733 /* mask = ((1 << width) -1) << pos;
2734 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2735 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2737 TCGv mask
= tcg_temp_new();
2738 TCGv temp
= tcg_temp_new();
2739 TCGv temp2
= tcg_temp_new();
2741 tcg_gen_movi_tl(mask
, 1);
2742 tcg_gen_shl_tl(mask
, mask
, width
);
2743 tcg_gen_subi_tl(mask
, mask
, 1);
2744 tcg_gen_shl_tl(mask
, mask
, pos
);
2746 tcg_gen_shl_tl(temp
, r2
, pos
);
2747 tcg_gen_and_tl(temp
, temp
, mask
);
2748 tcg_gen_andc_tl(temp2
, r1
, mask
);
2749 tcg_gen_or_tl(ret
, temp
, temp2
);
2752 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2754 TCGv_i64 temp
= tcg_temp_new_i64();
2756 gen_helper_bsplit(temp
, r1
);
2757 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2760 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2762 TCGv_i64 temp
= tcg_temp_new_i64();
2764 gen_helper_unpack(temp
, r1
);
2765 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2769 gen_dvinit_b(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2771 TCGv_i64 ret
= tcg_temp_new_i64();
2773 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2774 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2776 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2778 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2782 gen_dvinit_h(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2784 TCGv_i64 ret
= tcg_temp_new_i64();
2786 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2787 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2789 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2791 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2794 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2796 TCGv temp
= tcg_temp_new();
2798 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2799 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2800 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2801 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2802 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2804 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2805 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2808 static void gen_calc_usb_mulr_h(TCGv arg
)
2810 TCGv temp
= tcg_temp_new();
2812 tcg_gen_add_tl(temp
, arg
, arg
);
2813 tcg_gen_xor_tl(temp
, temp
, arg
);
2814 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2815 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2817 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2819 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2822 /* helpers for generating program flow micro-ops */
2824 static inline void gen_save_pc(target_ulong pc
)
2826 tcg_gen_movi_tl(cpu_PC
, pc
);
2829 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2831 if (translator_use_goto_tb(&ctx
->base
, dest
)) {
2834 tcg_gen_exit_tb(ctx
->base
.tb
, n
);
2837 tcg_gen_lookup_and_goto_ptr();
2841 static void generate_trap(DisasContext
*ctx
, int class, int tin
)
2843 TCGv_i32 classtemp
= tcg_constant_i32(class);
2844 TCGv_i32 tintemp
= tcg_constant_i32(tin
);
2846 gen_save_pc(ctx
->base
.pc_next
);
2847 gen_helper_raise_exception_sync(cpu_env
, classtemp
, tintemp
);
2848 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2851 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2852 TCGv r2
, int16_t address
)
2854 TCGLabel
*jumpLabel
= gen_new_label();
2855 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2857 gen_goto_tb(ctx
, 1, ctx
->pc_succ_insn
);
2859 gen_set_label(jumpLabel
);
2860 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ address
* 2);
2863 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2864 int r2
, int16_t address
)
2866 TCGv temp
= tcg_constant_i32(r2
);
2867 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2870 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2872 TCGLabel
*l1
= gen_new_label();
2874 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2875 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2876 gen_goto_tb(ctx
, 1, ctx
->base
.pc_next
+ offset
);
2878 gen_goto_tb(ctx
, 0, ctx
->pc_succ_insn
);
2881 static void gen_fcall_save_ctx(DisasContext
*ctx
)
2883 TCGv temp
= tcg_temp_new();
2885 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
2886 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
2887 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
2888 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
2891 static void gen_fret(DisasContext
*ctx
)
2893 TCGv temp
= tcg_temp_new();
2895 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
2896 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
2897 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
2898 tcg_gen_mov_tl(cpu_PC
, temp
);
2899 tcg_gen_exit_tb(NULL
, 0);
2900 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2903 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2904 int r2
, int32_t constant
, int32_t offset
)
2910 /* SB-format jumps */
2913 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2915 case OPC1_32_B_CALL
:
2916 case OPC1_16_SB_CALL
:
2917 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2918 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2921 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2923 case OPC1_16_SB_JNZ
:
2924 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2926 /* SBC-format jumps */
2927 case OPC1_16_SBC_JEQ
:
2928 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2930 case OPC1_16_SBC_JEQ2
:
2931 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2934 case OPC1_16_SBC_JNE
:
2935 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2937 case OPC1_16_SBC_JNE2
:
2938 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2939 constant
, offset
+ 16);
2941 /* SBRN-format jumps */
2942 case OPC1_16_SBRN_JZ_T
:
2943 temp
= tcg_temp_new();
2944 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2945 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2947 case OPC1_16_SBRN_JNZ_T
:
2948 temp
= tcg_temp_new();
2949 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2950 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2952 /* SBR-format jumps */
2953 case OPC1_16_SBR_JEQ
:
2954 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2957 case OPC1_16_SBR_JEQ2
:
2958 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2961 case OPC1_16_SBR_JNE
:
2962 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2965 case OPC1_16_SBR_JNE2
:
2966 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2969 case OPC1_16_SBR_JNZ
:
2970 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2972 case OPC1_16_SBR_JNZ_A
:
2973 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2975 case OPC1_16_SBR_JGEZ
:
2976 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2978 case OPC1_16_SBR_JGTZ
:
2979 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2981 case OPC1_16_SBR_JLEZ
:
2982 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2984 case OPC1_16_SBR_JLTZ
:
2985 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2987 case OPC1_16_SBR_JZ
:
2988 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2990 case OPC1_16_SBR_JZ_A
:
2991 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2993 case OPC1_16_SBR_LOOP
:
2994 gen_loop(ctx
, r1
, offset
* 2 - 32);
2996 /* SR-format jumps */
2998 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2999 tcg_gen_exit_tb(NULL
, 0);
3001 case OPC2_32_SYS_RET
:
3002 case OPC2_16_SR_RET
:
3003 gen_helper_ret(cpu_env
);
3004 tcg_gen_exit_tb(NULL
, 0);
3007 case OPC1_32_B_CALLA
:
3008 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3009 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3011 case OPC1_32_B_FCALL
:
3012 gen_fcall_save_ctx(ctx
);
3013 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3015 case OPC1_32_B_FCALLA
:
3016 gen_fcall_save_ctx(ctx
);
3017 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3020 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3023 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3026 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3027 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3030 case OPCM_32_BRC_EQ_NEQ
:
3031 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3032 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3034 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3037 case OPCM_32_BRC_GE
:
3038 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3039 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3041 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3042 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3046 case OPCM_32_BRC_JLT
:
3047 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3048 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3050 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3051 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3055 case OPCM_32_BRC_JNE
:
3056 temp
= tcg_temp_new();
3057 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3058 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3059 /* subi is unconditional */
3060 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3061 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3063 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3064 /* addi is unconditional */
3065 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3066 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3070 case OPCM_32_BRN_JTT
:
3071 n
= MASK_OP_BRN_N(ctx
->opcode
);
3073 temp
= tcg_temp_new();
3074 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3076 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3077 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3079 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3083 case OPCM_32_BRR_EQ_NEQ
:
3084 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3085 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3088 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3092 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3093 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3094 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3097 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3101 case OPCM_32_BRR_GE
:
3102 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3103 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3106 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3110 case OPCM_32_BRR_JLT
:
3111 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3112 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3115 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3119 case OPCM_32_BRR_LOOP
:
3120 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3121 gen_loop(ctx
, r2
, offset
* 2);
3123 /* OPC2_32_BRR_LOOPU */
3124 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3127 case OPCM_32_BRR_JNE
:
3128 temp
= tcg_temp_new();
3129 temp2
= tcg_temp_new();
3130 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3131 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3132 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3133 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3134 /* subi is unconditional */
3135 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3136 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3138 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3139 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3140 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3141 /* addi is unconditional */
3142 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3143 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3146 case OPCM_32_BRR_JNZ
:
3147 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3148 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3150 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3154 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3156 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3161 * Functions for decoding instructions
3164 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3170 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3171 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3174 case OPC1_16_SRC_ADD
:
3175 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3177 case OPC1_16_SRC_ADD_A15
:
3178 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3180 case OPC1_16_SRC_ADD_15A
:
3181 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3183 case OPC1_16_SRC_ADD_A
:
3184 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3186 case OPC1_16_SRC_CADD
:
3187 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3190 case OPC1_16_SRC_CADDN
:
3191 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3194 case OPC1_16_SRC_CMOV
:
3195 temp
= tcg_constant_tl(0);
3196 temp2
= tcg_constant_tl(const4
);
3197 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3198 temp2
, cpu_gpr_d
[r1
]);
3200 case OPC1_16_SRC_CMOVN
:
3201 temp
= tcg_constant_tl(0);
3202 temp2
= tcg_constant_tl(const4
);
3203 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3204 temp2
, cpu_gpr_d
[r1
]);
3206 case OPC1_16_SRC_EQ
:
3207 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3210 case OPC1_16_SRC_LT
:
3211 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3214 case OPC1_16_SRC_MOV
:
3215 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3217 case OPC1_16_SRC_MOV_A
:
3218 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3219 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3221 case OPC1_16_SRC_MOV_E
:
3222 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3224 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3225 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3227 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3230 case OPC1_16_SRC_SH
:
3231 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3233 case OPC1_16_SRC_SHA
:
3234 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3237 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3241 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3246 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3247 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3250 case OPC1_16_SRR_ADD
:
3251 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3253 case OPC1_16_SRR_ADD_A15
:
3254 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3256 case OPC1_16_SRR_ADD_15A
:
3257 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3259 case OPC1_16_SRR_ADD_A
:
3260 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3262 case OPC1_16_SRR_ADDS
:
3263 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3265 case OPC1_16_SRR_AND
:
3266 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3268 case OPC1_16_SRR_CMOV
:
3269 temp
= tcg_constant_tl(0);
3270 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3271 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3273 case OPC1_16_SRR_CMOVN
:
3274 temp
= tcg_constant_tl(0);
3275 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3276 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3278 case OPC1_16_SRR_EQ
:
3279 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3282 case OPC1_16_SRR_LT
:
3283 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3286 case OPC1_16_SRR_MOV
:
3287 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3289 case OPC1_16_SRR_MOV_A
:
3290 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3292 case OPC1_16_SRR_MOV_AA
:
3293 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3295 case OPC1_16_SRR_MOV_D
:
3296 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3298 case OPC1_16_SRR_MUL
:
3299 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3301 case OPC1_16_SRR_OR
:
3302 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3304 case OPC1_16_SRR_SUB
:
3305 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3307 case OPC1_16_SRR_SUB_A15B
:
3308 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3310 case OPC1_16_SRR_SUB_15AB
:
3311 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3313 case OPC1_16_SRR_SUBS
:
3314 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3316 case OPC1_16_SRR_XOR
:
3317 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3320 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3324 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3328 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3329 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3332 case OPC1_16_SSR_ST_A
:
3333 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3335 case OPC1_16_SSR_ST_A_POSTINC
:
3336 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3337 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3339 case OPC1_16_SSR_ST_B
:
3340 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3342 case OPC1_16_SSR_ST_B_POSTINC
:
3343 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3344 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3346 case OPC1_16_SSR_ST_H
:
3347 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3349 case OPC1_16_SSR_ST_H_POSTINC
:
3350 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3351 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3353 case OPC1_16_SSR_ST_W
:
3354 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3356 case OPC1_16_SSR_ST_W_POSTINC
:
3357 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3358 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3361 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3365 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3369 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3372 case OPC1_16_SC_AND
:
3373 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3375 case OPC1_16_SC_BISR
:
3376 gen_helper_1arg(bisr
, const16
& 0xff);
3378 case OPC1_16_SC_LD_A
:
3379 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3381 case OPC1_16_SC_LD_W
:
3382 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3384 case OPC1_16_SC_MOV
:
3385 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3388 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3390 case OPC1_16_SC_ST_A
:
3391 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3393 case OPC1_16_SC_ST_W
:
3394 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3396 case OPC1_16_SC_SUB_A
:
3397 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3400 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3404 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3408 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3409 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3413 case OPC1_16_SLR_LD_A
:
3414 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3416 case OPC1_16_SLR_LD_A_POSTINC
:
3417 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3418 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3420 case OPC1_16_SLR_LD_BU
:
3421 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3423 case OPC1_16_SLR_LD_BU_POSTINC
:
3424 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3425 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3427 case OPC1_16_SLR_LD_H
:
3428 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3430 case OPC1_16_SLR_LD_H_POSTINC
:
3431 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3432 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3434 case OPC1_16_SLR_LD_W
:
3435 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3437 case OPC1_16_SLR_LD_W_POSTINC
:
3438 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3439 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3442 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3446 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3451 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3452 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3456 case OPC1_16_SRO_LD_A
:
3457 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3459 case OPC1_16_SRO_LD_BU
:
3460 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3462 case OPC1_16_SRO_LD_H
:
3463 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3465 case OPC1_16_SRO_LD_W
:
3466 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3468 case OPC1_16_SRO_ST_A
:
3469 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3471 case OPC1_16_SRO_ST_B
:
3472 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3474 case OPC1_16_SRO_ST_H
:
3475 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3477 case OPC1_16_SRO_ST_W
:
3478 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3481 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3485 static void decode_sr_system(DisasContext
*ctx
)
3488 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3491 case OPC2_16_SR_NOP
:
3493 case OPC2_16_SR_RET
:
3494 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3496 case OPC2_16_SR_RFE
:
3497 gen_helper_rfe(cpu_env
);
3498 tcg_gen_exit_tb(NULL
, 0);
3499 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3501 case OPC2_16_SR_DEBUG
:
3502 /* raise EXCP_DEBUG */
3504 case OPC2_16_SR_FRET
:
3508 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3512 static void decode_sr_accu(DisasContext
*ctx
)
3517 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3518 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3521 case OPC2_16_SR_RSUB
:
3522 /* calc V bit -- overflow only if r1 = -0x80000000 */
3523 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], -0x80000000);
3524 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3526 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3528 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3530 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3531 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3533 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3535 case OPC2_16_SR_SAT_B
:
3536 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3538 case OPC2_16_SR_SAT_BU
:
3539 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3541 case OPC2_16_SR_SAT_H
:
3542 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3544 case OPC2_16_SR_SAT_HU
:
3545 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3548 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3552 static void decode_16Bit_opc(DisasContext
*ctx
)
3560 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3562 /* handle ADDSC.A opcode only being 6 bit long */
3563 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3564 op1
= OPC1_16_SRRS_ADDSC_A
;
3568 case OPC1_16_SRC_ADD
:
3569 case OPC1_16_SRC_ADD_A15
:
3570 case OPC1_16_SRC_ADD_15A
:
3571 case OPC1_16_SRC_ADD_A
:
3572 case OPC1_16_SRC_CADD
:
3573 case OPC1_16_SRC_CADDN
:
3574 case OPC1_16_SRC_CMOV
:
3575 case OPC1_16_SRC_CMOVN
:
3576 case OPC1_16_SRC_EQ
:
3577 case OPC1_16_SRC_LT
:
3578 case OPC1_16_SRC_MOV
:
3579 case OPC1_16_SRC_MOV_A
:
3580 case OPC1_16_SRC_MOV_E
:
3581 case OPC1_16_SRC_SH
:
3582 case OPC1_16_SRC_SHA
:
3583 decode_src_opc(ctx
, op1
);
3586 case OPC1_16_SRR_ADD
:
3587 case OPC1_16_SRR_ADD_A15
:
3588 case OPC1_16_SRR_ADD_15A
:
3589 case OPC1_16_SRR_ADD_A
:
3590 case OPC1_16_SRR_ADDS
:
3591 case OPC1_16_SRR_AND
:
3592 case OPC1_16_SRR_CMOV
:
3593 case OPC1_16_SRR_CMOVN
:
3594 case OPC1_16_SRR_EQ
:
3595 case OPC1_16_SRR_LT
:
3596 case OPC1_16_SRR_MOV
:
3597 case OPC1_16_SRR_MOV_A
:
3598 case OPC1_16_SRR_MOV_AA
:
3599 case OPC1_16_SRR_MOV_D
:
3600 case OPC1_16_SRR_MUL
:
3601 case OPC1_16_SRR_OR
:
3602 case OPC1_16_SRR_SUB
:
3603 case OPC1_16_SRR_SUB_A15B
:
3604 case OPC1_16_SRR_SUB_15AB
:
3605 case OPC1_16_SRR_SUBS
:
3606 case OPC1_16_SRR_XOR
:
3607 decode_srr_opc(ctx
, op1
);
3610 case OPC1_16_SSR_ST_A
:
3611 case OPC1_16_SSR_ST_A_POSTINC
:
3612 case OPC1_16_SSR_ST_B
:
3613 case OPC1_16_SSR_ST_B_POSTINC
:
3614 case OPC1_16_SSR_ST_H
:
3615 case OPC1_16_SSR_ST_H_POSTINC
:
3616 case OPC1_16_SSR_ST_W
:
3617 case OPC1_16_SSR_ST_W_POSTINC
:
3618 decode_ssr_opc(ctx
, op1
);
3621 case OPC1_16_SRRS_ADDSC_A
:
3622 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3623 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3624 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3625 temp
= tcg_temp_new();
3626 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3627 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3630 case OPC1_16_SLRO_LD_A
:
3631 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3632 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3633 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3635 case OPC1_16_SLRO_LD_BU
:
3636 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3637 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3638 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3640 case OPC1_16_SLRO_LD_H
:
3641 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3642 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3643 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3645 case OPC1_16_SLRO_LD_W
:
3646 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3647 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3648 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3651 case OPC1_16_SB_CALL
:
3653 case OPC1_16_SB_JNZ
:
3655 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3656 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3659 case OPC1_16_SBC_JEQ
:
3660 case OPC1_16_SBC_JNE
:
3661 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3662 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3663 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3665 case OPC1_16_SBC_JEQ2
:
3666 case OPC1_16_SBC_JNE2
:
3667 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3668 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3669 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3670 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3672 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3676 case OPC1_16_SBRN_JNZ_T
:
3677 case OPC1_16_SBRN_JZ_T
:
3678 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3679 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3680 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3683 case OPC1_16_SBR_JEQ2
:
3684 case OPC1_16_SBR_JNE2
:
3685 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3686 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3687 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3688 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3690 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3693 case OPC1_16_SBR_JEQ
:
3694 case OPC1_16_SBR_JGEZ
:
3695 case OPC1_16_SBR_JGTZ
:
3696 case OPC1_16_SBR_JLEZ
:
3697 case OPC1_16_SBR_JLTZ
:
3698 case OPC1_16_SBR_JNE
:
3699 case OPC1_16_SBR_JNZ
:
3700 case OPC1_16_SBR_JNZ_A
:
3701 case OPC1_16_SBR_JZ
:
3702 case OPC1_16_SBR_JZ_A
:
3703 case OPC1_16_SBR_LOOP
:
3704 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3705 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3706 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3709 case OPC1_16_SC_AND
:
3710 case OPC1_16_SC_BISR
:
3711 case OPC1_16_SC_LD_A
:
3712 case OPC1_16_SC_LD_W
:
3713 case OPC1_16_SC_MOV
:
3715 case OPC1_16_SC_ST_A
:
3716 case OPC1_16_SC_ST_W
:
3717 case OPC1_16_SC_SUB_A
:
3718 decode_sc_opc(ctx
, op1
);
3721 case OPC1_16_SLR_LD_A
:
3722 case OPC1_16_SLR_LD_A_POSTINC
:
3723 case OPC1_16_SLR_LD_BU
:
3724 case OPC1_16_SLR_LD_BU_POSTINC
:
3725 case OPC1_16_SLR_LD_H
:
3726 case OPC1_16_SLR_LD_H_POSTINC
:
3727 case OPC1_16_SLR_LD_W
:
3728 case OPC1_16_SLR_LD_W_POSTINC
:
3729 decode_slr_opc(ctx
, op1
);
3732 case OPC1_16_SRO_LD_A
:
3733 case OPC1_16_SRO_LD_BU
:
3734 case OPC1_16_SRO_LD_H
:
3735 case OPC1_16_SRO_LD_W
:
3736 case OPC1_16_SRO_ST_A
:
3737 case OPC1_16_SRO_ST_B
:
3738 case OPC1_16_SRO_ST_H
:
3739 case OPC1_16_SRO_ST_W
:
3740 decode_sro_opc(ctx
, op1
);
3743 case OPC1_16_SSRO_ST_A
:
3744 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3745 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3746 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3748 case OPC1_16_SSRO_ST_B
:
3749 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3750 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3751 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3753 case OPC1_16_SSRO_ST_H
:
3754 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3755 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3756 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3758 case OPC1_16_SSRO_ST_W
:
3759 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3760 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3761 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3764 case OPCM_16_SR_SYSTEM
:
3765 decode_sr_system(ctx
);
3767 case OPCM_16_SR_ACCU
:
3768 decode_sr_accu(ctx
);
3771 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3772 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3774 case OPC1_16_SR_NOT
:
3775 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3776 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3779 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * 32 bit instructions
 */
3788 static void decode_abs_ldw(DisasContext
*ctx
)
3795 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3796 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3797 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3799 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3802 case OPC2_32_ABS_LD_A
:
3803 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3805 case OPC2_32_ABS_LD_D
:
3807 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3809 case OPC2_32_ABS_LD_DA
:
3811 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3813 case OPC2_32_ABS_LD_W
:
3814 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3817 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3821 static void decode_abs_ldb(DisasContext
*ctx
)
3828 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3829 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3830 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3832 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3835 case OPC2_32_ABS_LD_B
:
3836 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3838 case OPC2_32_ABS_LD_BU
:
3839 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3841 case OPC2_32_ABS_LD_H
:
3842 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3844 case OPC2_32_ABS_LD_HU
:
3845 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3848 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3852 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3859 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3860 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3861 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3863 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3866 case OPC2_32_ABS_LDMST
:
3867 gen_ldmst(ctx
, r1
, temp
);
3869 case OPC2_32_ABS_SWAP_W
:
3870 gen_swap(ctx
, r1
, temp
);
3873 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3877 static void decode_abs_ldst_context(DisasContext
*ctx
)
3882 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3883 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3886 case OPC2_32_ABS_LDLCX
:
3887 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3889 case OPC2_32_ABS_LDUCX
:
3890 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3892 case OPC2_32_ABS_STLCX
:
3893 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3895 case OPC2_32_ABS_STUCX
:
3896 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3899 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3903 static void decode_abs_store(DisasContext
*ctx
)
3910 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3911 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3912 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3914 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3917 case OPC2_32_ABS_ST_A
:
3918 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3920 case OPC2_32_ABS_ST_D
:
3922 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3924 case OPC2_32_ABS_ST_DA
:
3926 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3928 case OPC2_32_ABS_ST_W
:
3929 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3932 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3936 static void decode_abs_storeb_h(DisasContext
*ctx
)
3943 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3944 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3945 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3947 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3950 case OPC2_32_ABS_ST_B
:
3951 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3953 case OPC2_32_ABS_ST_H
:
3954 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3957 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3963 static void decode_bit_andacc(DisasContext
*ctx
)
3969 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3970 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3971 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3972 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3973 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3974 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3978 case OPC2_32_BIT_AND_AND_T
:
3979 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3980 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3982 case OPC2_32_BIT_AND_ANDN_T
:
3983 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3984 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3986 case OPC2_32_BIT_AND_NOR_T
:
3987 if (TCG_TARGET_HAS_andc_i32
) {
3988 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3989 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3991 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3992 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3995 case OPC2_32_BIT_AND_OR_T
:
3996 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3997 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4000 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4004 static void decode_bit_logical_t(DisasContext
*ctx
)
4009 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4010 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4011 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4012 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4013 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4014 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4017 case OPC2_32_BIT_AND_T
:
4018 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4019 pos1
, pos2
, &tcg_gen_and_tl
);
4021 case OPC2_32_BIT_ANDN_T
:
4022 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4023 pos1
, pos2
, &tcg_gen_andc_tl
);
4025 case OPC2_32_BIT_NOR_T
:
4026 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4027 pos1
, pos2
, &tcg_gen_nor_tl
);
4029 case OPC2_32_BIT_OR_T
:
4030 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4031 pos1
, pos2
, &tcg_gen_or_tl
);
4034 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4038 static void decode_bit_insert(DisasContext
*ctx
)
4044 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4045 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4046 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4047 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4048 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4049 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4051 temp
= tcg_temp_new();
4053 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4054 if (op2
== OPC2_32_BIT_INSN_T
) {
4055 tcg_gen_not_tl(temp
, temp
);
4057 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4060 static void decode_bit_logical_t2(DisasContext
*ctx
)
4067 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4068 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4069 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4070 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4071 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4072 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4075 case OPC2_32_BIT_NAND_T
:
4076 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4077 pos1
, pos2
, &tcg_gen_nand_tl
);
4079 case OPC2_32_BIT_ORN_T
:
4080 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4081 pos1
, pos2
, &tcg_gen_orc_tl
);
4083 case OPC2_32_BIT_XNOR_T
:
4084 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4085 pos1
, pos2
, &tcg_gen_eqv_tl
);
4087 case OPC2_32_BIT_XOR_T
:
4088 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4089 pos1
, pos2
, &tcg_gen_xor_tl
);
4092 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4096 static void decode_bit_orand(DisasContext
*ctx
)
4103 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4104 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4105 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4106 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4107 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4108 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4111 case OPC2_32_BIT_OR_AND_T
:
4112 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4113 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4115 case OPC2_32_BIT_OR_ANDN_T
:
4116 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4117 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4119 case OPC2_32_BIT_OR_NOR_T
:
4120 if (TCG_TARGET_HAS_orc_i32
) {
4121 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4122 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4124 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4125 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4128 case OPC2_32_BIT_OR_OR_T
:
4129 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4130 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4133 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4137 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4144 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4145 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4146 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4147 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4148 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4149 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4151 temp
= tcg_temp_new();
4154 case OPC2_32_BIT_SH_AND_T
:
4155 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4156 pos1
, pos2
, &tcg_gen_and_tl
);
4158 case OPC2_32_BIT_SH_ANDN_T
:
4159 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4160 pos1
, pos2
, &tcg_gen_andc_tl
);
4162 case OPC2_32_BIT_SH_NOR_T
:
4163 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4164 pos1
, pos2
, &tcg_gen_nor_tl
);
4166 case OPC2_32_BIT_SH_OR_T
:
4167 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4168 pos1
, pos2
, &tcg_gen_or_tl
);
4171 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4173 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4174 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4177 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4184 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4185 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4186 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4187 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4188 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4189 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4191 temp
= tcg_temp_new();
4194 case OPC2_32_BIT_SH_NAND_T
:
4195 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4196 pos1
, pos2
, &tcg_gen_nand_tl
);
4198 case OPC2_32_BIT_SH_ORN_T
:
4199 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4200 pos1
, pos2
, &tcg_gen_orc_tl
);
4202 case OPC2_32_BIT_SH_XNOR_T
:
4203 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4204 pos1
, pos2
, &tcg_gen_eqv_tl
);
4206 case OPC2_32_BIT_SH_XOR_T
:
4207 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4208 pos1
, pos2
, &tcg_gen_xor_tl
);
4211 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4213 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4214 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4220 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4227 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4228 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4229 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4230 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4233 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4234 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4235 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4236 /* instruction to access the cache */
4238 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4239 case OPC2_32_BO_CACHEA_W_POSTINC
:
4240 case OPC2_32_BO_CACHEA_I_POSTINC
:
4241 /* instruction to access the cache, but we still need to handle
4242 the addressing mode */
4243 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4245 case OPC2_32_BO_CACHEA_WI_PREINC
:
4246 case OPC2_32_BO_CACHEA_W_PREINC
:
4247 case OPC2_32_BO_CACHEA_I_PREINC
:
4248 /* instruction to access the cache, but we still need to handle
4249 the addressing mode */
4250 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4252 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4253 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4254 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4255 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4258 case OPC2_32_BO_CACHEI_W_POSTINC
:
4259 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4260 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4261 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4263 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4266 case OPC2_32_BO_CACHEI_W_PREINC
:
4267 case OPC2_32_BO_CACHEI_WI_PREINC
:
4268 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4269 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4271 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4274 case OPC2_32_BO_ST_A_SHORTOFF
:
4275 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4277 case OPC2_32_BO_ST_A_POSTINC
:
4278 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4280 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4282 case OPC2_32_BO_ST_A_PREINC
:
4283 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4285 case OPC2_32_BO_ST_B_SHORTOFF
:
4286 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4288 case OPC2_32_BO_ST_B_POSTINC
:
4289 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4291 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4293 case OPC2_32_BO_ST_B_PREINC
:
4294 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4296 case OPC2_32_BO_ST_D_SHORTOFF
:
4298 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4301 case OPC2_32_BO_ST_D_POSTINC
:
4303 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4304 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4306 case OPC2_32_BO_ST_D_PREINC
:
4308 temp
= tcg_temp_new();
4309 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4310 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4311 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4313 case OPC2_32_BO_ST_DA_SHORTOFF
:
4315 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4318 case OPC2_32_BO_ST_DA_POSTINC
:
4320 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4321 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4323 case OPC2_32_BO_ST_DA_PREINC
:
4325 temp
= tcg_temp_new();
4326 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4327 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4328 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4330 case OPC2_32_BO_ST_H_SHORTOFF
:
4331 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4333 case OPC2_32_BO_ST_H_POSTINC
:
4334 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4336 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4338 case OPC2_32_BO_ST_H_PREINC
:
4339 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4341 case OPC2_32_BO_ST_Q_SHORTOFF
:
4342 temp
= tcg_temp_new();
4343 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4344 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4346 case OPC2_32_BO_ST_Q_POSTINC
:
4347 temp
= tcg_temp_new();
4348 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4349 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4351 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4353 case OPC2_32_BO_ST_Q_PREINC
:
4354 temp
= tcg_temp_new();
4355 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4356 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4358 case OPC2_32_BO_ST_W_SHORTOFF
:
4359 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4361 case OPC2_32_BO_ST_W_POSTINC
:
4362 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4364 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4366 case OPC2_32_BO_ST_W_PREINC
:
4367 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4370 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4374 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4379 TCGv temp
, temp2
, t_off10
;
4381 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4382 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4383 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4384 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4386 temp
= tcg_temp_new();
4387 temp2
= tcg_temp_new();
4388 t_off10
= tcg_constant_i32(off10
);
4390 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4391 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4394 case OPC2_32_BO_CACHEA_WI_BR
:
4395 case OPC2_32_BO_CACHEA_W_BR
:
4396 case OPC2_32_BO_CACHEA_I_BR
:
4397 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4399 case OPC2_32_BO_CACHEA_WI_CIRC
:
4400 case OPC2_32_BO_CACHEA_W_CIRC
:
4401 case OPC2_32_BO_CACHEA_I_CIRC
:
4402 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4404 case OPC2_32_BO_ST_A_BR
:
4405 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4406 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4408 case OPC2_32_BO_ST_A_CIRC
:
4409 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4410 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4412 case OPC2_32_BO_ST_B_BR
:
4413 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4414 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4416 case OPC2_32_BO_ST_B_CIRC
:
4417 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4418 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4420 case OPC2_32_BO_ST_D_BR
:
4422 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4423 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4425 case OPC2_32_BO_ST_D_CIRC
:
4427 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4428 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4429 tcg_gen_addi_tl(temp
, temp
, 4);
4430 tcg_gen_rem_tl(temp
, temp
, temp2
);
4431 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4432 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4433 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4435 case OPC2_32_BO_ST_DA_BR
:
4437 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4438 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4440 case OPC2_32_BO_ST_DA_CIRC
:
4442 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4443 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4444 tcg_gen_addi_tl(temp
, temp
, 4);
4445 tcg_gen_rem_tl(temp
, temp
, temp2
);
4446 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4447 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4448 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4450 case OPC2_32_BO_ST_H_BR
:
4451 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4452 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4454 case OPC2_32_BO_ST_H_CIRC
:
4455 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4456 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4458 case OPC2_32_BO_ST_Q_BR
:
4459 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4460 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4461 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4463 case OPC2_32_BO_ST_Q_CIRC
:
4464 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4465 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4466 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4468 case OPC2_32_BO_ST_W_BR
:
4469 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4470 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4472 case OPC2_32_BO_ST_W_CIRC
:
4473 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4474 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4477 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4481 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4488 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4489 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4490 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4491 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4494 case OPC2_32_BO_LD_A_SHORTOFF
:
4495 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4497 case OPC2_32_BO_LD_A_POSTINC
:
4498 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4500 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4502 case OPC2_32_BO_LD_A_PREINC
:
4503 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4505 case OPC2_32_BO_LD_B_SHORTOFF
:
4506 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4508 case OPC2_32_BO_LD_B_POSTINC
:
4509 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4511 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4513 case OPC2_32_BO_LD_B_PREINC
:
4514 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4516 case OPC2_32_BO_LD_BU_SHORTOFF
:
4517 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4519 case OPC2_32_BO_LD_BU_POSTINC
:
4520 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4522 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4524 case OPC2_32_BO_LD_BU_PREINC
:
4525 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4527 case OPC2_32_BO_LD_D_SHORTOFF
:
4529 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4532 case OPC2_32_BO_LD_D_POSTINC
:
4534 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4535 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4537 case OPC2_32_BO_LD_D_PREINC
:
4539 temp
= tcg_temp_new();
4540 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4541 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4542 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4544 case OPC2_32_BO_LD_DA_SHORTOFF
:
4546 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4549 case OPC2_32_BO_LD_DA_POSTINC
:
4551 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4552 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4554 case OPC2_32_BO_LD_DA_PREINC
:
4556 temp
= tcg_temp_new();
4557 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4558 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4559 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4561 case OPC2_32_BO_LD_H_SHORTOFF
:
4562 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4564 case OPC2_32_BO_LD_H_POSTINC
:
4565 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4567 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4569 case OPC2_32_BO_LD_H_PREINC
:
4570 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4572 case OPC2_32_BO_LD_HU_SHORTOFF
:
4573 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4575 case OPC2_32_BO_LD_HU_POSTINC
:
4576 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4578 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4580 case OPC2_32_BO_LD_HU_PREINC
:
4581 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4583 case OPC2_32_BO_LD_Q_SHORTOFF
:
4584 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4585 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4587 case OPC2_32_BO_LD_Q_POSTINC
:
4588 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4590 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4591 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4593 case OPC2_32_BO_LD_Q_PREINC
:
4594 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4595 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4597 case OPC2_32_BO_LD_W_SHORTOFF
:
4598 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4600 case OPC2_32_BO_LD_W_POSTINC
:
4601 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4603 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4605 case OPC2_32_BO_LD_W_PREINC
:
4606 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4609 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4613 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4618 TCGv temp
, temp2
, t_off10
;
4620 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4621 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4622 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4623 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4625 temp
= tcg_temp_new();
4626 temp2
= tcg_temp_new();
4627 t_off10
= tcg_constant_i32(off10
);
4629 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4630 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4634 case OPC2_32_BO_LD_A_BR
:
4635 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4636 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4638 case OPC2_32_BO_LD_A_CIRC
:
4639 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4640 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4642 case OPC2_32_BO_LD_B_BR
:
4643 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4644 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4646 case OPC2_32_BO_LD_B_CIRC
:
4647 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4648 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4650 case OPC2_32_BO_LD_BU_BR
:
4651 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4652 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4654 case OPC2_32_BO_LD_BU_CIRC
:
4655 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4656 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4658 case OPC2_32_BO_LD_D_BR
:
4660 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4661 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4663 case OPC2_32_BO_LD_D_CIRC
:
4665 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4666 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4667 tcg_gen_addi_tl(temp
, temp
, 4);
4668 tcg_gen_rem_tl(temp
, temp
, temp2
);
4669 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4670 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4671 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4673 case OPC2_32_BO_LD_DA_BR
:
4675 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4676 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4678 case OPC2_32_BO_LD_DA_CIRC
:
4680 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4681 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4682 tcg_gen_addi_tl(temp
, temp
, 4);
4683 tcg_gen_rem_tl(temp
, temp
, temp2
);
4684 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4685 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4686 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4688 case OPC2_32_BO_LD_H_BR
:
4689 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4690 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4692 case OPC2_32_BO_LD_H_CIRC
:
4693 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4694 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4696 case OPC2_32_BO_LD_HU_BR
:
4697 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4698 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4700 case OPC2_32_BO_LD_HU_CIRC
:
4701 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4702 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4704 case OPC2_32_BO_LD_Q_BR
:
4705 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4706 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4707 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4709 case OPC2_32_BO_LD_Q_CIRC
:
4710 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4711 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4712 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4714 case OPC2_32_BO_LD_W_BR
:
4715 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4716 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4718 case OPC2_32_BO_LD_W_CIRC
:
4719 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4720 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4723 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4727 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4735 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4736 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4737 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4738 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4741 temp
= tcg_temp_new();
4744 case OPC2_32_BO_LDLCX_SHORTOFF
:
4745 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4746 gen_helper_ldlcx(cpu_env
, temp
);
4748 case OPC2_32_BO_LDMST_SHORTOFF
:
4749 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4750 gen_ldmst(ctx
, r1
, temp
);
4752 case OPC2_32_BO_LDMST_POSTINC
:
4753 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4754 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4756 case OPC2_32_BO_LDMST_PREINC
:
4757 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4758 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4760 case OPC2_32_BO_LDUCX_SHORTOFF
:
4761 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4762 gen_helper_lducx(cpu_env
, temp
);
4764 case OPC2_32_BO_LEA_SHORTOFF
:
4765 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4767 case OPC2_32_BO_STLCX_SHORTOFF
:
4768 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4769 gen_helper_stlcx(cpu_env
, temp
);
4771 case OPC2_32_BO_STUCX_SHORTOFF
:
4772 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4773 gen_helper_stucx(cpu_env
, temp
);
4775 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4776 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4777 gen_swap(ctx
, r1
, temp
);
4779 case OPC2_32_BO_SWAP_W_POSTINC
:
4780 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4781 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4783 case OPC2_32_BO_SWAP_W_PREINC
:
4784 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4785 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4787 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4788 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4789 gen_cmpswap(ctx
, r1
, temp
);
4791 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4792 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4793 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4795 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4796 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4797 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4799 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4800 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4801 gen_swapmsk(ctx
, r1
, temp
);
4803 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4804 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4805 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4807 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4808 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4809 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4812 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4816 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4821 TCGv temp
, temp2
, t_off10
;
4823 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4824 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4825 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4826 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4828 temp
= tcg_temp_new();
4829 temp2
= tcg_temp_new();
4830 t_off10
= tcg_constant_i32(off10
);
4832 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4833 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4836 case OPC2_32_BO_LDMST_BR
:
4837 gen_ldmst(ctx
, r1
, temp2
);
4838 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4840 case OPC2_32_BO_LDMST_CIRC
:
4841 gen_ldmst(ctx
, r1
, temp2
);
4842 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4844 case OPC2_32_BO_SWAP_W_BR
:
4845 gen_swap(ctx
, r1
, temp2
);
4846 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4848 case OPC2_32_BO_SWAP_W_CIRC
:
4849 gen_swap(ctx
, r1
, temp2
);
4850 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4852 case OPC2_32_BO_CMPSWAP_W_BR
:
4853 gen_cmpswap(ctx
, r1
, temp2
);
4854 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4856 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4857 gen_cmpswap(ctx
, r1
, temp2
);
4858 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4860 case OPC2_32_BO_SWAPMSK_W_BR
:
4861 gen_swapmsk(ctx
, r1
, temp2
);
4862 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4864 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4865 gen_swapmsk(ctx
, r1
, temp2
);
4866 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4869 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4873 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4879 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4880 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4881 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4884 case OPC1_32_BOL_LD_A_LONGOFF
:
4885 temp
= tcg_temp_new();
4886 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4887 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4889 case OPC1_32_BOL_LD_W_LONGOFF
:
4890 temp
= tcg_temp_new();
4891 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4892 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4894 case OPC1_32_BOL_LEA_LONGOFF
:
4895 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4897 case OPC1_32_BOL_ST_A_LONGOFF
:
4898 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4899 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4901 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4904 case OPC1_32_BOL_ST_W_LONGOFF
:
4905 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4907 case OPC1_32_BOL_LD_B_LONGOFF
:
4908 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4909 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4911 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4914 case OPC1_32_BOL_LD_BU_LONGOFF
:
4915 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4916 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4918 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4921 case OPC1_32_BOL_LD_H_LONGOFF
:
4922 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4923 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4925 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4928 case OPC1_32_BOL_LD_HU_LONGOFF
:
4929 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4930 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4932 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4935 case OPC1_32_BOL_ST_B_LONGOFF
:
4936 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4937 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4939 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4942 case OPC1_32_BOL_ST_H_LONGOFF
:
4943 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4944 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4946 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4950 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4955 static void decode_rc_logical_shift(DisasContext
*ctx
)
4962 r2
= MASK_OP_RC_D(ctx
->opcode
);
4963 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4964 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4965 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4967 temp
= tcg_temp_new();
4970 case OPC2_32_RC_AND
:
4971 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4973 case OPC2_32_RC_ANDN
:
4974 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4976 case OPC2_32_RC_NAND
:
4977 tcg_gen_movi_tl(temp
, const9
);
4978 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4980 case OPC2_32_RC_NOR
:
4981 tcg_gen_movi_tl(temp
, const9
);
4982 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4985 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4987 case OPC2_32_RC_ORN
:
4988 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4991 const9
= sextract32(const9
, 0, 6);
4992 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4994 case OPC2_32_RC_SH_H
:
4995 const9
= sextract32(const9
, 0, 5);
4996 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4998 case OPC2_32_RC_SHA
:
4999 const9
= sextract32(const9
, 0, 6);
5000 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5002 case OPC2_32_RC_SHA_H
:
5003 const9
= sextract32(const9
, 0, 5);
5004 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5006 case OPC2_32_RC_SHAS
:
5007 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5009 case OPC2_32_RC_XNOR
:
5010 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5011 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5013 case OPC2_32_RC_XOR
:
5014 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5016 case OPC2_32_RC_SHUFFLE
:
5017 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
5018 TCGv temp
= tcg_constant_i32(const9
);
5019 gen_helper_shuffle(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5021 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5025 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5029 static void decode_rc_accumulator(DisasContext
*ctx
)
5037 r2
= MASK_OP_RC_D(ctx
->opcode
);
5038 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5039 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5041 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5043 temp
= tcg_temp_new();
5046 case OPC2_32_RC_ABSDIF
:
5047 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5049 case OPC2_32_RC_ABSDIFS
:
5050 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5052 case OPC2_32_RC_ADD
:
5053 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5055 case OPC2_32_RC_ADDC
:
5056 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5058 case OPC2_32_RC_ADDS
:
5059 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5061 case OPC2_32_RC_ADDS_U
:
5062 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5064 case OPC2_32_RC_ADDX
:
5065 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5067 case OPC2_32_RC_AND_EQ
:
5068 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5069 const9
, &tcg_gen_and_tl
);
5071 case OPC2_32_RC_AND_GE
:
5072 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5073 const9
, &tcg_gen_and_tl
);
5075 case OPC2_32_RC_AND_GE_U
:
5076 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5077 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5078 const9
, &tcg_gen_and_tl
);
5080 case OPC2_32_RC_AND_LT
:
5081 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5082 const9
, &tcg_gen_and_tl
);
5084 case OPC2_32_RC_AND_LT_U
:
5085 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5086 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5087 const9
, &tcg_gen_and_tl
);
5089 case OPC2_32_RC_AND_NE
:
5090 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5091 const9
, &tcg_gen_and_tl
);
5094 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5096 case OPC2_32_RC_EQANY_B
:
5097 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5099 case OPC2_32_RC_EQANY_H
:
5100 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5103 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5105 case OPC2_32_RC_GE_U
:
5106 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5107 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5110 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5112 case OPC2_32_RC_LT_U
:
5113 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5114 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5116 case OPC2_32_RC_MAX
:
5117 tcg_gen_movi_tl(temp
, const9
);
5118 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5119 cpu_gpr_d
[r1
], temp
);
5121 case OPC2_32_RC_MAX_U
:
5122 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5123 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5124 cpu_gpr_d
[r1
], temp
);
5126 case OPC2_32_RC_MIN
:
5127 tcg_gen_movi_tl(temp
, const9
);
5128 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5129 cpu_gpr_d
[r1
], temp
);
5131 case OPC2_32_RC_MIN_U
:
5132 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5133 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5134 cpu_gpr_d
[r1
], temp
);
5137 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5139 case OPC2_32_RC_OR_EQ
:
5140 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5141 const9
, &tcg_gen_or_tl
);
5143 case OPC2_32_RC_OR_GE
:
5144 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5145 const9
, &tcg_gen_or_tl
);
5147 case OPC2_32_RC_OR_GE_U
:
5148 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5149 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5150 const9
, &tcg_gen_or_tl
);
5152 case OPC2_32_RC_OR_LT
:
5153 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5154 const9
, &tcg_gen_or_tl
);
5156 case OPC2_32_RC_OR_LT_U
:
5157 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5158 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5159 const9
, &tcg_gen_or_tl
);
5161 case OPC2_32_RC_OR_NE
:
5162 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5163 const9
, &tcg_gen_or_tl
);
5165 case OPC2_32_RC_RSUB
:
5166 tcg_gen_movi_tl(temp
, const9
);
5167 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5169 case OPC2_32_RC_RSUBS
:
5170 tcg_gen_movi_tl(temp
, const9
);
5171 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5173 case OPC2_32_RC_RSUBS_U
:
5174 tcg_gen_movi_tl(temp
, const9
);
5175 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5177 case OPC2_32_RC_SH_EQ
:
5178 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5180 case OPC2_32_RC_SH_GE
:
5181 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5183 case OPC2_32_RC_SH_GE_U
:
5184 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5185 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5187 case OPC2_32_RC_SH_LT
:
5188 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5190 case OPC2_32_RC_SH_LT_U
:
5191 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5192 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5194 case OPC2_32_RC_SH_NE
:
5195 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5197 case OPC2_32_RC_XOR_EQ
:
5198 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5199 const9
, &tcg_gen_xor_tl
);
5201 case OPC2_32_RC_XOR_GE
:
5202 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5203 const9
, &tcg_gen_xor_tl
);
5205 case OPC2_32_RC_XOR_GE_U
:
5206 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5207 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5208 const9
, &tcg_gen_xor_tl
);
5210 case OPC2_32_RC_XOR_LT
:
5211 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5212 const9
, &tcg_gen_xor_tl
);
5214 case OPC2_32_RC_XOR_LT_U
:
5215 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5216 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5217 const9
, &tcg_gen_xor_tl
);
5219 case OPC2_32_RC_XOR_NE
:
5220 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5221 const9
, &tcg_gen_xor_tl
);
5224 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5228 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5233 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5234 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5237 case OPC2_32_RC_BISR
:
5238 gen_helper_1arg(bisr
, const9
);
5240 case OPC2_32_RC_SYSCALL
:
5241 generate_trap(ctx
, TRAPC_SYSCALL
, const9
& 0xff);
5244 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5248 static void decode_rc_mul(DisasContext
*ctx
)
5254 r2
= MASK_OP_RC_D(ctx
->opcode
);
5255 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5256 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5258 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5261 case OPC2_32_RC_MUL_32
:
5262 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5264 case OPC2_32_RC_MUL_64
:
5266 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5268 case OPC2_32_RC_MULS_32
:
5269 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5271 case OPC2_32_RC_MUL_U_64
:
5272 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5274 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5276 case OPC2_32_RC_MULS_U_32
:
5277 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5278 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5281 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5286 static void decode_rcpw_insert(DisasContext
*ctx
)
5290 int32_t pos
, width
, const4
;
5294 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5295 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5296 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5297 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5298 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5299 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5302 case OPC2_32_RCPW_IMASK
:
5304 /* if pos + width > 32 undefined result */
5305 if (pos
+ width
<= 32) {
5306 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5307 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5310 case OPC2_32_RCPW_INSERT
:
5311 /* if pos + width > 32 undefined result */
5312 if (pos
+ width
<= 32) {
5313 temp
= tcg_constant_i32(const4
);
5314 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5318 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5324 static void decode_rcrw_insert(DisasContext
*ctx
)
5328 int32_t width
, const4
;
5330 TCGv temp
, temp2
, temp3
;
5332 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5333 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5334 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5335 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5336 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5337 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5339 temp
= tcg_temp_new();
5340 temp2
= tcg_temp_new();
5343 case OPC2_32_RCRW_IMASK
:
5345 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5346 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5347 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5348 tcg_gen_movi_tl(temp2
, const4
);
5349 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5351 case OPC2_32_RCRW_INSERT
:
5352 temp3
= tcg_temp_new();
5354 tcg_gen_movi_tl(temp
, width
);
5355 tcg_gen_movi_tl(temp2
, const4
);
5356 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5357 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5360 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5366 static void decode_rcr_cond_select(DisasContext
*ctx
)
5374 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5375 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5376 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5377 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5378 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5381 case OPC2_32_RCR_CADD
:
5382 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5385 case OPC2_32_RCR_CADDN
:
5386 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5389 case OPC2_32_RCR_SEL
:
5390 temp
= tcg_constant_i32(0);
5391 temp2
= tcg_constant_i32(const9
);
5392 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5393 cpu_gpr_d
[r1
], temp2
);
5395 case OPC2_32_RCR_SELN
:
5396 temp
= tcg_constant_i32(0);
5397 temp2
= tcg_constant_i32(const9
);
5398 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5399 cpu_gpr_d
[r1
], temp2
);
5402 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5406 static void decode_rcr_madd(DisasContext
*ctx
)
5413 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5414 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5415 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5416 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5417 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5420 case OPC2_32_RCR_MADD_32
:
5421 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5423 case OPC2_32_RCR_MADD_64
:
5426 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5427 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5429 case OPC2_32_RCR_MADDS_32
:
5430 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5432 case OPC2_32_RCR_MADDS_64
:
5435 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5436 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5438 case OPC2_32_RCR_MADD_U_64
:
5441 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5442 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5443 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5445 case OPC2_32_RCR_MADDS_U_32
:
5446 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5447 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5449 case OPC2_32_RCR_MADDS_U_64
:
5452 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5453 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5454 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5457 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5461 static void decode_rcr_msub(DisasContext
*ctx
)
5468 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5469 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5470 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5471 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5472 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5475 case OPC2_32_RCR_MSUB_32
:
5476 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5478 case OPC2_32_RCR_MSUB_64
:
5481 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5482 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5484 case OPC2_32_RCR_MSUBS_32
:
5485 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5487 case OPC2_32_RCR_MSUBS_64
:
5490 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5491 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5493 case OPC2_32_RCR_MSUB_U_64
:
5496 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5497 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5498 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5500 case OPC2_32_RCR_MSUBS_U_32
:
5501 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5502 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5504 case OPC2_32_RCR_MSUBS_U_64
:
5507 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5508 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5509 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5512 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5518 static void decode_rlc_opc(DisasContext
*ctx
,
5524 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5525 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5526 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5529 case OPC1_32_RLC_ADDI
:
5530 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5532 case OPC1_32_RLC_ADDIH
:
5533 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5535 case OPC1_32_RLC_ADDIH_A
:
5536 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5538 case OPC1_32_RLC_MFCR
:
5539 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5540 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5542 case OPC1_32_RLC_MOV
:
5543 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5545 case OPC1_32_RLC_MOV_64
:
5546 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5548 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5549 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5551 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5554 case OPC1_32_RLC_MOV_U
:
5555 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5556 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5558 case OPC1_32_RLC_MOV_H
:
5559 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5561 case OPC1_32_RLC_MOVH_A
:
5562 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5564 case OPC1_32_RLC_MTCR
:
5565 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5566 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5569 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5574 static void decode_rr_accumulator(DisasContext
*ctx
)
5581 r3
= MASK_OP_RR_D(ctx
->opcode
);
5582 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5583 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5584 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5587 case OPC2_32_RR_ABS
:
5588 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5590 case OPC2_32_RR_ABS_B
:
5591 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5593 case OPC2_32_RR_ABS_H
:
5594 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5596 case OPC2_32_RR_ABSDIF
:
5597 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5599 case OPC2_32_RR_ABSDIF_B
:
5600 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5603 case OPC2_32_RR_ABSDIF_H
:
5604 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5607 case OPC2_32_RR_ABSDIFS
:
5608 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5611 case OPC2_32_RR_ABSDIFS_H
:
5612 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5615 case OPC2_32_RR_ABSS
:
5616 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5618 case OPC2_32_RR_ABSS_H
:
5619 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5621 case OPC2_32_RR_ADD
:
5622 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5624 case OPC2_32_RR_ADD_B
:
5625 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5627 case OPC2_32_RR_ADD_H
:
5628 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5630 case OPC2_32_RR_ADDC
:
5631 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5633 case OPC2_32_RR_ADDS
:
5634 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5636 case OPC2_32_RR_ADDS_H
:
5637 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5640 case OPC2_32_RR_ADDS_HU
:
5641 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5644 case OPC2_32_RR_ADDS_U
:
5645 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5648 case OPC2_32_RR_ADDX
:
5649 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5651 case OPC2_32_RR_AND_EQ
:
5652 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5653 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5655 case OPC2_32_RR_AND_GE
:
5656 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5657 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5659 case OPC2_32_RR_AND_GE_U
:
5660 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5661 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5663 case OPC2_32_RR_AND_LT
:
5664 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5665 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5667 case OPC2_32_RR_AND_LT_U
:
5668 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5669 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5671 case OPC2_32_RR_AND_NE
:
5672 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5673 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5676 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5679 case OPC2_32_RR_EQ_B
:
5680 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5682 case OPC2_32_RR_EQ_H
:
5683 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5685 case OPC2_32_RR_EQ_W
:
5686 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5688 case OPC2_32_RR_EQANY_B
:
5689 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5691 case OPC2_32_RR_EQANY_H
:
5692 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5695 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5698 case OPC2_32_RR_GE_U
:
5699 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5703 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5706 case OPC2_32_RR_LT_U
:
5707 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5710 case OPC2_32_RR_LT_B
:
5711 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5713 case OPC2_32_RR_LT_BU
:
5714 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5716 case OPC2_32_RR_LT_H
:
5717 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5719 case OPC2_32_RR_LT_HU
:
5720 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5722 case OPC2_32_RR_LT_W
:
5723 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5725 case OPC2_32_RR_LT_WU
:
5726 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5728 case OPC2_32_RR_MAX
:
5729 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5730 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5732 case OPC2_32_RR_MAX_U
:
5733 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5734 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5736 case OPC2_32_RR_MAX_B
:
5737 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5739 case OPC2_32_RR_MAX_BU
:
5740 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5742 case OPC2_32_RR_MAX_H
:
5743 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5745 case OPC2_32_RR_MAX_HU
:
5746 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5748 case OPC2_32_RR_MIN
:
5749 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5750 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5752 case OPC2_32_RR_MIN_U
:
5753 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5754 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5756 case OPC2_32_RR_MIN_B
:
5757 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5759 case OPC2_32_RR_MIN_BU
:
5760 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5762 case OPC2_32_RR_MIN_H
:
5763 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5765 case OPC2_32_RR_MIN_HU
:
5766 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5768 case OPC2_32_RR_MOV
:
5769 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5771 case OPC2_32_RR_MOV_64
:
5772 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5773 temp
= tcg_temp_new();
5776 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5777 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5778 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5780 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5783 case OPC2_32_RR_MOVS_64
:
5784 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5786 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5787 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5789 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5793 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5796 case OPC2_32_RR_OR_EQ
:
5797 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5798 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5800 case OPC2_32_RR_OR_GE
:
5801 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5802 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5804 case OPC2_32_RR_OR_GE_U
:
5805 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5806 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5808 case OPC2_32_RR_OR_LT
:
5809 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5810 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5812 case OPC2_32_RR_OR_LT_U
:
5813 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5814 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5816 case OPC2_32_RR_OR_NE
:
5817 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5818 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5820 case OPC2_32_RR_SAT_B
:
5821 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5823 case OPC2_32_RR_SAT_BU
:
5824 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5826 case OPC2_32_RR_SAT_H
:
5827 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5829 case OPC2_32_RR_SAT_HU
:
5830 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5832 case OPC2_32_RR_SH_EQ
:
5833 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5836 case OPC2_32_RR_SH_GE
:
5837 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5840 case OPC2_32_RR_SH_GE_U
:
5841 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5844 case OPC2_32_RR_SH_LT
:
5845 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5848 case OPC2_32_RR_SH_LT_U
:
5849 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5852 case OPC2_32_RR_SH_NE
:
5853 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5856 case OPC2_32_RR_SUB
:
5857 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5859 case OPC2_32_RR_SUB_B
:
5860 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5862 case OPC2_32_RR_SUB_H
:
5863 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5865 case OPC2_32_RR_SUBC
:
5866 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5868 case OPC2_32_RR_SUBS
:
5869 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5871 case OPC2_32_RR_SUBS_U
:
5872 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5874 case OPC2_32_RR_SUBS_H
:
5875 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5878 case OPC2_32_RR_SUBS_HU
:
5879 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5882 case OPC2_32_RR_SUBX
:
5883 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5885 case OPC2_32_RR_XOR_EQ
:
5886 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5887 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5889 case OPC2_32_RR_XOR_GE
:
5890 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5891 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5893 case OPC2_32_RR_XOR_GE_U
:
5894 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5895 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5897 case OPC2_32_RR_XOR_LT
:
5898 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5899 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5901 case OPC2_32_RR_XOR_LT_U
:
5902 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5903 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5905 case OPC2_32_RR_XOR_NE
:
5906 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5907 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5910 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5914 static void decode_rr_logical_shift(DisasContext
*ctx
)
5919 r3
= MASK_OP_RR_D(ctx
->opcode
);
5920 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5921 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5922 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5925 case OPC2_32_RR_AND
:
5926 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5928 case OPC2_32_RR_ANDN
:
5929 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5931 case OPC2_32_RR_CLO
:
5932 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5933 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5935 case OPC2_32_RR_CLO_H
:
5936 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5938 case OPC2_32_RR_CLS
:
5939 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5941 case OPC2_32_RR_CLS_H
:
5942 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5944 case OPC2_32_RR_CLZ
:
5945 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5947 case OPC2_32_RR_CLZ_H
:
5948 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5950 case OPC2_32_RR_NAND
:
5951 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5953 case OPC2_32_RR_NOR
:
5954 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5957 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5959 case OPC2_32_RR_ORN
:
5960 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5963 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5965 case OPC2_32_RR_SH_H
:
5966 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5968 case OPC2_32_RR_SHA
:
5969 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5971 case OPC2_32_RR_SHA_H
:
5972 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5974 case OPC2_32_RR_SHAS
:
5975 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5977 case OPC2_32_RR_XNOR
:
5978 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5980 case OPC2_32_RR_XOR
:
5981 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5984 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5988 static void decode_rr_address(DisasContext
*ctx
)
5994 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5995 r3
= MASK_OP_RR_D(ctx
->opcode
);
5996 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5997 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5998 n
= MASK_OP_RR_N(ctx
->opcode
);
6001 case OPC2_32_RR_ADD_A
:
6002 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6004 case OPC2_32_RR_ADDSC_A
:
6005 temp
= tcg_temp_new();
6006 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
6007 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6009 case OPC2_32_RR_ADDSC_AT
:
6010 temp
= tcg_temp_new();
6011 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6012 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6013 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6015 case OPC2_32_RR_EQ_A
:
6016 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6019 case OPC2_32_RR_EQZ
:
6020 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6022 case OPC2_32_RR_GE_A
:
6023 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6026 case OPC2_32_RR_LT_A
:
6027 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6030 case OPC2_32_RR_MOV_A
:
6031 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6033 case OPC2_32_RR_MOV_AA
:
6034 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6036 case OPC2_32_RR_MOV_D
:
6037 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6039 case OPC2_32_RR_NE_A
:
6040 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6043 case OPC2_32_RR_NEZ_A
:
6044 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6046 case OPC2_32_RR_SUB_A
:
6047 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6050 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6054 static void decode_rr_idirect(DisasContext
*ctx
)
6059 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6060 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6064 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6066 case OPC2_32_RR_JLI
:
6067 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6068 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6070 case OPC2_32_RR_CALLI
:
6071 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6072 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6074 case OPC2_32_RR_FCALLI
:
6075 gen_fcall_save_ctx(ctx
);
6076 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6079 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6081 tcg_gen_exit_tb(NULL
, 0);
6082 ctx
->base
.is_jmp
= DISAS_NORETURN
;
6085 static void decode_rr_divide(DisasContext
*ctx
)
6090 TCGv temp
, temp2
, temp3
;
6092 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6093 r3
= MASK_OP_RR_D(ctx
->opcode
);
6094 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6095 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6098 case OPC2_32_RR_BMERGE
:
6099 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6101 case OPC2_32_RR_BSPLIT
:
6103 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6105 case OPC2_32_RR_DVINIT_B
:
6107 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6110 case OPC2_32_RR_DVINIT_BU
:
6111 temp
= tcg_temp_new();
6112 temp2
= tcg_temp_new();
6113 temp3
= tcg_temp_new();
6115 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6117 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6118 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6119 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6120 tcg_gen_abs_tl(temp
, temp3
);
6121 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6122 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6124 /* overflow = (D[b] == 0) */
6125 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6127 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6129 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6131 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6132 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6134 case OPC2_32_RR_DVINIT_H
:
6136 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6139 case OPC2_32_RR_DVINIT_HU
:
6140 temp
= tcg_temp_new();
6141 temp2
= tcg_temp_new();
6142 temp3
= tcg_temp_new();
6144 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6146 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6147 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6148 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6149 tcg_gen_abs_tl(temp
, temp3
);
6150 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6151 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6153 /* overflow = (D[b] == 0) */
6154 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6156 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6158 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6160 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6161 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6163 case OPC2_32_RR_DVINIT
:
6164 temp
= tcg_temp_new();
6165 temp2
= tcg_temp_new();
6167 /* overflow = ((D[b] == 0) ||
6168 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6169 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6170 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6171 tcg_gen_and_tl(temp
, temp
, temp2
);
6172 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6173 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6174 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6176 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6178 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6180 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6181 /* sign extend to high reg */
6182 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6184 case OPC2_32_RR_DVINIT_U
:
6186 /* overflow = (D[b] == 0) */
6187 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6188 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6190 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6192 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6194 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6195 /* zero extend to high reg*/
6196 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6198 case OPC2_32_RR_PARITY
:
6199 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6201 case OPC2_32_RR_UNPACK
:
6203 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6205 case OPC2_32_RR_CRC32_B
:
6206 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6207 gen_helper_crc32b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6209 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6212 case OPC2_32_RR_CRC32
: /* CRC32B.W in 1.6.2 */
6213 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6214 gen_helper_crc32_be(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6216 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6219 case OPC2_32_RR_CRC32L_W
:
6220 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6221 gen_helper_crc32_le(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6223 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6227 case OPC2_32_RR_POPCNT_W
:
6228 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6229 tcg_gen_ctpop_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6231 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6234 case OPC2_32_RR_DIV
:
6235 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6237 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6240 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6243 case OPC2_32_RR_DIV_U
:
6244 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6246 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6247 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6249 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6252 case OPC2_32_RR_MUL_F
:
6253 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6255 case OPC2_32_RR_DIV_F
:
6256 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6258 case OPC2_32_RR_CMP_F
:
6259 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6261 case OPC2_32_RR_FTOI
:
6262 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6264 case OPC2_32_RR_ITOF
:
6265 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6267 case OPC2_32_RR_FTOUZ
:
6268 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6270 case OPC2_32_RR_UPDFL
:
6271 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6273 case OPC2_32_RR_UTOF
:
6274 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6276 case OPC2_32_RR_FTOIZ
:
6277 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6279 case OPC2_32_RR_QSEED_F
:
6280 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6283 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6288 static void decode_rr1_mul(DisasContext
*ctx
)
6296 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6297 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6298 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6299 n
= tcg_constant_i32(MASK_OP_RR1_N(ctx
->opcode
));
6300 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6303 case OPC2_32_RR1_MUL_H_32_LL
:
6304 temp64
= tcg_temp_new_i64();
6306 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6307 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6308 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6310 case OPC2_32_RR1_MUL_H_32_LU
:
6311 temp64
= tcg_temp_new_i64();
6313 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6314 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6315 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6317 case OPC2_32_RR1_MUL_H_32_UL
:
6318 temp64
= tcg_temp_new_i64();
6320 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6321 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6322 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6324 case OPC2_32_RR1_MUL_H_32_UU
:
6325 temp64
= tcg_temp_new_i64();
6327 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6328 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6329 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6331 case OPC2_32_RR1_MULM_H_64_LL
:
6332 temp64
= tcg_temp_new_i64();
6334 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6335 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6337 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6339 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6341 case OPC2_32_RR1_MULM_H_64_LU
:
6342 temp64
= tcg_temp_new_i64();
6344 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6345 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6347 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6349 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6351 case OPC2_32_RR1_MULM_H_64_UL
:
6352 temp64
= tcg_temp_new_i64();
6354 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6355 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6357 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6359 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6361 case OPC2_32_RR1_MULM_H_64_UU
:
6362 temp64
= tcg_temp_new_i64();
6364 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6365 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6367 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6369 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6371 case OPC2_32_RR1_MULR_H_16_LL
:
6372 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6373 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6375 case OPC2_32_RR1_MULR_H_16_LU
:
6376 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6377 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6379 case OPC2_32_RR1_MULR_H_16_UL
:
6380 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6381 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6383 case OPC2_32_RR1_MULR_H_16_UU
:
6384 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6385 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6388 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6392 static void decode_rr1_mulq(DisasContext
*ctx
)
6400 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6401 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6402 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6403 n
= MASK_OP_RR1_N(ctx
->opcode
);
6404 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6406 temp
= tcg_temp_new();
6407 temp2
= tcg_temp_new();
6410 case OPC2_32_RR1_MUL_Q_32
:
6411 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6413 case OPC2_32_RR1_MUL_Q_64
:
6415 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6418 case OPC2_32_RR1_MUL_Q_32_L
:
6419 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6420 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6422 case OPC2_32_RR1_MUL_Q_64_L
:
6424 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6425 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6427 case OPC2_32_RR1_MUL_Q_32_U
:
6428 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6429 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6431 case OPC2_32_RR1_MUL_Q_64_U
:
6433 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6434 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6436 case OPC2_32_RR1_MUL_Q_32_LL
:
6437 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6438 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6439 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6441 case OPC2_32_RR1_MUL_Q_32_UU
:
6442 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6443 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6444 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6446 case OPC2_32_RR1_MULR_Q_32_L
:
6447 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6448 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6449 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6451 case OPC2_32_RR1_MULR_Q_32_U
:
6452 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6453 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6454 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6457 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6462 static void decode_rr2_mul(DisasContext
*ctx
)
6467 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6468 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6469 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6470 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6472 case OPC2_32_RR2_MUL_32
:
6473 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6475 case OPC2_32_RR2_MUL_64
:
6477 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6480 case OPC2_32_RR2_MULS_32
:
6481 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6484 case OPC2_32_RR2_MUL_U_64
:
6486 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6489 case OPC2_32_RR2_MULS_U_32
:
6490 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6494 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6499 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6506 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6507 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6508 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6509 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6510 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6511 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6514 case OPC2_32_RRPW_EXTR
:
6516 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6520 if (pos
+ width
<= 32) {
6521 /* optimize special cases */
6522 if ((pos
== 0) && (width
== 8)) {
6523 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6524 } else if ((pos
== 0) && (width
== 16)) {
6525 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6527 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6528 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6532 case OPC2_32_RRPW_EXTR_U
:
6534 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6536 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6537 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6540 case OPC2_32_RRPW_IMASK
:
6543 if (pos
+ width
<= 32) {
6544 temp
= tcg_temp_new();
6545 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6546 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6547 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6551 case OPC2_32_RRPW_INSERT
:
6552 if (pos
+ width
<= 32) {
6553 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6558 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6563 static void decode_rrr_cond_select(DisasContext
*ctx
)
6569 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6570 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6571 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6572 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6573 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6576 case OPC2_32_RRR_CADD
:
6577 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6578 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6580 case OPC2_32_RRR_CADDN
:
6581 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6584 case OPC2_32_RRR_CSUB
:
6585 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6588 case OPC2_32_RRR_CSUBN
:
6589 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6592 case OPC2_32_RRR_SEL
:
6593 temp
= tcg_constant_i32(0);
6594 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6595 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6597 case OPC2_32_RRR_SELN
:
6598 temp
= tcg_constant_i32(0);
6599 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6600 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6603 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6607 static void decode_rrr_divide(DisasContext
*ctx
)
6613 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6614 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6615 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6616 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6617 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6620 case OPC2_32_RRR_DVADJ
:
6623 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6624 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6626 case OPC2_32_RRR_DVSTEP
:
6629 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6630 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6632 case OPC2_32_RRR_DVSTEP_U
:
6635 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6636 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6638 case OPC2_32_RRR_IXMAX
:
6641 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6642 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6644 case OPC2_32_RRR_IXMAX_U
:
6647 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6648 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6650 case OPC2_32_RRR_IXMIN
:
6653 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6654 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6656 case OPC2_32_RRR_IXMIN_U
:
6659 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6660 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6662 case OPC2_32_RRR_PACK
:
6664 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6665 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6667 case OPC2_32_RRR_ADD_F
:
6668 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6670 case OPC2_32_RRR_SUB_F
:
6671 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6673 case OPC2_32_RRR_MADD_F
:
6674 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6675 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6677 case OPC2_32_RRR_MSUB_F
:
6678 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6679 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6682 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6687 static void decode_rrr2_madd(DisasContext
*ctx
)
6690 uint32_t r1
, r2
, r3
, r4
;
6692 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6693 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6694 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6695 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6696 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6698 case OPC2_32_RRR2_MADD_32
:
6699 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6702 case OPC2_32_RRR2_MADD_64
:
6705 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6706 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6708 case OPC2_32_RRR2_MADDS_32
:
6709 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6710 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6712 case OPC2_32_RRR2_MADDS_64
:
6715 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6716 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6718 case OPC2_32_RRR2_MADD_U_64
:
6721 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6722 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6724 case OPC2_32_RRR2_MADDS_U_32
:
6725 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6726 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6728 case OPC2_32_RRR2_MADDS_U_64
:
6731 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6732 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6735 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6739 static void decode_rrr2_msub(DisasContext
*ctx
)
6742 uint32_t r1
, r2
, r3
, r4
;
6744 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6745 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6746 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6747 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6748 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6751 case OPC2_32_RRR2_MSUB_32
:
6752 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6755 case OPC2_32_RRR2_MSUB_64
:
6758 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6759 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6761 case OPC2_32_RRR2_MSUBS_32
:
6762 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6763 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6765 case OPC2_32_RRR2_MSUBS_64
:
6768 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6769 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6771 case OPC2_32_RRR2_MSUB_U_64
:
6774 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6775 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6777 case OPC2_32_RRR2_MSUBS_U_32
:
6778 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6779 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6781 case OPC2_32_RRR2_MSUBS_U_64
:
6784 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6785 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6788 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6793 static void decode_rrr1_madd(DisasContext
*ctx
)
6796 uint32_t r1
, r2
, r3
, r4
, n
;
6798 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6799 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6800 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6801 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6802 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6803 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6806 case OPC2_32_RRR1_MADD_H_LL
:
6809 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6810 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6812 case OPC2_32_RRR1_MADD_H_LU
:
6815 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6816 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6818 case OPC2_32_RRR1_MADD_H_UL
:
6821 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6822 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6824 case OPC2_32_RRR1_MADD_H_UU
:
6827 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6828 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6830 case OPC2_32_RRR1_MADDS_H_LL
:
6833 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6834 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6836 case OPC2_32_RRR1_MADDS_H_LU
:
6839 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6840 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6842 case OPC2_32_RRR1_MADDS_H_UL
:
6845 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6846 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6848 case OPC2_32_RRR1_MADDS_H_UU
:
6851 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6852 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6854 case OPC2_32_RRR1_MADDM_H_LL
:
6857 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6858 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6860 case OPC2_32_RRR1_MADDM_H_LU
:
6863 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6864 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6866 case OPC2_32_RRR1_MADDM_H_UL
:
6869 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6870 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6872 case OPC2_32_RRR1_MADDM_H_UU
:
6875 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6876 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6878 case OPC2_32_RRR1_MADDMS_H_LL
:
6881 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6882 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6884 case OPC2_32_RRR1_MADDMS_H_LU
:
6887 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6888 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6890 case OPC2_32_RRR1_MADDMS_H_UL
:
6893 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6894 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6896 case OPC2_32_RRR1_MADDMS_H_UU
:
6899 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6900 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6902 case OPC2_32_RRR1_MADDR_H_LL
:
6903 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6904 cpu_gpr_d
[r2
], n
, MODE_LL
);
6906 case OPC2_32_RRR1_MADDR_H_LU
:
6907 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6908 cpu_gpr_d
[r2
], n
, MODE_LU
);
6910 case OPC2_32_RRR1_MADDR_H_UL
:
6911 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6912 cpu_gpr_d
[r2
], n
, MODE_UL
);
6914 case OPC2_32_RRR1_MADDR_H_UU
:
6915 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6916 cpu_gpr_d
[r2
], n
, MODE_UU
);
6918 case OPC2_32_RRR1_MADDRS_H_LL
:
6919 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6920 cpu_gpr_d
[r2
], n
, MODE_LL
);
6922 case OPC2_32_RRR1_MADDRS_H_LU
:
6923 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6924 cpu_gpr_d
[r2
], n
, MODE_LU
);
6926 case OPC2_32_RRR1_MADDRS_H_UL
:
6927 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6928 cpu_gpr_d
[r2
], n
, MODE_UL
);
6930 case OPC2_32_RRR1_MADDRS_H_UU
:
6931 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6932 cpu_gpr_d
[r2
], n
, MODE_UU
);
6935 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6939 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6942 uint32_t r1
, r2
, r3
, r4
, n
;
6945 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6946 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6947 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6948 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6949 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6950 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6952 temp
= tcg_temp_new();
6953 temp2
= tcg_temp_new();
6956 case OPC2_32_RRR1_MADD_Q_32
:
6957 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6958 cpu_gpr_d
[r2
], n
, 32);
6960 case OPC2_32_RRR1_MADD_Q_64
:
6963 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6964 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6967 case OPC2_32_RRR1_MADD_Q_32_L
:
6968 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6969 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6972 case OPC2_32_RRR1_MADD_Q_64_L
:
6975 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6976 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6977 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6980 case OPC2_32_RRR1_MADD_Q_32_U
:
6981 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6982 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6985 case OPC2_32_RRR1_MADD_Q_64_U
:
6988 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6989 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6990 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6993 case OPC2_32_RRR1_MADD_Q_32_LL
:
6994 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6995 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6996 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6998 case OPC2_32_RRR1_MADD_Q_64_LL
:
7001 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7002 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7003 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7004 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7006 case OPC2_32_RRR1_MADD_Q_32_UU
:
7007 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7008 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7009 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7011 case OPC2_32_RRR1_MADD_Q_64_UU
:
7014 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7015 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7016 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7017 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7019 case OPC2_32_RRR1_MADDS_Q_32
:
7020 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7021 cpu_gpr_d
[r2
], n
, 32);
7023 case OPC2_32_RRR1_MADDS_Q_64
:
7026 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7027 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7030 case OPC2_32_RRR1_MADDS_Q_32_L
:
7031 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7032 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7035 case OPC2_32_RRR1_MADDS_Q_64_L
:
7038 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7039 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7040 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7043 case OPC2_32_RRR1_MADDS_Q_32_U
:
7044 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7045 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7048 case OPC2_32_RRR1_MADDS_Q_64_U
:
7051 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7052 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7053 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7056 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7057 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7058 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7059 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7061 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7064 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7065 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7066 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7067 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7069 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7070 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7071 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7072 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7074 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7077 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7078 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7079 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7080 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7082 case OPC2_32_RRR1_MADDR_H_64_UL
:
7084 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7085 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7087 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7089 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7090 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7092 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7093 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7094 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7095 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7097 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7098 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7099 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7100 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7102 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7103 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7104 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7105 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7107 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7108 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7109 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7110 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7113 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7117 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7120 uint32_t r1
, r2
, r3
, r4
, n
;
7122 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7123 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7124 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7125 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7126 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7127 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7130 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7133 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7134 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7136 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7139 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7140 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7142 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7145 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7146 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7148 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7151 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7152 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7154 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7157 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7158 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7161 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7164 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7165 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7168 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7171 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7172 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7175 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7178 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7179 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7182 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7185 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7186 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7189 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7192 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7193 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7196 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7199 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7200 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7203 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7206 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7207 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7210 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7213 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7214 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7217 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7220 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7221 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7224 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7227 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7228 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7231 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7234 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7235 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7238 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7239 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7240 cpu_gpr_d
[r2
], n
, MODE_LL
);
7242 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7243 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7244 cpu_gpr_d
[r2
], n
, MODE_LU
);
7246 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7247 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7248 cpu_gpr_d
[r2
], n
, MODE_UL
);
7250 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7251 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7252 cpu_gpr_d
[r2
], n
, MODE_UU
);
7254 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7255 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7256 cpu_gpr_d
[r2
], n
, MODE_LL
);
7258 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7259 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7260 cpu_gpr_d
[r2
], n
, MODE_LU
);
7262 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7263 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7264 cpu_gpr_d
[r2
], n
, MODE_UL
);
7266 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7267 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7268 cpu_gpr_d
[r2
], n
, MODE_UU
);
7271 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7275 static void decode_rrr1_msub(DisasContext
*ctx
)
7278 uint32_t r1
, r2
, r3
, r4
, n
;
7280 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7281 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7282 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7283 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7284 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7285 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7288 case OPC2_32_RRR1_MSUB_H_LL
:
7291 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7292 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7294 case OPC2_32_RRR1_MSUB_H_LU
:
7297 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7298 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7300 case OPC2_32_RRR1_MSUB_H_UL
:
7303 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7304 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7306 case OPC2_32_RRR1_MSUB_H_UU
:
7309 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7310 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7312 case OPC2_32_RRR1_MSUBS_H_LL
:
7315 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7316 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7318 case OPC2_32_RRR1_MSUBS_H_LU
:
7321 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7322 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7324 case OPC2_32_RRR1_MSUBS_H_UL
:
7327 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7328 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7330 case OPC2_32_RRR1_MSUBS_H_UU
:
7333 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7334 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7336 case OPC2_32_RRR1_MSUBM_H_LL
:
7339 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7340 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7342 case OPC2_32_RRR1_MSUBM_H_LU
:
7345 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7346 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7348 case OPC2_32_RRR1_MSUBM_H_UL
:
7351 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7352 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7354 case OPC2_32_RRR1_MSUBM_H_UU
:
7357 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7358 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7360 case OPC2_32_RRR1_MSUBMS_H_LL
:
7363 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7364 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7366 case OPC2_32_RRR1_MSUBMS_H_LU
:
7369 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7370 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7372 case OPC2_32_RRR1_MSUBMS_H_UL
:
7375 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7376 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7378 case OPC2_32_RRR1_MSUBMS_H_UU
:
7381 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7382 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7384 case OPC2_32_RRR1_MSUBR_H_LL
:
7385 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7386 cpu_gpr_d
[r2
], n
, MODE_LL
);
7388 case OPC2_32_RRR1_MSUBR_H_LU
:
7389 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7390 cpu_gpr_d
[r2
], n
, MODE_LU
);
7392 case OPC2_32_RRR1_MSUBR_H_UL
:
7393 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7394 cpu_gpr_d
[r2
], n
, MODE_UL
);
7396 case OPC2_32_RRR1_MSUBR_H_UU
:
7397 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7398 cpu_gpr_d
[r2
], n
, MODE_UU
);
7400 case OPC2_32_RRR1_MSUBRS_H_LL
:
7401 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7402 cpu_gpr_d
[r2
], n
, MODE_LL
);
7404 case OPC2_32_RRR1_MSUBRS_H_LU
:
7405 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7406 cpu_gpr_d
[r2
], n
, MODE_LU
);
7408 case OPC2_32_RRR1_MSUBRS_H_UL
:
7409 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7410 cpu_gpr_d
[r2
], n
, MODE_UL
);
7412 case OPC2_32_RRR1_MSUBRS_H_UU
:
7413 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7414 cpu_gpr_d
[r2
], n
, MODE_UU
);
7417 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7421 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7424 uint32_t r1
, r2
, r3
, r4
, n
;
7427 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7428 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7429 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7430 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7431 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7432 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7434 temp
= tcg_temp_new();
7435 temp2
= tcg_temp_new();
7438 case OPC2_32_RRR1_MSUB_Q_32
:
7439 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7440 cpu_gpr_d
[r2
], n
, 32);
7442 case OPC2_32_RRR1_MSUB_Q_64
:
7445 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7446 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7449 case OPC2_32_RRR1_MSUB_Q_32_L
:
7450 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7451 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7454 case OPC2_32_RRR1_MSUB_Q_64_L
:
7457 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7458 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7459 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7462 case OPC2_32_RRR1_MSUB_Q_32_U
:
7463 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7464 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7467 case OPC2_32_RRR1_MSUB_Q_64_U
:
7470 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7471 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7472 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7475 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7476 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7477 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7478 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7480 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7483 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7484 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7485 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7486 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7488 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7489 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7490 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7491 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7493 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7496 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7497 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7498 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7499 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7501 case OPC2_32_RRR1_MSUBS_Q_32
:
7502 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7503 cpu_gpr_d
[r2
], n
, 32);
7505 case OPC2_32_RRR1_MSUBS_Q_64
:
7508 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7509 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7512 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7513 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7514 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7517 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7520 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7521 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7522 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7525 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7526 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7527 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7530 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7533 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7534 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7535 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7538 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7539 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7540 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7541 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7543 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7546 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7547 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7548 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7549 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7551 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7552 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7553 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7554 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7556 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7559 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7560 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7561 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7562 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7564 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7566 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7567 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7569 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7571 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7572 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7574 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7575 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7576 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7577 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7579 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7580 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7581 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7582 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7584 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7585 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7586 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7587 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7589 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7590 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7591 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7592 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7595 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7599 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7602 uint32_t r1
, r2
, r3
, r4
, n
;
7604 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7605 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7606 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7607 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7608 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7609 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7612 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7615 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7616 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7618 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7621 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7622 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7624 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7627 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7628 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7630 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7633 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7634 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7636 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7639 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7640 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7643 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7646 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7650 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7653 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7654 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7657 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7660 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7661 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7664 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7667 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7668 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7671 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7674 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7675 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7678 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7681 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7682 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7685 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7688 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7689 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7692 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7695 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7696 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7699 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7702 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7703 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7706 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7709 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7710 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7713 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7716 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7717 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7720 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7721 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7722 cpu_gpr_d
[r2
], n
, MODE_LL
);
7724 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7725 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7726 cpu_gpr_d
[r2
], n
, MODE_LU
);
7728 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7729 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7730 cpu_gpr_d
[r2
], n
, MODE_UL
);
7732 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7733 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7734 cpu_gpr_d
[r2
], n
, MODE_UU
);
7736 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7737 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7738 cpu_gpr_d
[r2
], n
, MODE_LL
);
7740 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7741 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7742 cpu_gpr_d
[r2
], n
, MODE_LU
);
7744 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7745 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7746 cpu_gpr_d
[r2
], n
, MODE_UL
);
7748 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7749 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7750 cpu_gpr_d
[r2
], n
, MODE_UU
);
7753 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7758 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7762 TCGv tmp_width
, tmp_pos
;
7764 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7765 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7766 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7767 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7768 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7770 tmp_pos
= tcg_temp_new();
7771 tmp_width
= tcg_temp_new();
7774 case OPC2_32_RRRR_DEXTR
:
7775 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7777 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7779 TCGv msw
= tcg_temp_new();
7780 TCGv zero
= tcg_constant_tl(0);
7781 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7782 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7783 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7785 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7786 * behaviour. So check that case here and set the low bits to zero
7787 * which effectivly returns cpu_gpr_d[r1]
7789 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7790 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7793 case OPC2_32_RRRR_EXTR
:
7794 case OPC2_32_RRRR_EXTR_U
:
7796 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7797 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7798 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7799 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7800 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7801 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7802 if (op2
== OPC2_32_RRRR_EXTR
) {
7803 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7805 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7808 case OPC2_32_RRRR_INSERT
:
7810 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7811 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7812 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7816 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7821 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7829 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7830 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7831 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7832 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7833 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7834 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7836 temp
= tcg_temp_new();
7839 case OPC2_32_RRRW_EXTR
:
7840 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7841 tcg_gen_addi_tl(temp
, temp
, width
);
7842 tcg_gen_subfi_tl(temp
, 32, temp
);
7843 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7844 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7846 case OPC2_32_RRRW_EXTR_U
:
7848 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7850 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7851 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7852 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7855 case OPC2_32_RRRW_IMASK
:
7856 temp2
= tcg_temp_new();
7858 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7859 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7860 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7861 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7862 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7864 case OPC2_32_RRRW_INSERT
:
7865 temp2
= tcg_temp_new();
7867 tcg_gen_movi_tl(temp
, width
);
7868 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7869 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7872 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7877 static void decode_sys_interrupts(DisasContext
*ctx
)
7884 op2
= MASK_OP_SYS_OP2(ctx
->opcode
);
7885 r1
= MASK_OP_SYS_S1D(ctx
->opcode
);
7888 case OPC2_32_SYS_DEBUG
:
7889 /* raise EXCP_DEBUG */
7891 case OPC2_32_SYS_DISABLE
:
7892 tcg_gen_andi_tl(cpu_ICR
, cpu_ICR
, ~ctx
->icr_ie_mask
);
7894 case OPC2_32_SYS_DISABLE_D
:
7895 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
7896 tcg_gen_extract_tl(cpu_gpr_d
[r1
], cpu_ICR
, ctx
->icr_ie_offset
, 1);
7897 tcg_gen_andi_tl(cpu_ICR
, cpu_ICR
, ~ctx
->icr_ie_mask
);
7899 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7901 case OPC2_32_SYS_DSYNC
:
7903 case OPC2_32_SYS_ENABLE
:
7904 tcg_gen_ori_tl(cpu_ICR
, cpu_ICR
, ctx
->icr_ie_mask
);
7906 case OPC2_32_SYS_ISYNC
:
7908 case OPC2_32_SYS_NOP
:
7910 case OPC2_32_SYS_RET
:
7911 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
7913 case OPC2_32_SYS_FRET
:
7916 case OPC2_32_SYS_RFE
:
7917 gen_helper_rfe(cpu_env
);
7918 tcg_gen_exit_tb(NULL
, 0);
7919 ctx
->base
.is_jmp
= DISAS_NORETURN
;
7921 case OPC2_32_SYS_RFM
:
7922 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
7923 tmp
= tcg_temp_new();
7924 l1
= gen_new_label();
7926 tcg_gen_ld32u_tl(tmp
, cpu_env
, offsetof(CPUTriCoreState
, DBGSR
));
7927 tcg_gen_andi_tl(tmp
, tmp
, MASK_DBGSR_DE
);
7928 tcg_gen_brcondi_tl(TCG_COND_NE
, tmp
, 1, l1
);
7929 gen_helper_rfm(cpu_env
);
7931 tcg_gen_exit_tb(NULL
, 0);
7932 ctx
->base
.is_jmp
= DISAS_NORETURN
;
7934 /* generate privilege trap */
7937 case OPC2_32_SYS_RSLCX
:
7938 gen_helper_rslcx(cpu_env
);
7940 case OPC2_32_SYS_SVLCX
:
7941 gen_helper_svlcx(cpu_env
);
7943 case OPC2_32_SYS_RESTORE
:
7944 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
7945 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
||
7946 (ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_UM1
) {
7947 tcg_gen_deposit_tl(cpu_ICR
, cpu_ICR
, cpu_gpr_d
[r1
], 8, 1);
7948 } /* else raise privilege trap */
7950 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7953 case OPC2_32_SYS_TRAPSV
:
7954 l1
= gen_new_label();
7955 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_SV
, 0, l1
);
7956 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_SOVF
);
7959 case OPC2_32_SYS_TRAPV
:
7960 l1
= gen_new_label();
7961 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_V
, 0, l1
);
7962 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_OVF
);
7966 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7970 static void decode_32Bit_opc(DisasContext
*ctx
)
7974 int32_t address
, const16
;
7977 TCGv temp
, temp2
, temp3
;
7979 op1
= MASK_OP_MAJOR(ctx
->opcode
);
7981 /* handle JNZ.T opcode only being 7 bit long */
7982 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
7983 op1
= OPCM_32_BRN_JTT
;
7988 case OPCM_32_ABS_LDW
:
7989 decode_abs_ldw(ctx
);
7991 case OPCM_32_ABS_LDB
:
7992 decode_abs_ldb(ctx
);
7994 case OPCM_32_ABS_LDMST_SWAP
:
7995 decode_abs_ldst_swap(ctx
);
7997 case OPCM_32_ABS_LDST_CONTEXT
:
7998 decode_abs_ldst_context(ctx
);
8000 case OPCM_32_ABS_STORE
:
8001 decode_abs_store(ctx
);
8003 case OPCM_32_ABS_STOREB_H
:
8004 decode_abs_storeb_h(ctx
);
8006 case OPC1_32_ABS_STOREQ
:
8007 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
8008 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
8009 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
8010 temp2
= tcg_temp_new();
8012 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
8013 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
8015 case OPC1_32_ABS_LD_Q
:
8016 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
8017 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
8018 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
8020 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
8021 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
8023 case OPCM_32_ABS_LEA_LHA
:
8024 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
8025 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
8027 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
8028 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
8029 if (op2
== OPC2_32_ABS_LHA
) {
8030 tcg_gen_movi_tl(cpu_gpr_a
[r1
], address
<< 14);
8033 /* otherwise translate regular LEA */
8036 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
8039 case OPC1_32_ABSB_ST_T
:
8040 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
8041 b
= MASK_OP_ABSB_B(ctx
->opcode
);
8042 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
8044 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
8045 temp2
= tcg_temp_new();
8047 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
8048 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
8049 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
8050 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
8053 case OPC1_32_B_CALL
:
8054 case OPC1_32_B_CALLA
:
8055 case OPC1_32_B_FCALL
:
8056 case OPC1_32_B_FCALLA
:
8061 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
8062 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
8065 case OPCM_32_BIT_ANDACC
:
8066 decode_bit_andacc(ctx
);
8068 case OPCM_32_BIT_LOGICAL_T1
:
8069 decode_bit_logical_t(ctx
);
8071 case OPCM_32_BIT_INSERT
:
8072 decode_bit_insert(ctx
);
8074 case OPCM_32_BIT_LOGICAL_T2
:
8075 decode_bit_logical_t2(ctx
);
8077 case OPCM_32_BIT_ORAND
:
8078 decode_bit_orand(ctx
);
8080 case OPCM_32_BIT_SH_LOGIC1
:
8081 decode_bit_sh_logic1(ctx
);
8083 case OPCM_32_BIT_SH_LOGIC2
:
8084 decode_bit_sh_logic2(ctx
);
8087 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
8088 decode_bo_addrmode_post_pre_base(ctx
);
8090 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
8091 decode_bo_addrmode_bitreverse_circular(ctx
);
8093 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
8094 decode_bo_addrmode_ld_post_pre_base(ctx
);
8096 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
8097 decode_bo_addrmode_ld_bitreverse_circular(ctx
);
8099 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
8100 decode_bo_addrmode_stctx_post_pre_base(ctx
);
8102 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
8103 decode_bo_addrmode_ldmst_bitreverse_circular(ctx
);
8106 case OPC1_32_BOL_LD_A_LONGOFF
:
8107 case OPC1_32_BOL_LD_W_LONGOFF
:
8108 case OPC1_32_BOL_LEA_LONGOFF
:
8109 case OPC1_32_BOL_ST_W_LONGOFF
:
8110 case OPC1_32_BOL_ST_A_LONGOFF
:
8111 case OPC1_32_BOL_LD_B_LONGOFF
:
8112 case OPC1_32_BOL_LD_BU_LONGOFF
:
8113 case OPC1_32_BOL_LD_H_LONGOFF
:
8114 case OPC1_32_BOL_LD_HU_LONGOFF
:
8115 case OPC1_32_BOL_ST_B_LONGOFF
:
8116 case OPC1_32_BOL_ST_H_LONGOFF
:
8117 decode_bol_opc(ctx
, op1
);
8120 case OPCM_32_BRC_EQ_NEQ
:
8121 case OPCM_32_BRC_GE
:
8122 case OPCM_32_BRC_JLT
:
8123 case OPCM_32_BRC_JNE
:
8124 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
8125 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
8126 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
8127 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
8130 case OPCM_32_BRN_JTT
:
8131 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
8132 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
8133 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
8136 case OPCM_32_BRR_EQ_NEQ
:
8137 case OPCM_32_BRR_ADDR_EQ_NEQ
:
8138 case OPCM_32_BRR_GE
:
8139 case OPCM_32_BRR_JLT
:
8140 case OPCM_32_BRR_JNE
:
8141 case OPCM_32_BRR_JNZ
:
8142 case OPCM_32_BRR_LOOP
:
8143 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
8144 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
8145 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
8146 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
8149 case OPCM_32_RC_LOGICAL_SHIFT
:
8150 decode_rc_logical_shift(ctx
);
8152 case OPCM_32_RC_ACCUMULATOR
:
8153 decode_rc_accumulator(ctx
);
8155 case OPCM_32_RC_SERVICEROUTINE
:
8156 decode_rc_serviceroutine(ctx
);
8158 case OPCM_32_RC_MUL
:
8162 case OPCM_32_RCPW_MASK_INSERT
:
8163 decode_rcpw_insert(ctx
);
8166 case OPC1_32_RCRR_INSERT
:
8167 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
8168 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
8169 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
8170 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
8171 temp
= tcg_constant_i32(const16
);
8172 temp2
= tcg_temp_new(); /* width*/
8173 temp3
= tcg_temp_new(); /* pos */
8177 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
8178 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
8180 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
8183 case OPCM_32_RCRW_MASK_INSERT
:
8184 decode_rcrw_insert(ctx
);
8187 case OPCM_32_RCR_COND_SELECT
:
8188 decode_rcr_cond_select(ctx
);
8190 case OPCM_32_RCR_MADD
:
8191 decode_rcr_madd(ctx
);
8193 case OPCM_32_RCR_MSUB
:
8194 decode_rcr_msub(ctx
);
8197 case OPC1_32_RLC_ADDI
:
8198 case OPC1_32_RLC_ADDIH
:
8199 case OPC1_32_RLC_ADDIH_A
:
8200 case OPC1_32_RLC_MFCR
:
8201 case OPC1_32_RLC_MOV
:
8202 case OPC1_32_RLC_MOV_64
:
8203 case OPC1_32_RLC_MOV_U
:
8204 case OPC1_32_RLC_MOV_H
:
8205 case OPC1_32_RLC_MOVH_A
:
8206 case OPC1_32_RLC_MTCR
:
8207 decode_rlc_opc(ctx
, op1
);
8210 case OPCM_32_RR_ACCUMULATOR
:
8211 decode_rr_accumulator(ctx
);
8213 case OPCM_32_RR_LOGICAL_SHIFT
:
8214 decode_rr_logical_shift(ctx
);
8216 case OPCM_32_RR_ADDRESS
:
8217 decode_rr_address(ctx
);
8219 case OPCM_32_RR_IDIRECT
:
8220 decode_rr_idirect(ctx
);
8222 case OPCM_32_RR_DIVIDE
:
8223 decode_rr_divide(ctx
);
8226 case OPCM_32_RR1_MUL
:
8227 decode_rr1_mul(ctx
);
8229 case OPCM_32_RR1_MULQ
:
8230 decode_rr1_mulq(ctx
);
8233 case OPCM_32_RR2_MUL
:
8234 decode_rr2_mul(ctx
);
8237 case OPCM_32_RRPW_EXTRACT_INSERT
:
8238 decode_rrpw_extract_insert(ctx
);
8240 case OPC1_32_RRPW_DEXTR
:
8241 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
8242 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
8243 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
8244 const16
= MASK_OP_RRPW_POS(ctx
->opcode
);
8246 tcg_gen_extract2_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
8250 case OPCM_32_RRR_COND_SELECT
:
8251 decode_rrr_cond_select(ctx
);
8253 case OPCM_32_RRR_DIVIDE
:
8254 decode_rrr_divide(ctx
);
8257 case OPCM_32_RRR2_MADD
:
8258 decode_rrr2_madd(ctx
);
8260 case OPCM_32_RRR2_MSUB
:
8261 decode_rrr2_msub(ctx
);
8264 case OPCM_32_RRR1_MADD
:
8265 decode_rrr1_madd(ctx
);
8267 case OPCM_32_RRR1_MADDQ_H
:
8268 decode_rrr1_maddq_h(ctx
);
8270 case OPCM_32_RRR1_MADDSU_H
:
8271 decode_rrr1_maddsu_h(ctx
);
8273 case OPCM_32_RRR1_MSUB_H
:
8274 decode_rrr1_msub(ctx
);
8276 case OPCM_32_RRR1_MSUB_Q
:
8277 decode_rrr1_msubq_h(ctx
);
8279 case OPCM_32_RRR1_MSUBAD_H
:
8280 decode_rrr1_msubad_h(ctx
);
8283 case OPCM_32_RRRR_EXTRACT_INSERT
:
8284 decode_rrrr_extract_insert(ctx
);
8287 case OPCM_32_RRRW_EXTRACT_INSERT
:
8288 decode_rrrw_extract_insert(ctx
);
8291 case OPCM_32_SYS_INTERRUPTS
:
8292 decode_sys_interrupts(ctx
);
8294 case OPC1_32_SYS_RSTV
:
8295 tcg_gen_movi_tl(cpu_PSW_V
, 0);
8296 tcg_gen_mov_tl(cpu_PSW_SV
, cpu_PSW_V
);
8297 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
8298 tcg_gen_mov_tl(cpu_PSW_SAV
, cpu_PSW_V
);
8301 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * TriCore encodes the instruction length in bit 0 of the first halfword:
 * a cleared bit selects the short (16-bit) encoding.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return !(insn & 0x1u);
}
8310 static void tricore_tr_init_disas_context(DisasContextBase
*dcbase
,
8313 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8314 CPUTriCoreState
*env
= cs
->env_ptr
;
8315 ctx
->mem_idx
= cpu_mmu_index(env
, false);
8316 ctx
->hflags
= (uint32_t)ctx
->base
.tb
->flags
;
8317 ctx
->features
= env
->features
;
8318 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
8319 ctx
->icr_ie_mask
= R_ICR_IE_161_MASK
;
8320 ctx
->icr_ie_offset
= R_ICR_IE_161_SHIFT
;
8322 ctx
->icr_ie_mask
= R_ICR_IE_13_MASK
;
8323 ctx
->icr_ie_offset
= R_ICR_IE_13_SHIFT
;
8327 static void tricore_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
8331 static void tricore_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8333 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8335 tcg_gen_insn_start(ctx
->base
.pc_next
);
8338 static bool insn_crosses_page(CPUTriCoreState
*env
, DisasContext
*ctx
)
8341 * Return true if the insn at ctx->base.pc_next might cross a page boundary.
8342 * (False positives are OK, false negatives are not.)
8343 * Our caller ensures we are only called if dc->base.pc_next is less than
8344 * 4 bytes from the page boundary, so we cross the page if the first
8345 * 16 bits indicate that this is a 32 bit insn.
8347 uint16_t insn
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8349 return !tricore_insn_is_16bit(insn
);
8353 static void tricore_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
8355 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8356 CPUTriCoreState
*env
= cpu
->env_ptr
;
8360 insn_lo
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8361 is_16bit
= tricore_insn_is_16bit(insn_lo
);
8363 ctx
->opcode
= insn_lo
;
8364 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 2;
8365 decode_16Bit_opc(ctx
);
8367 uint32_t insn_hi
= cpu_lduw_code(env
, ctx
->base
.pc_next
+ 2);
8368 ctx
->opcode
= insn_hi
<< 16 | insn_lo
;
8369 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 4;
8370 decode_32Bit_opc(ctx
);
8372 ctx
->base
.pc_next
= ctx
->pc_succ_insn
;
8374 if (ctx
->base
.is_jmp
== DISAS_NEXT
) {
8375 target_ulong page_start
;
8377 page_start
= ctx
->base
.pc_first
& TARGET_PAGE_MASK
;
8378 if (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
8379 || (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
- 3
8380 && insn_crosses_page(env
, ctx
))) {
8381 ctx
->base
.is_jmp
= DISAS_TOO_MANY
;
8386 static void tricore_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8388 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8390 switch (ctx
->base
.is_jmp
) {
8391 case DISAS_TOO_MANY
:
8392 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
);
8394 case DISAS_NORETURN
:
8397 g_assert_not_reached();
8401 static void tricore_tr_disas_log(const DisasContextBase
*dcbase
,
8402 CPUState
*cpu
, FILE *logfile
)
8404 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
8405 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
8408 static const TranslatorOps tricore_tr_ops
= {
8409 .init_disas_context
= tricore_tr_init_disas_context
,
8410 .tb_start
= tricore_tr_tb_start
,
8411 .insn_start
= tricore_tr_insn_start
,
8412 .translate_insn
= tricore_tr_translate_insn
,
8413 .tb_stop
= tricore_tr_tb_stop
,
8414 .disas_log
= tricore_tr_disas_log
,
8418 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
8419 target_ulong pc
, void *host_pc
)
8422 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
,
8423 &tricore_tr_ops
, &ctx
.base
);
/*
 * Reset the CPU's architectural registers to their power-on defaults.
 * NOTE(review): the statements of this function's body are not visible
 * in this extract — only the signature and the comment below survived.
 * Restore the body from the canonical file before building.
 */
void cpu_state_reset(CPUTriCoreState *env)
    /* Reset Regs to Default Value */
8439 static void tricore_tcg_init_csfr(void)
8441 cpu_PCXI
= tcg_global_mem_new(cpu_env
,
8442 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
8443 cpu_PSW
= tcg_global_mem_new(cpu_env
,
8444 offsetof(CPUTriCoreState
, PSW
), "PSW");
8445 cpu_PC
= tcg_global_mem_new(cpu_env
,
8446 offsetof(CPUTriCoreState
, PC
), "PC");
8447 cpu_ICR
= tcg_global_mem_new(cpu_env
,
8448 offsetof(CPUTriCoreState
, ICR
), "ICR");
8451 void tricore_tcg_init(void)
8456 for (i
= 0 ; i
< 16 ; i
++) {
8457 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8458 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8461 for (i
= 0 ; i
< 16 ; i
++) {
8462 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8463 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8466 tricore_tcg_init_csfr();
8467 /* init PSW flag cache */
8468 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8469 offsetof(CPUTriCoreState
, PSW_USB_C
),
8471 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8472 offsetof(CPUTriCoreState
, PSW_USB_V
),
8474 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8475 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8477 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8478 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8480 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8481 offsetof(CPUTriCoreState
, PSW_USB_SAV
),