2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
49 static TCGv cpu_gpr_a
[16];
50 static TCGv cpu_gpr_d
[16];
52 static TCGv cpu_PSW_C
;
53 static TCGv cpu_PSW_V
;
54 static TCGv cpu_PSW_SV
;
55 static TCGv cpu_PSW_AV
;
56 static TCGv cpu_PSW_SAV
;
58 static const char *regnames_a
[] = {
59 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
60 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
61 "a12" , "a13" , "a14" , "a15",
64 static const char *regnames_d
[] = {
65 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
66 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
67 "d12" , "d13" , "d14" , "d15",
70 typedef struct DisasContext
{
71 DisasContextBase base
;
72 target_ulong pc_succ_insn
;
74 /* Routine used to access memory */
76 uint32_t hflags
, saved_hflags
;
81 static int has_feature(DisasContext
*ctx
, int feature
)
83 return (ctx
->features
& (1ULL << feature
)) != 0;
93 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
95 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
96 CPUTriCoreState
*env
= &cpu
->env
;
102 qemu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
103 qemu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
104 qemu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
105 qemu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
106 qemu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
107 qemu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
109 for (i
= 0; i
< 16; ++i
) {
111 qemu_fprintf(f
, "\nGPR A%02d:", i
);
113 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
115 for (i
= 0; i
< 16; ++i
) {
117 qemu_fprintf(f
, "\nGPR D%02d:", i
);
119 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
121 qemu_fprintf(f
, "\n");
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */
/* Call helper 'name' with a single immediate argument. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_constant_i32(arg);                  \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    } while (0)
/* Call a packed-halfword helper on the Low halves of arg0 and arg1. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Call a packed-halfword helper: Low half of arg0, Upper half of arg1. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* Call a packed-halfword helper: Upper half of arg0, Low half of arg1. */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* Call a packed-halfword helper on the Upper halves of arg0 and arg1. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Call a helper taking a 64-bit pair (al1:ah1) and a 32-bit arg;
   split the 64-bit result back into rl/rh. */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
} while (0)
/* Call an env-taking helper returning 64 bits; split result into rl/rh. */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
} while (0)
/* Effective-address decoders for ABS-format and B-format instructions.
   NOTE(review): EA_B_ABSOLUT references 'offset' from the expansion site,
   not its 'con' parameter — intentional in upstream; confirm before use. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
/* For two 32-bit registers used as a 64-bit register, the first
   register number needs to be even. Otherwise we trap. */
201 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
202 #define CHECK_REG_PAIR(reg) do { \
204 generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, MemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
218 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
219 int16_t con
, MemOp mop
)
221 TCGv temp
= tcg_temp_new();
222 tcg_gen_addi_tl(temp
, r2
, con
);
223 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
226 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
228 TCGv_i64 temp
= tcg_temp_new_i64();
230 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
231 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
234 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
237 TCGv temp
= tcg_temp_new();
238 tcg_gen_addi_tl(temp
, base
, con
);
239 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
242 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
244 TCGv_i64 temp
= tcg_temp_new_i64();
246 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
247 /* write back to two 32 bit regs */
248 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
251 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
254 TCGv temp
= tcg_temp_new();
255 tcg_gen_addi_tl(temp
, base
, con
);
256 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
259 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
262 TCGv temp
= tcg_temp_new();
263 tcg_gen_addi_tl(temp
, r2
, off
);
264 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
265 tcg_gen_mov_tl(r2
, temp
);
268 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
271 TCGv temp
= tcg_temp_new();
272 tcg_gen_addi_tl(temp
, r2
, off
);
273 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
274 tcg_gen_mov_tl(r2
, temp
);
277 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
278 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
280 TCGv temp
= tcg_temp_new();
281 TCGv temp2
= tcg_temp_new();
283 CHECK_REG_PAIR(ereg
);
284 /* temp = (M(EA, word) */
285 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
286 /* temp = temp & ~E[a][63:32]) */
287 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
288 /* temp2 = (E[a][31:0] & E[a][63:32]); */
289 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
290 /* temp = temp | temp2; */
291 tcg_gen_or_tl(temp
, temp
, temp2
);
292 /* M(EA, word) = temp; */
293 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
296 /* tmp = M(EA, word);
299 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
301 TCGv temp
= tcg_temp_new();
303 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
304 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
308 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
310 TCGv temp
= tcg_temp_new();
311 TCGv temp2
= tcg_temp_new();
312 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
313 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
314 cpu_gpr_d
[reg
], temp
);
315 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
316 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
319 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
321 TCGv temp
= tcg_temp_new();
322 TCGv temp2
= tcg_temp_new();
323 TCGv temp3
= tcg_temp_new();
325 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
326 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
327 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
328 tcg_gen_or_tl(temp2
, temp2
, temp3
);
329 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
330 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
/* We generate loads and stores to core special function registers (csfr)
   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
   we use 3 macros R, A and E, which allow read-only, all and endinit
   protected access. These macros also specify in which ISA version the
   csfr was introduced. */
338 #define R(ADDRESS, REG, FEATURE) \
340 if (has_feature(ctx, FEATURE)) { \
341 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
344 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
345 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
346 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
348 /* since we're caching PSW make this a special case */
349 if (offset
== 0xfe04) {
350 gen_helper_psw_read(ret
, cpu_env
);
353 #include "csfr.h.inc"
361 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
362 since no execption occurs */
363 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
365 if (has_feature(ctx, FEATURE)) { \
366 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
369 /* Endinit protected registers
370 TODO: Since the endinit bit is in a register of a not yet implemented
371 watchdog device, we handle endinit protected registers like
372 all-access registers for now. */
373 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
374 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
377 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
378 /* since we're caching PSW make this a special case */
379 if (offset
== 0xfe04) {
380 gen_helper_psw_write(cpu_env
, r1
);
383 #include "csfr.h.inc"
387 /* generate privilege trap */
391 /* Functions for arithmetic instructions */
393 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
395 TCGv t0
= tcg_temp_new_i32();
396 TCGv result
= tcg_temp_new_i32();
397 /* Addition and set V/SV bits */
398 tcg_gen_add_tl(result
, r1
, r2
);
400 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
401 tcg_gen_xor_tl(t0
, r1
, r2
);
402 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
404 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
405 /* Calc AV/SAV bits */
406 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
407 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
409 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
410 /* write back result */
411 tcg_gen_mov_tl(ret
, result
);
415 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
417 TCGv temp
= tcg_temp_new();
418 TCGv_i64 t0
= tcg_temp_new_i64();
419 TCGv_i64 t1
= tcg_temp_new_i64();
420 TCGv_i64 result
= tcg_temp_new_i64();
422 tcg_gen_add_i64(result
, r1
, r2
);
424 tcg_gen_xor_i64(t1
, result
, r1
);
425 tcg_gen_xor_i64(t0
, r1
, r2
);
426 tcg_gen_andc_i64(t1
, t1
, t0
);
427 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
429 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
430 /* calc AV/SAV bits */
431 tcg_gen_extrh_i64_i32(temp
, result
);
432 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
433 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
435 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
436 /* write back result */
437 tcg_gen_mov_i64(ret
, result
);
441 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
442 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
443 void(*op2
)(TCGv
, TCGv
, TCGv
))
445 TCGv temp
= tcg_temp_new();
446 TCGv temp2
= tcg_temp_new();
447 TCGv temp3
= tcg_temp_new();
448 TCGv temp4
= tcg_temp_new();
450 (*op1
)(temp
, r1_low
, r2
);
452 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
453 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
454 if (op1
== tcg_gen_add_tl
) {
455 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
457 tcg_gen_and_tl(temp2
, temp2
, temp3
);
460 (*op2
)(temp3
, r1_high
, r3
);
462 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
463 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
464 if (op2
== tcg_gen_add_tl
) {
465 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
467 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
469 /* combine V0/V1 bits */
470 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
472 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
474 tcg_gen_mov_tl(ret_low
, temp
);
475 tcg_gen_mov_tl(ret_high
, temp3
);
477 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
478 tcg_gen_xor_tl(temp
, temp
, ret_low
);
479 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
480 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
481 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
483 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
486 /* ret = r2 + (r1 * r3); */
487 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
489 TCGv_i64 t1
= tcg_temp_new_i64();
490 TCGv_i64 t2
= tcg_temp_new_i64();
491 TCGv_i64 t3
= tcg_temp_new_i64();
493 tcg_gen_ext_i32_i64(t1
, r1
);
494 tcg_gen_ext_i32_i64(t2
, r2
);
495 tcg_gen_ext_i32_i64(t3
, r3
);
497 tcg_gen_mul_i64(t1
, t1
, t3
);
498 tcg_gen_add_i64(t1
, t2
, t1
);
500 tcg_gen_extrl_i64_i32(ret
, t1
);
503 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
504 /* t1 < -0x80000000 */
505 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
506 tcg_gen_or_i64(t2
, t2
, t3
);
507 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
508 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
510 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
511 /* Calc AV/SAV bits */
512 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
513 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
515 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
518 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
520 TCGv temp
= tcg_constant_i32(con
);
521 gen_madd32_d(ret
, r1
, r2
, temp
);
525 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
528 TCGv t1
= tcg_temp_new();
529 TCGv t2
= tcg_temp_new();
530 TCGv t3
= tcg_temp_new();
531 TCGv t4
= tcg_temp_new();
533 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
534 /* only the add can overflow */
535 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
537 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
538 tcg_gen_xor_tl(t1
, r2_high
, t2
);
539 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
541 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
542 /* Calc AV/SAV bits */
543 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
544 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
546 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
547 /* write back the result */
548 tcg_gen_mov_tl(ret_low
, t3
);
549 tcg_gen_mov_tl(ret_high
, t4
);
553 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
556 TCGv_i64 t1
= tcg_temp_new_i64();
557 TCGv_i64 t2
= tcg_temp_new_i64();
558 TCGv_i64 t3
= tcg_temp_new_i64();
560 tcg_gen_extu_i32_i64(t1
, r1
);
561 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
562 tcg_gen_extu_i32_i64(t3
, r3
);
564 tcg_gen_mul_i64(t1
, t1
, t3
);
565 tcg_gen_add_i64(t2
, t2
, t1
);
566 /* write back result */
567 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
568 /* only the add overflows, if t2 < t1
570 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
571 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
572 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
574 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
575 /* Calc AV/SAV bits */
576 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
577 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
579 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
583 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
586 TCGv temp
= tcg_constant_i32(con
);
587 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
591 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
594 TCGv temp
= tcg_constant_i32(con
);
595 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
599 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
600 TCGv r3
, uint32_t n
, uint32_t mode
)
602 TCGv t_n
= tcg_constant_i32(n
);
603 TCGv temp
= tcg_temp_new();
604 TCGv temp2
= tcg_temp_new();
605 TCGv_i64 temp64
= tcg_temp_new_i64();
608 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
611 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
614 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
617 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
620 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
621 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
622 tcg_gen_add_tl
, tcg_gen_add_tl
);
626 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
627 TCGv r3
, uint32_t n
, uint32_t mode
)
629 TCGv t_n
= tcg_constant_i32(n
);
630 TCGv temp
= tcg_temp_new();
631 TCGv temp2
= tcg_temp_new();
632 TCGv_i64 temp64
= tcg_temp_new_i64();
635 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
638 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
641 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
644 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
647 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
648 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
649 tcg_gen_sub_tl
, tcg_gen_add_tl
);
653 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
654 TCGv r3
, uint32_t n
, uint32_t mode
)
656 TCGv t_n
= tcg_constant_i32(n
);
657 TCGv_i64 temp64
= tcg_temp_new_i64();
658 TCGv_i64 temp64_2
= tcg_temp_new_i64();
659 TCGv_i64 temp64_3
= tcg_temp_new_i64();
662 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
665 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
668 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
671 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
674 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
675 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
676 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
677 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
678 tcg_gen_shli_i64(temp64
, temp64
, 16);
680 gen_add64_d(temp64_2
, temp64_3
, temp64
);
681 /* write back result */
682 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
685 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
688 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
689 TCGv r3
, uint32_t n
, uint32_t mode
)
691 TCGv t_n
= tcg_constant_i32(n
);
692 TCGv temp
= tcg_temp_new();
693 TCGv temp2
= tcg_temp_new();
694 TCGv temp3
= tcg_temp_new();
695 TCGv_i64 temp64
= tcg_temp_new_i64();
699 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
702 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
705 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
708 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
711 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
712 gen_adds(ret_low
, r1_low
, temp
);
713 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
714 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
715 gen_adds(ret_high
, r1_high
, temp2
);
717 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
718 /* combine av bits */
719 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
722 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
725 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
726 TCGv r3
, uint32_t n
, uint32_t mode
)
728 TCGv t_n
= tcg_constant_i32(n
);
729 TCGv temp
= tcg_temp_new();
730 TCGv temp2
= tcg_temp_new();
731 TCGv temp3
= tcg_temp_new();
732 TCGv_i64 temp64
= tcg_temp_new_i64();
736 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
739 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
742 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
745 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
748 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
749 gen_subs(ret_low
, r1_low
, temp
);
750 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
751 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
752 gen_adds(ret_high
, r1_high
, temp2
);
754 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
755 /* combine av bits */
756 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
760 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
761 TCGv r3
, uint32_t n
, uint32_t mode
)
763 TCGv t_n
= tcg_constant_i32(n
);
764 TCGv_i64 temp64
= tcg_temp_new_i64();
765 TCGv_i64 temp64_2
= tcg_temp_new_i64();
769 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
772 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
775 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
778 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
781 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
782 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
783 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
784 tcg_gen_shli_i64(temp64
, temp64
, 16);
785 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
787 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
788 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
793 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
794 TCGv r3
, uint32_t n
, uint32_t mode
)
796 TCGv t_n
= tcg_constant_i32(n
);
797 TCGv_i64 temp64
= tcg_temp_new_i64();
798 TCGv_i64 temp64_2
= tcg_temp_new_i64();
799 TCGv_i64 temp64_3
= tcg_temp_new_i64();
802 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
805 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
808 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
811 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
814 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
815 gen_add64_d(temp64_3
, temp64_2
, temp64
);
816 /* write back result */
817 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
821 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
822 TCGv r3
, uint32_t n
, uint32_t mode
)
824 TCGv t_n
= tcg_constant_i32(n
);
825 TCGv_i64 temp64
= tcg_temp_new_i64();
826 TCGv_i64 temp64_2
= tcg_temp_new_i64();
829 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
832 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
835 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
838 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
841 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
842 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
843 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
847 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
850 TCGv t_n
= tcg_constant_i32(n
);
851 TCGv_i64 temp64
= tcg_temp_new_i64();
854 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
857 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
860 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
863 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
866 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
870 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
872 TCGv temp
= tcg_temp_new();
873 TCGv temp2
= tcg_temp_new();
875 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
876 tcg_gen_shli_tl(temp
, r1
, 16);
877 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
881 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
883 TCGv t_n
= tcg_constant_i32(n
);
884 TCGv temp
= tcg_temp_new();
885 TCGv temp2
= tcg_temp_new();
886 TCGv_i64 temp64
= tcg_temp_new_i64();
889 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
892 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
895 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
898 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
901 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
902 tcg_gen_shli_tl(temp
, r1
, 16);
903 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
908 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
909 uint32_t n
, uint32_t mode
)
911 TCGv t_n
= tcg_constant_i32(n
);
912 TCGv_i64 temp64
= tcg_temp_new_i64();
915 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
918 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
921 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
924 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
927 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
931 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
933 TCGv temp
= tcg_temp_new();
934 TCGv temp2
= tcg_temp_new();
936 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
937 tcg_gen_shli_tl(temp
, r1
, 16);
938 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
942 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
944 TCGv t_n
= tcg_constant_i32(n
);
945 TCGv temp
= tcg_temp_new();
946 TCGv temp2
= tcg_temp_new();
947 TCGv_i64 temp64
= tcg_temp_new_i64();
950 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
953 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
956 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
959 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
962 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
963 tcg_gen_shli_tl(temp
, r1
, 16);
964 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
968 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
970 TCGv t_n
= tcg_constant_i32(n
);
971 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
975 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
977 TCGv t_n
= tcg_constant_i32(n
);
978 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
982 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
985 TCGv temp
= tcg_temp_new();
986 TCGv temp2
= tcg_temp_new();
987 TCGv temp3
= tcg_temp_new();
988 TCGv_i64 t1
= tcg_temp_new_i64();
989 TCGv_i64 t2
= tcg_temp_new_i64();
990 TCGv_i64 t3
= tcg_temp_new_i64();
992 tcg_gen_ext_i32_i64(t2
, arg2
);
993 tcg_gen_ext_i32_i64(t3
, arg3
);
995 tcg_gen_mul_i64(t2
, t2
, t3
);
996 tcg_gen_shli_i64(t2
, t2
, n
);
998 tcg_gen_ext_i32_i64(t1
, arg1
);
999 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1001 tcg_gen_add_i64(t3
, t1
, t2
);
1002 tcg_gen_extrl_i64_i32(temp3
, t3
);
1004 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1005 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1006 tcg_gen_or_i64(t1
, t1
, t2
);
1007 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1008 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1009 /* We produce an overflow on the host if the mul before was
1010 (0x80000000 * 0x80000000) << 1). If this is the
1011 case, we negate the ovf. */
1013 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1014 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1015 tcg_gen_and_tl(temp
, temp
, temp2
);
1016 tcg_gen_shli_tl(temp
, temp
, 31);
1017 /* negate v bit, if special condition */
1018 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1021 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1022 /* Calc AV/SAV bits */
1023 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1024 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1026 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1027 /* write back result */
1028 tcg_gen_mov_tl(ret
, temp3
);
1032 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1034 TCGv temp
= tcg_temp_new();
1035 TCGv temp2
= tcg_temp_new();
1037 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1038 } else { /* n is expected to be 1 */
1039 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1040 tcg_gen_shli_tl(temp
, temp
, 1);
1041 /* catch special case r1 = r2 = 0x8000 */
1042 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1043 tcg_gen_sub_tl(temp
, temp
, temp2
);
1045 gen_add_d(ret
, arg1
, temp
);
1049 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1051 TCGv temp
= tcg_temp_new();
1052 TCGv temp2
= tcg_temp_new();
1054 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1055 } else { /* n is expected to be 1 */
1056 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1057 tcg_gen_shli_tl(temp
, temp
, 1);
1058 /* catch special case r1 = r2 = 0x8000 */
1059 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1060 tcg_gen_sub_tl(temp
, temp
, temp2
);
1062 gen_adds(ret
, arg1
, temp
);
1066 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1067 TCGv arg3
, uint32_t n
)
1069 TCGv temp
= tcg_temp_new();
1070 TCGv temp2
= tcg_temp_new();
1071 TCGv_i64 t1
= tcg_temp_new_i64();
1072 TCGv_i64 t2
= tcg_temp_new_i64();
1073 TCGv_i64 t3
= tcg_temp_new_i64();
1076 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1077 } else { /* n is expected to be 1 */
1078 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1079 tcg_gen_shli_tl(temp
, temp
, 1);
1080 /* catch special case r1 = r2 = 0x8000 */
1081 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1082 tcg_gen_sub_tl(temp
, temp
, temp2
);
1084 tcg_gen_ext_i32_i64(t2
, temp
);
1085 tcg_gen_shli_i64(t2
, t2
, 16);
1086 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1087 gen_add64_d(t3
, t1
, t2
);
1088 /* write back result */
1089 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1093 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1094 TCGv arg3
, uint32_t n
)
1096 TCGv temp
= tcg_temp_new();
1097 TCGv temp2
= tcg_temp_new();
1098 TCGv_i64 t1
= tcg_temp_new_i64();
1099 TCGv_i64 t2
= tcg_temp_new_i64();
1102 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1103 } else { /* n is expected to be 1 */
1104 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1105 tcg_gen_shli_tl(temp
, temp
, 1);
1106 /* catch special case r1 = r2 = 0x8000 */
1107 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1108 tcg_gen_sub_tl(temp
, temp
, temp2
);
1110 tcg_gen_ext_i32_i64(t2
, temp
);
1111 tcg_gen_shli_i64(t2
, t2
, 16);
1112 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1114 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1115 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1119 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1120 TCGv arg3
, uint32_t n
)
1122 TCGv_i64 t1
= tcg_temp_new_i64();
1123 TCGv_i64 t2
= tcg_temp_new_i64();
1124 TCGv_i64 t3
= tcg_temp_new_i64();
1125 TCGv_i64 t4
= tcg_temp_new_i64();
1128 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1129 tcg_gen_ext_i32_i64(t2
, arg2
);
1130 tcg_gen_ext_i32_i64(t3
, arg3
);
1132 tcg_gen_mul_i64(t2
, t2
, t3
);
1134 tcg_gen_shli_i64(t2
, t2
, 1);
1136 tcg_gen_add_i64(t4
, t1
, t2
);
1138 tcg_gen_xor_i64(t3
, t4
, t1
);
1139 tcg_gen_xor_i64(t2
, t1
, t2
);
1140 tcg_gen_andc_i64(t3
, t3
, t2
);
1141 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1142 /* We produce an overflow on the host if the mul before was
1143 (0x80000000 * 0x80000000) << 1). If this is the
1144 case, we negate the ovf. */
1146 temp
= tcg_temp_new();
1147 temp2
= tcg_temp_new();
1148 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1149 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1150 tcg_gen_and_tl(temp
, temp
, temp2
);
1151 tcg_gen_shli_tl(temp
, temp
, 31);
1152 /* negate v bit, if special condition */
1153 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1155 /* write back result */
1156 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1158 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1159 /* Calc AV/SAV bits */
1160 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1161 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1163 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1167 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1170 TCGv_i64 t1
= tcg_temp_new_i64();
1171 TCGv_i64 t2
= tcg_temp_new_i64();
1172 TCGv_i64 t3
= tcg_temp_new_i64();
1174 tcg_gen_ext_i32_i64(t1
, arg1
);
1175 tcg_gen_ext_i32_i64(t2
, arg2
);
1176 tcg_gen_ext_i32_i64(t3
, arg3
);
1178 tcg_gen_mul_i64(t2
, t2
, t3
);
1179 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1181 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1185 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1186 TCGv arg3
, uint32_t n
)
1188 TCGv_i64 r1
= tcg_temp_new_i64();
1189 TCGv t_n
= tcg_constant_i32(n
);
1191 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1192 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1193 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1196 /* ret = r2 - (r1 * r3); */
1197 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1199 TCGv_i64 t1
= tcg_temp_new_i64();
1200 TCGv_i64 t2
= tcg_temp_new_i64();
1201 TCGv_i64 t3
= tcg_temp_new_i64();
1203 tcg_gen_ext_i32_i64(t1
, r1
);
1204 tcg_gen_ext_i32_i64(t2
, r2
);
1205 tcg_gen_ext_i32_i64(t3
, r3
);
1207 tcg_gen_mul_i64(t1
, t1
, t3
);
1208 tcg_gen_sub_i64(t1
, t2
, t1
);
1210 tcg_gen_extrl_i64_i32(ret
, t1
);
1213 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1214 /* result < -0x80000000 */
1215 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1216 tcg_gen_or_i64(t2
, t2
, t3
);
1217 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1218 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1221 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1222 /* Calc AV/SAV bits */
1223 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1224 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1226 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1229 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1231 TCGv temp
= tcg_constant_i32(con
);
1232 gen_msub32_d(ret
, r1
, r2
, temp
);
1236 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1239 TCGv t1
= tcg_temp_new();
1240 TCGv t2
= tcg_temp_new();
1241 TCGv t3
= tcg_temp_new();
1242 TCGv t4
= tcg_temp_new();
1244 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1245 /* only the sub can overflow */
1246 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1248 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1249 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1250 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1252 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1253 /* Calc AV/SAV bits */
1254 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1255 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1257 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1258 /* write back the result */
1259 tcg_gen_mov_tl(ret_low
, t3
);
1260 tcg_gen_mov_tl(ret_high
, t4
);
1264 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1267 TCGv temp
= tcg_constant_i32(con
);
1268 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1272 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1275 TCGv_i64 t1
= tcg_temp_new_i64();
1276 TCGv_i64 t2
= tcg_temp_new_i64();
1277 TCGv_i64 t3
= tcg_temp_new_i64();
1279 tcg_gen_extu_i32_i64(t1
, r1
);
1280 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1281 tcg_gen_extu_i32_i64(t3
, r3
);
1283 tcg_gen_mul_i64(t1
, t1
, t3
);
1284 tcg_gen_sub_i64(t3
, t2
, t1
);
1285 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1286 /* calc V bit, only the sub can overflow, if t1 > t2 */
1287 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1288 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1289 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1291 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1292 /* Calc AV/SAV bits */
1293 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1294 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1296 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1300 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1303 TCGv temp
= tcg_constant_i32(con
);
1304 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1307 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1309 TCGv temp
= tcg_constant_i32(r2
);
1310 gen_add_d(ret
, r1
, temp
);
1313 /* calculate the carry bit too */
1314 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1316 TCGv t0
= tcg_temp_new_i32();
1317 TCGv result
= tcg_temp_new_i32();
1319 tcg_gen_movi_tl(t0
, 0);
1320 /* Addition and set C/V/SV bits */
1321 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1323 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1324 tcg_gen_xor_tl(t0
, r1
, r2
);
1325 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1327 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1328 /* Calc AV/SAV bits */
1329 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1330 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1332 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1333 /* write back result */
1334 tcg_gen_mov_tl(ret
, result
);
1337 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1339 TCGv temp
= tcg_constant_i32(con
);
1340 gen_add_CC(ret
, r1
, temp
);
1343 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1345 TCGv carry
= tcg_temp_new_i32();
1346 TCGv t0
= tcg_temp_new_i32();
1347 TCGv result
= tcg_temp_new_i32();
1349 tcg_gen_movi_tl(t0
, 0);
1350 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1351 /* Addition, carry and set C/V/SV bits */
1352 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1353 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1355 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1356 tcg_gen_xor_tl(t0
, r1
, r2
);
1357 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1359 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1360 /* Calc AV/SAV bits */
1361 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1362 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1364 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1365 /* write back result */
1366 tcg_gen_mov_tl(ret
, result
);
1369 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1371 TCGv temp
= tcg_constant_i32(con
);
1372 gen_addc_CC(ret
, r1
, temp
);
1375 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1378 TCGv temp
= tcg_temp_new();
1379 TCGv temp2
= tcg_temp_new();
1380 TCGv result
= tcg_temp_new();
1381 TCGv mask
= tcg_temp_new();
1382 TCGv t0
= tcg_constant_i32(0);
1384 /* create mask for sticky bits */
1385 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1386 tcg_gen_shli_tl(mask
, mask
, 31);
1388 tcg_gen_add_tl(result
, r1
, r2
);
1390 tcg_gen_xor_tl(temp
, result
, r1
);
1391 tcg_gen_xor_tl(temp2
, r1
, r2
);
1392 tcg_gen_andc_tl(temp
, temp
, temp2
);
1393 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1395 tcg_gen_and_tl(temp
, temp
, mask
);
1396 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1398 tcg_gen_add_tl(temp
, result
, result
);
1399 tcg_gen_xor_tl(temp
, temp
, result
);
1400 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1402 tcg_gen_and_tl(temp
, temp
, mask
);
1403 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1404 /* write back result */
1405 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1408 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1411 TCGv temp
= tcg_constant_i32(r2
);
1412 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1415 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1417 TCGv temp
= tcg_temp_new_i32();
1418 TCGv result
= tcg_temp_new_i32();
1420 tcg_gen_sub_tl(result
, r1
, r2
);
1422 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1423 tcg_gen_xor_tl(temp
, r1
, r2
);
1424 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1426 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1428 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1429 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1431 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1432 /* write back result */
1433 tcg_gen_mov_tl(ret
, result
);
1437 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1439 TCGv temp
= tcg_temp_new();
1440 TCGv_i64 t0
= tcg_temp_new_i64();
1441 TCGv_i64 t1
= tcg_temp_new_i64();
1442 TCGv_i64 result
= tcg_temp_new_i64();
1444 tcg_gen_sub_i64(result
, r1
, r2
);
1446 tcg_gen_xor_i64(t1
, result
, r1
);
1447 tcg_gen_xor_i64(t0
, r1
, r2
);
1448 tcg_gen_and_i64(t1
, t1
, t0
);
1449 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1451 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1452 /* calc AV/SAV bits */
1453 tcg_gen_extrh_i64_i32(temp
, result
);
1454 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1455 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1457 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1458 /* write back result */
1459 tcg_gen_mov_i64(ret
, result
);
1462 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1464 TCGv result
= tcg_temp_new();
1465 TCGv temp
= tcg_temp_new();
1467 tcg_gen_sub_tl(result
, r1
, r2
);
1469 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1471 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1472 tcg_gen_xor_tl(temp
, r1
, r2
);
1473 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1475 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1477 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1478 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1480 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1481 /* write back result */
1482 tcg_gen_mov_tl(ret
, result
);
1485 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1487 TCGv temp
= tcg_temp_new();
1488 tcg_gen_not_tl(temp
, r2
);
1489 gen_addc_CC(ret
, r1
, temp
);
1492 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1495 TCGv temp
= tcg_temp_new();
1496 TCGv temp2
= tcg_temp_new();
1497 TCGv result
= tcg_temp_new();
1498 TCGv mask
= tcg_temp_new();
1499 TCGv t0
= tcg_constant_i32(0);
1501 /* create mask for sticky bits */
1502 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1503 tcg_gen_shli_tl(mask
, mask
, 31);
1505 tcg_gen_sub_tl(result
, r1
, r2
);
1507 tcg_gen_xor_tl(temp
, result
, r1
);
1508 tcg_gen_xor_tl(temp2
, r1
, r2
);
1509 tcg_gen_and_tl(temp
, temp
, temp2
);
1510 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1512 tcg_gen_and_tl(temp
, temp
, mask
);
1513 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1515 tcg_gen_add_tl(temp
, result
, result
);
1516 tcg_gen_xor_tl(temp
, temp
, result
);
1517 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1519 tcg_gen_and_tl(temp
, temp
, mask
);
1520 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1521 /* write back result */
1522 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1526 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1527 TCGv r3
, uint32_t n
, uint32_t mode
)
1529 TCGv t_n
= tcg_constant_i32(n
);
1530 TCGv temp
= tcg_temp_new();
1531 TCGv temp2
= tcg_temp_new();
1532 TCGv_i64 temp64
= tcg_temp_new_i64();
1535 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1538 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1541 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1544 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1547 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1548 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1549 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1553 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1554 TCGv r3
, uint32_t n
, uint32_t mode
)
1556 TCGv t_n
= tcg_constant_i32(n
);
1557 TCGv temp
= tcg_temp_new();
1558 TCGv temp2
= tcg_temp_new();
1559 TCGv temp3
= tcg_temp_new();
1560 TCGv_i64 temp64
= tcg_temp_new_i64();
1564 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1567 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1570 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1573 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1576 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1577 gen_subs(ret_low
, r1_low
, temp
);
1578 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1579 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1580 gen_subs(ret_high
, r1_high
, temp2
);
1581 /* combine v bits */
1582 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1583 /* combine av bits */
1584 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1588 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1589 TCGv r3
, uint32_t n
, uint32_t mode
)
1591 TCGv t_n
= tcg_constant_i32(n
);
1592 TCGv_i64 temp64
= tcg_temp_new_i64();
1593 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1594 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1597 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1600 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1603 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1606 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1609 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1610 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1611 /* write back result */
1612 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1616 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1617 TCGv r3
, uint32_t n
, uint32_t mode
)
1619 TCGv t_n
= tcg_constant_i32(n
);
1620 TCGv_i64 temp64
= tcg_temp_new_i64();
1621 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1624 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1627 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1630 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1633 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1636 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1637 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1638 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1642 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1645 TCGv t_n
= tcg_constant_i32(n
);
1646 TCGv_i64 temp64
= tcg_temp_new_i64();
1649 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1652 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1655 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1658 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1661 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1665 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1667 TCGv temp
= tcg_temp_new();
1668 TCGv temp2
= tcg_temp_new();
1670 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1671 tcg_gen_shli_tl(temp
, r1
, 16);
1672 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1676 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1677 uint32_t n
, uint32_t mode
)
1679 TCGv t_n
= tcg_constant_i32(n
);
1680 TCGv_i64 temp64
= tcg_temp_new_i64();
1683 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1686 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1689 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1692 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1695 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1699 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1701 TCGv temp
= tcg_temp_new();
1702 TCGv temp2
= tcg_temp_new();
1704 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1705 tcg_gen_shli_tl(temp
, r1
, 16);
1706 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1710 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1712 TCGv temp
= tcg_constant_i32(n
);
1713 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1717 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1719 TCGv temp
= tcg_constant_i32(n
);
1720 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1724 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1727 TCGv temp3
= tcg_temp_new();
1728 TCGv_i64 t1
= tcg_temp_new_i64();
1729 TCGv_i64 t2
= tcg_temp_new_i64();
1730 TCGv_i64 t3
= tcg_temp_new_i64();
1731 TCGv_i64 t4
= tcg_temp_new_i64();
1733 tcg_gen_ext_i32_i64(t2
, arg2
);
1734 tcg_gen_ext_i32_i64(t3
, arg3
);
1736 tcg_gen_mul_i64(t2
, t2
, t3
);
1738 tcg_gen_ext_i32_i64(t1
, arg1
);
1739 /* if we shift part of the fraction out, we need to round up */
1740 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1741 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1742 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1743 tcg_gen_add_i64(t2
, t2
, t4
);
1745 tcg_gen_sub_i64(t3
, t1
, t2
);
1746 tcg_gen_extrl_i64_i32(temp3
, t3
);
1748 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1749 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1750 tcg_gen_or_i64(t1
, t1
, t2
);
1751 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1752 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1754 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1755 /* Calc AV/SAV bits */
1756 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1757 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1759 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1760 /* write back result */
1761 tcg_gen_mov_tl(ret
, temp3
);
1765 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1767 TCGv temp
= tcg_temp_new();
1768 TCGv temp2
= tcg_temp_new();
1770 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1771 } else { /* n is expected to be 1 */
1772 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1773 tcg_gen_shli_tl(temp
, temp
, 1);
1774 /* catch special case r1 = r2 = 0x8000 */
1775 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1776 tcg_gen_sub_tl(temp
, temp
, temp2
);
1778 gen_sub_d(ret
, arg1
, temp
);
1782 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1784 TCGv temp
= tcg_temp_new();
1785 TCGv temp2
= tcg_temp_new();
1787 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1788 } else { /* n is expected to be 1 */
1789 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1790 tcg_gen_shli_tl(temp
, temp
, 1);
1791 /* catch special case r1 = r2 = 0x8000 */
1792 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1793 tcg_gen_sub_tl(temp
, temp
, temp2
);
1795 gen_subs(ret
, arg1
, temp
);
1799 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1800 TCGv arg3
, uint32_t n
)
1802 TCGv temp
= tcg_temp_new();
1803 TCGv temp2
= tcg_temp_new();
1804 TCGv_i64 t1
= tcg_temp_new_i64();
1805 TCGv_i64 t2
= tcg_temp_new_i64();
1806 TCGv_i64 t3
= tcg_temp_new_i64();
1809 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1810 } else { /* n is expected to be 1 */
1811 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1812 tcg_gen_shli_tl(temp
, temp
, 1);
1813 /* catch special case r1 = r2 = 0x8000 */
1814 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1815 tcg_gen_sub_tl(temp
, temp
, temp2
);
1817 tcg_gen_ext_i32_i64(t2
, temp
);
1818 tcg_gen_shli_i64(t2
, t2
, 16);
1819 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1820 gen_sub64_d(t3
, t1
, t2
);
1821 /* write back result */
1822 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1826 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1827 TCGv arg3
, uint32_t n
)
1829 TCGv temp
= tcg_temp_new();
1830 TCGv temp2
= tcg_temp_new();
1831 TCGv_i64 t1
= tcg_temp_new_i64();
1832 TCGv_i64 t2
= tcg_temp_new_i64();
1835 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1836 } else { /* n is expected to be 1 */
1837 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1838 tcg_gen_shli_tl(temp
, temp
, 1);
1839 /* catch special case r1 = r2 = 0x8000 */
1840 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1841 tcg_gen_sub_tl(temp
, temp
, temp2
);
1843 tcg_gen_ext_i32_i64(t2
, temp
);
1844 tcg_gen_shli_i64(t2
, t2
, 16);
1845 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1847 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
1848 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1852 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1853 TCGv arg3
, uint32_t n
)
1855 TCGv_i64 t1
= tcg_temp_new_i64();
1856 TCGv_i64 t2
= tcg_temp_new_i64();
1857 TCGv_i64 t3
= tcg_temp_new_i64();
1858 TCGv_i64 t4
= tcg_temp_new_i64();
1861 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1862 tcg_gen_ext_i32_i64(t2
, arg2
);
1863 tcg_gen_ext_i32_i64(t3
, arg3
);
1865 tcg_gen_mul_i64(t2
, t2
, t3
);
1867 tcg_gen_shli_i64(t2
, t2
, 1);
1869 tcg_gen_sub_i64(t4
, t1
, t2
);
1871 tcg_gen_xor_i64(t3
, t4
, t1
);
1872 tcg_gen_xor_i64(t2
, t1
, t2
);
1873 tcg_gen_and_i64(t3
, t3
, t2
);
1874 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1875 /* We produce an overflow on the host if the mul before was
1876 (0x80000000 * 0x80000000) << 1). If this is the
1877 case, we negate the ovf. */
1879 temp
= tcg_temp_new();
1880 temp2
= tcg_temp_new();
1881 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1882 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1883 tcg_gen_and_tl(temp
, temp
, temp2
);
1884 tcg_gen_shli_tl(temp
, temp
, 31);
1885 /* negate v bit, if special condition */
1886 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1888 /* write back result */
1889 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1891 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1892 /* Calc AV/SAV bits */
1893 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1894 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1896 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1900 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1903 TCGv_i64 t1
= tcg_temp_new_i64();
1904 TCGv_i64 t2
= tcg_temp_new_i64();
1905 TCGv_i64 t3
= tcg_temp_new_i64();
1906 TCGv_i64 t4
= tcg_temp_new_i64();
1908 tcg_gen_ext_i32_i64(t1
, arg1
);
1909 tcg_gen_ext_i32_i64(t2
, arg2
);
1910 tcg_gen_ext_i32_i64(t3
, arg3
);
1912 tcg_gen_mul_i64(t2
, t2
, t3
);
1913 /* if we shift part of the fraction out, we need to round up */
1914 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1915 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1916 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
1917 tcg_gen_add_i64(t3
, t3
, t4
);
1919 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
1923 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1924 TCGv arg3
, uint32_t n
)
1926 TCGv_i64 r1
= tcg_temp_new_i64();
1927 TCGv t_n
= tcg_constant_i32(n
);
1929 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1930 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1931 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1935 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1936 TCGv r3
, uint32_t n
, uint32_t mode
)
1938 TCGv t_n
= tcg_constant_i32(n
);
1939 TCGv temp
= tcg_temp_new();
1940 TCGv temp2
= tcg_temp_new();
1941 TCGv_i64 temp64
= tcg_temp_new_i64();
1944 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1947 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1950 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1953 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1956 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1957 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1958 tcg_gen_add_tl
, tcg_gen_sub_tl
);
1962 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1963 TCGv r3
, uint32_t n
, uint32_t mode
)
1965 TCGv t_n
= tcg_constant_i32(n
);
1966 TCGv_i64 temp64
= tcg_temp_new_i64();
1967 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1968 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1971 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1974 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1977 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1980 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1983 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
1984 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
1985 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
1986 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
1987 tcg_gen_shli_i64(temp64
, temp64
, 16);
1989 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
1990 /* write back result */
1991 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
1995 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1997 TCGv t_n
= tcg_constant_i32(n
);
1998 TCGv temp
= tcg_temp_new();
1999 TCGv temp2
= tcg_temp_new();
2000 TCGv_i64 temp64
= tcg_temp_new_i64();
2003 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2006 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2009 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2012 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2015 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2016 tcg_gen_shli_tl(temp
, r1
, 16);
2017 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2021 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2022 TCGv r3
, uint32_t n
, uint32_t mode
)
2024 TCGv t_n
= tcg_constant_i32(n
);
2025 TCGv temp
= tcg_temp_new();
2026 TCGv temp2
= tcg_temp_new();
2027 TCGv temp3
= tcg_temp_new();
2028 TCGv_i64 temp64
= tcg_temp_new_i64();
2032 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2035 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2038 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2041 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2044 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2045 gen_adds(ret_low
, r1_low
, temp
);
2046 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2047 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2048 gen_subs(ret_high
, r1_high
, temp2
);
2049 /* combine v bits */
2050 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2051 /* combine av bits */
2052 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2056 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2057 TCGv r3
, uint32_t n
, uint32_t mode
)
2059 TCGv t_n
= tcg_constant_i32(n
);
2060 TCGv_i64 temp64
= tcg_temp_new_i64();
2061 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2065 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2068 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2071 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2074 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2077 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2078 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2079 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2080 tcg_gen_shli_i64(temp64
, temp64
, 16);
2081 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2083 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2084 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2088 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2090 TCGv t_n
= tcg_constant_i32(n
);
2091 TCGv temp
= tcg_temp_new();
2092 TCGv temp2
= tcg_temp_new();
2093 TCGv_i64 temp64
= tcg_temp_new_i64();
2096 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2099 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2102 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2105 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2108 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2109 tcg_gen_shli_tl(temp
, r1
, 16);
2110 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2113 static inline void gen_abs(TCGv ret
, TCGv r1
)
2115 tcg_gen_abs_tl(ret
, r1
);
2116 /* overflow can only happen, if r1 = 0x80000000 */
2117 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2118 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2120 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2122 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2123 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2125 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2128 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2130 TCGv temp
= tcg_temp_new_i32();
2131 TCGv result
= tcg_temp_new_i32();
2133 tcg_gen_sub_tl(result
, r1
, r2
);
2134 tcg_gen_sub_tl(temp
, r2
, r1
);
2135 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2138 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2139 tcg_gen_xor_tl(temp
, result
, r2
);
2140 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2141 tcg_gen_xor_tl(temp
, r1
, r2
);
2142 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2144 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2146 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2147 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2149 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2150 /* write back result */
2151 tcg_gen_mov_tl(ret
, result
);
2154 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2156 TCGv temp
= tcg_constant_i32(con
);
2157 gen_absdif(ret
, r1
, temp
);
2160 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2162 TCGv temp
= tcg_constant_i32(con
);
2163 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2166 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2168 TCGv high
= tcg_temp_new();
2169 TCGv low
= tcg_temp_new();
2171 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2172 tcg_gen_mov_tl(ret
, low
);
2174 tcg_gen_sari_tl(low
, low
, 31);
2175 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2176 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2178 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2180 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2181 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2183 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2186 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2188 TCGv temp
= tcg_constant_i32(con
);
2189 gen_mul_i32s(ret
, r1
, temp
);
2192 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2194 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2196 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2198 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2200 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2201 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2203 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2206 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2209 TCGv temp
= tcg_constant_i32(con
);
2210 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2213 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2215 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2217 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2219 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2221 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2222 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2224 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2227 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2230 TCGv temp
= tcg_constant_i32(con
);
2231 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2234 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2236 TCGv temp
= tcg_constant_i32(con
);
2237 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2240 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2242 TCGv temp
= tcg_constant_i32(con
);
2243 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2246 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2247 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2249 TCGv temp
= tcg_constant_i32(con
);
2250 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2253 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2255 TCGv temp
= tcg_constant_i32(con
);
2256 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
/*
 * Q-format fractional multiply: (rh:rl) = (arg1 * arg2) adjusted by n and
 * up_shift, updating PSW V/SV/AV/SAV.
 *  n == 0: plain signed multiply; n == 1: result doubled (fractional format).
 *  up_shift selects how much of the 64-bit product is kept (0, 16 or 32).
 * NOTE(review): reconstructed from a lossy extraction — the 0x80000000
 * comparison constants follow from the "r1 = r2 = 0x8000" overflow comment;
 * confirm against upstream before relying on exact values.
 */
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            /* keep the high 32 bits of the product in rh */
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        /* reset v bit: no overflow possible without the doubling shift */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        /* PSW.V lives in bit 31 */
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit (sticky) */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit: av = result[31] ^ result[30] */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/*
 * 16-bit Q-format fractional multiply producing a 32-bit result.
 * n == 0: plain multiply; n == 1: doubled, with the special case
 * r1 = r2 = 0x8000 corrected down to 0x7FFFFFFF via the setcond/sub pair.
 * Always clears PSW.V and updates AV/SAV from the result.
 */
static inline void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/*
 * Q-format fractional multiply with rounding (MULR.Q): round by adding
 * 0x8000, then keep only the upper halfword of the result.
 * The 0x80008000 / 0x8001 correction handles the r1 = r2 = 0x8000 case.
 * Clears PSW.V and updates AV/SAV.
 */
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        /* rounding */
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* rounding */
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    /* reset v bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);
}
/* 64-bit saturating signed multiply-accumulate:
   (ret_high:ret_low) = sat((r2_high:r2_low) + r1 * r3); helper sets PSW. */
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Immediate form of gen_madds_64. */
static inline void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
/* 64-bit saturating unsigned multiply-accumulate (suov helper). */
static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Immediate form of gen_maddsu_64. */
static inline void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
/* Saturating signed multiply-subtract with immediate: ret = r2 - r1 * con. */
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
}
/* Saturating unsigned multiply-subtract with immediate (suov helper). */
static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
}
/* 64-bit saturating signed multiply-subtract; helper sets PSW. */
static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Immediate form of gen_msubs_64. */
static inline void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
/* 64-bit saturating unsigned multiply-subtract (suov helper). */
static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
/* Immediate form of gen_msubsu_64. */
static inline void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
/* Signed clamp of arg into [low, up]; does not touch PSW flags. */
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
    tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
}
/* Unsigned clamp of arg to at most up; does not touch PSW flags. */
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
}
/* Logical shift by signed count: positive = left, negative = right.
   -32 is special-cased because a 32-bit shift amount would be undefined. */
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}
/* Per-halfword logical shift (SH.H): each 16-bit lane of r1 is shifted
   independently by the signed count; lanes are recombined with deposit. */
static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        /* shifting out the entire lane always yields 0 */
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
    }
}
/*
 * Arithmetic shift by immediate (SHA): positive count shifts left with
 * overflow detection, negative shifts right arithmetically.
 * Side effects: PSW.C holds the bits shifted out, PSW.V/SV flag overflow
 * on left shifts, and AV/SAV are updated from the result.
 */
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        /* set PSW.C from the operand (all bits shifted out) */
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        /* clear PSW.V: right shift cannot overflow */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        /* largest/smallest values that survive the left shift unchanged */
        TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);

        /* calc carry: the shift_count topmost bits of r1 */
        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv (sticky) */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        /* do the shift */
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        /* clear PSW.V: right shift cannot overflow */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* calc carry: the -shift_count lowest bits of r1 */
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* do the shift */
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/* Saturating arithmetic shift; the ssov helper updates PSW. */
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
}
/* Immediate form of gen_shas. */
static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_shas(ret, r1, temp);
}
/* Per-halfword arithmetic shift (SHA.H): each 16-bit lane of r1 is shifted
   independently; the low lane is sign-extended before a right shift so the
   correct sign bit propagates. Does not touch PSW flags. */
static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
{
    TCGv low, high;

    if (shift_count == 0) {
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count > 0) {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_andi_tl(high, r1, 0xffff0000);
        tcg_gen_shli_tl(low, r1, shift_count);
        tcg_gen_shli_tl(ret, high, shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
    } else {
        low = tcg_temp_new();
        high = tcg_temp_new();

        /* sign-extend the low halfword so sari shifts in its own sign */
        tcg_gen_ext16s_tl(low, r1);
        tcg_gen_sari_tl(low, low, -shift_count);
        tcg_gen_sari_tl(ret, r1, -shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
    }
}
/* ret = {ret[30:0], (r1 cond r2)}; */
/* Shift the condition result into ret's LSB, shifting ret left by one. */
static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_shli_tl(temp, ret, 1);
    tcg_gen_setcond_tl(cond, temp2, r1, r2);
    tcg_gen_or_tl(ret, temp, temp2);
}
/* Immediate form of gen_sh_cond. */
static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_sh_cond(cond, ret, r1, temp);
}
/* Saturating signed add; helper updates PSW. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}
/* Saturating signed add with immediate. */
static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_add_ssov(ret, cpu_env, r1, temp);
}
/* Saturating unsigned add with immediate. */
static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_add_suov(ret, cpu_env, r1, temp);
}
/* Saturating signed subtract; helper updates PSW. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
/* Saturating unsigned subtract; helper updates PSW. */
static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, cpu_env, r1, r2);
}
/* ret[0] = op2(ret[0], op1(r1[pos1], r2[pos2])); ret[31:1] unchanged.
   Two-operation single-bit combine used by the bit-logic instructions.
   NOTE(review): pos1/pos2 parameter types reconstructed from a lossy
   extraction — confirm against upstream. */
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits into bit 0 of the temporaries */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    (*op2)(temp1, ret, temp1);

    /* write only the result bit back into ret[0] */
    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
}
/* ret = r1[pos1] op1 r2[pos2]; */
/* Single-operation bit combine: the whole of ret becomes the 1-bit result.
   NOTE(review): pos1/pos2 parameter types reconstructed from a lossy
   extraction — confirm against upstream. */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    /* move the selected bits into bit 0 */
    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    /* keep only the result bit */
    tcg_gen_andi_tl(ret, ret, 0x1);
}
/* ret[0] = op(ret[0], (r1 cond r2)); ret[31:1] unchanged.
   Accumulating compare used by AND.cond / OR.cond style instructions. */
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* temp = (arg1 cond arg2 )*/
    tcg_gen_setcond_tl(cond, temp, r1, r2);
    /* temp2 = ret[0] */
    tcg_gen_andi_tl(temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
}
/* Immediate form of gen_accumulating_cond. */
static inline void
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
                       void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_constant_i32(con);
    gen_accumulating_cond(cond, ret, r1, temp, op);
}
/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
/* Word-wide condition: negate the 0/1 setcond result to fan it out. */
static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
{
    tcg_gen_setcond_tl(cond, ret, r1, r2);
    tcg_gen_neg_tl(ret, ret);
}
/* EQANY.B with immediate: ret = 1 if any byte of r1 equals the
   corresponding byte of con, else 0. Each byte lane is compared in place
   (masked, not shifted down), which is why the constants keep their
   position: e.g. lane 1 compares against con & 0xff00. */
static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv b0 = tcg_temp_new();
    TCGv b1 = tcg_temp_new();
    TCGv b2 = tcg_temp_new();
    TCGv b3 = tcg_temp_new();

    /* byte 0 */
    tcg_gen_andi_tl(b0, r1, 0xff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);

    /* byte 1 */
    tcg_gen_andi_tl(b1, r1, 0xff00);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);

    /* byte 2 */
    tcg_gen_andi_tl(b2, r1, 0xff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);

    /* byte 3 */
    tcg_gen_andi_tl(b3, r1, 0xff000000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);

    /* combine them */
    tcg_gen_or_tl(ret, b0, b1);
    tcg_gen_or_tl(ret, ret, b2);
    tcg_gen_or_tl(ret, ret, b3);
}
/* EQANY.H with immediate: ret = 1 if either halfword of r1 equals the
   corresponding halfword of con, else 0. Lanes are compared in place. */
static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();

    /* halfword 0 */
    tcg_gen_andi_tl(h0, r1, 0xffff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);

    /* halfword 1 */
    tcg_gen_andi_tl(h1, r1, 0xffff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);

    /* combine them */
    tcg_gen_or_tl(ret, h0, h1);
}
/* mask = ((1 << width) -1) << pos;
   ret = (r1 & ~mask) | (r2 << pos) & mask); */
/* Runtime-variable bitfield insert (INSERT with register width/pos). */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* build the mask at runtime: ((1 << width) - 1) << pos */
    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    /* field bits from r2, remaining bits from r1 */
    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);
}
/* BSPLIT: helper splits r1's bits into two 32-bit halves of a 64-bit
   result, unpacked into rl/rh. */
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_bsplit(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);
}
/* UNPACK: helper produces a 64-bit result from r1, unpacked into rl/rh. */
static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_unpack(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);
}
/* DVINIT.B: byte-divide initialization. The 1.3 and 1.3.1 ISA revisions
   differ in overflow behavior, so the helper is chosen per CPU feature. */
static inline void
gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!has_feature(ctx, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
    } else {
        gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);
}
/* DVINIT.H: halfword-divide initialization; helper chosen per ISA
   revision, as for gen_dvinit_b. */
static inline void
gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!has_feature(ctx, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
    } else {
        gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);
}
/* Update PSW user status bits after a packed-halfword multiply:
   AV is derived from both result halves (bit31^bit30 of each), SAV is the
   sticky OR of AV, and V is cleared. */
static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
{
    TCGv temp = tcg_temp_new();
    /* calc AV bit for the low result */
    tcg_gen_add_tl(temp, arg_low, arg_low);
    tcg_gen_xor_tl(temp, temp, arg_low);
    /* calc AV bit for the high result */
    tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* no overflow for this operation */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
}
/* Update PSW user status bits after a rounded packed multiply (MULR.H):
   AV from the single packed result, sticky SAV, V cleared. */
static void gen_calc_usb_mulr_h(TCGv arg)
{
    TCGv temp = tcg_temp_new();
    /* calc AV bit */
    tcg_gen_add_tl(temp, arg, arg);
    tcg_gen_xor_tl(temp, temp, arg);
    tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* clear V bit */
    tcg_gen_movi_tl(cpu_PSW_V, 0);
}
2821 /* helpers for generating program flow micro-ops */
/* Write an immediate guest PC into the cpu_PC global. */
static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}
/* End the TB with a jump to dest: use a direct chained goto_tb when the
   translator allows it, otherwise fall back to a PC lookup.
   NOTE(review): the goto_tb/save_pc lines were dropped by the extraction
   and are restored here — confirm against upstream. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        gen_save_pc(dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}
/* Raise a synchronous TriCore trap (class/tin) at the current insn and
   terminate translation of this block. */
static void generate_trap(DisasContext *ctx, int class, int tin)
{
    TCGv_i32 classtemp = tcg_constant_i32(class);
    TCGv_i32 tintemp = tcg_constant_i32(tin);

    /* the exception must see the PC of the faulting instruction */
    gen_save_pc(ctx->base.pc_next);
    gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
    ctx->base.is_jmp = DISAS_NORETURN;
}
/* Conditional branch: if (r1 cond r2) jump to pc_next + address * 2,
   else fall through to the next instruction. Both exits end the TB. */
static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
                                   TCGv r2, int16_t address)
{
    TCGLabel *jumpLabel = gen_new_label();
    tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);

    /* not-taken path */
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    /* taken path; displacement is in halfwords */
    gen_set_label(jumpLabel);
    gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
}
/* Immediate-comparand form of gen_branch_cond. */
static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
                                    int r2, int16_t address)
{
    TCGv temp = tcg_constant_i32(r2);
    gen_branch_cond(ctx, cond, r1, temp, address);
}
/* LOOP: decrement address register a[r1]; branch back by offset unless the
   counter wrapped to -1 (i.e. it was 0 before the decrement).
   NOTE(review): the gen_set_label(l1) line was dropped by the extraction
   and is restored here — confirm against upstream. */
static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
    /* loop taken: jump back */
    gen_goto_tb(ctx, 1, ctx->base.pc_next + offset);
    /* loop exhausted: fall through */
    gen_set_label(l1);
    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
}
/* FCALL prologue: push the return address (a[11]) onto the stack (a[10]),
   set a[11] to the address after this insn, and commit the new SP last so
   a faulting store leaves SP unchanged. */
static void gen_fcall_save_ctx(DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
    tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
    tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
    tcg_gen_mov_tl(cpu_gpr_a[10], temp);
}
/* FRET: jump to the return address in a[11] (with bit 0 masked off for
   halfword alignment), pop the saved a[11] from the stack, adjust SP,
   and end the TB. The masked PC is captured before a[11] is reloaded. */
static void gen_fret(DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
    tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
    tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
    tcg_gen_mov_tl(cpu_PC, temp);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
}
2902 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2903 int r2
, int32_t constant
, int32_t offset
)
2909 /* SB-format jumps */
2912 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2914 case OPC1_32_B_CALL
:
2915 case OPC1_16_SB_CALL
:
2916 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2917 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2920 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2922 case OPC1_16_SB_JNZ
:
2923 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2925 /* SBC-format jumps */
2926 case OPC1_16_SBC_JEQ
:
2927 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2929 case OPC1_16_SBC_JEQ2
:
2930 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2933 case OPC1_16_SBC_JNE
:
2934 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2936 case OPC1_16_SBC_JNE2
:
2937 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2938 constant
, offset
+ 16);
2940 /* SBRN-format jumps */
2941 case OPC1_16_SBRN_JZ_T
:
2942 temp
= tcg_temp_new();
2943 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2944 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2946 case OPC1_16_SBRN_JNZ_T
:
2947 temp
= tcg_temp_new();
2948 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2949 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2951 /* SBR-format jumps */
2952 case OPC1_16_SBR_JEQ
:
2953 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2956 case OPC1_16_SBR_JEQ2
:
2957 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2960 case OPC1_16_SBR_JNE
:
2961 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2964 case OPC1_16_SBR_JNE2
:
2965 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2968 case OPC1_16_SBR_JNZ
:
2969 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2971 case OPC1_16_SBR_JNZ_A
:
2972 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2974 case OPC1_16_SBR_JGEZ
:
2975 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2977 case OPC1_16_SBR_JGTZ
:
2978 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2980 case OPC1_16_SBR_JLEZ
:
2981 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2983 case OPC1_16_SBR_JLTZ
:
2984 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2986 case OPC1_16_SBR_JZ
:
2987 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2989 case OPC1_16_SBR_JZ_A
:
2990 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2992 case OPC1_16_SBR_LOOP
:
2993 gen_loop(ctx
, r1
, offset
* 2 - 32);
2995 /* SR-format jumps */
2997 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2998 tcg_gen_exit_tb(NULL
, 0);
3000 case OPC2_32_SYS_RET
:
3001 case OPC2_16_SR_RET
:
3002 gen_helper_ret(cpu_env
);
3003 tcg_gen_exit_tb(NULL
, 0);
3006 case OPC1_32_B_CALLA
:
3007 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3008 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3010 case OPC1_32_B_FCALL
:
3011 gen_fcall_save_ctx(ctx
);
3012 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3014 case OPC1_32_B_FCALLA
:
3015 gen_fcall_save_ctx(ctx
);
3016 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3019 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3022 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3025 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3026 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3029 case OPCM_32_BRC_EQ_NEQ
:
3030 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3031 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3033 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3036 case OPCM_32_BRC_GE
:
3037 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3038 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3040 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3041 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3045 case OPCM_32_BRC_JLT
:
3046 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3047 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3049 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3050 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3054 case OPCM_32_BRC_JNE
:
3055 temp
= tcg_temp_new();
3056 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3057 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3058 /* subi is unconditional */
3059 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3060 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3062 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3063 /* addi is unconditional */
3064 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3065 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3069 case OPCM_32_BRN_JTT
:
3070 n
= MASK_OP_BRN_N(ctx
->opcode
);
3072 temp
= tcg_temp_new();
3073 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3075 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3076 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3078 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3082 case OPCM_32_BRR_EQ_NEQ
:
3083 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3084 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3087 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3091 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3092 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3093 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3096 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3100 case OPCM_32_BRR_GE
:
3101 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3102 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3105 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3109 case OPCM_32_BRR_JLT
:
3110 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3111 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3114 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3118 case OPCM_32_BRR_LOOP
:
3119 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3120 gen_loop(ctx
, r2
, offset
* 2);
3122 /* OPC2_32_BRR_LOOPU */
3123 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3126 case OPCM_32_BRR_JNE
:
3127 temp
= tcg_temp_new();
3128 temp2
= tcg_temp_new();
3129 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3130 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3131 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3132 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3133 /* subi is unconditional */
3134 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3135 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3137 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3138 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3139 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3140 /* addi is unconditional */
3141 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3142 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3145 case OPCM_32_BRR_JNZ
:
3146 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3147 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3149 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3153 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3155 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3160 * Functions for decoding instructions
3163 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3169 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3170 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3173 case OPC1_16_SRC_ADD
:
3174 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3176 case OPC1_16_SRC_ADD_A15
:
3177 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3179 case OPC1_16_SRC_ADD_15A
:
3180 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3182 case OPC1_16_SRC_ADD_A
:
3183 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3185 case OPC1_16_SRC_CADD
:
3186 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3189 case OPC1_16_SRC_CADDN
:
3190 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3193 case OPC1_16_SRC_CMOV
:
3194 temp
= tcg_constant_tl(0);
3195 temp2
= tcg_constant_tl(const4
);
3196 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3197 temp2
, cpu_gpr_d
[r1
]);
3199 case OPC1_16_SRC_CMOVN
:
3200 temp
= tcg_constant_tl(0);
3201 temp2
= tcg_constant_tl(const4
);
3202 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3203 temp2
, cpu_gpr_d
[r1
]);
3205 case OPC1_16_SRC_EQ
:
3206 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3209 case OPC1_16_SRC_LT
:
3210 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3213 case OPC1_16_SRC_MOV
:
3214 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3216 case OPC1_16_SRC_MOV_A
:
3217 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3218 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3220 case OPC1_16_SRC_MOV_E
:
3221 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3222 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3223 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3225 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3228 case OPC1_16_SRC_SH
:
3229 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3231 case OPC1_16_SRC_SHA
:
3232 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3235 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3239 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3244 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3245 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3248 case OPC1_16_SRR_ADD
:
3249 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3251 case OPC1_16_SRR_ADD_A15
:
3252 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3254 case OPC1_16_SRR_ADD_15A
:
3255 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3257 case OPC1_16_SRR_ADD_A
:
3258 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3260 case OPC1_16_SRR_ADDS
:
3261 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3263 case OPC1_16_SRR_AND
:
3264 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3266 case OPC1_16_SRR_CMOV
:
3267 temp
= tcg_constant_tl(0);
3268 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3269 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3271 case OPC1_16_SRR_CMOVN
:
3272 temp
= tcg_constant_tl(0);
3273 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3274 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3276 case OPC1_16_SRR_EQ
:
3277 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3280 case OPC1_16_SRR_LT
:
3281 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3284 case OPC1_16_SRR_MOV
:
3285 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3287 case OPC1_16_SRR_MOV_A
:
3288 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3290 case OPC1_16_SRR_MOV_AA
:
3291 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3293 case OPC1_16_SRR_MOV_D
:
3294 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3296 case OPC1_16_SRR_MUL
:
3297 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3299 case OPC1_16_SRR_OR
:
3300 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3302 case OPC1_16_SRR_SUB
:
3303 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3305 case OPC1_16_SRR_SUB_A15B
:
3306 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3308 case OPC1_16_SRR_SUB_15AB
:
3309 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3311 case OPC1_16_SRR_SUBS
:
3312 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3314 case OPC1_16_SRR_XOR
:
3315 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3318 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3322 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3326 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3327 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3330 case OPC1_16_SSR_ST_A
:
3331 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3333 case OPC1_16_SSR_ST_A_POSTINC
:
3334 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3335 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3337 case OPC1_16_SSR_ST_B
:
3338 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3340 case OPC1_16_SSR_ST_B_POSTINC
:
3341 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3342 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3344 case OPC1_16_SSR_ST_H
:
3345 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3347 case OPC1_16_SSR_ST_H_POSTINC
:
3348 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3349 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3351 case OPC1_16_SSR_ST_W
:
3352 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3354 case OPC1_16_SSR_ST_W_POSTINC
:
3355 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3356 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3359 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3363 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3367 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3370 case OPC1_16_SC_AND
:
3371 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3373 case OPC1_16_SC_BISR
:
3374 gen_helper_1arg(bisr
, const16
& 0xff);
3376 case OPC1_16_SC_LD_A
:
3377 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3379 case OPC1_16_SC_LD_W
:
3380 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3382 case OPC1_16_SC_MOV
:
3383 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3386 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3388 case OPC1_16_SC_ST_A
:
3389 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3391 case OPC1_16_SC_ST_W
:
3392 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3394 case OPC1_16_SC_SUB_A
:
3395 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3398 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3402 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3406 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3407 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3411 case OPC1_16_SLR_LD_A
:
3412 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3414 case OPC1_16_SLR_LD_A_POSTINC
:
3415 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3416 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3418 case OPC1_16_SLR_LD_BU
:
3419 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3421 case OPC1_16_SLR_LD_BU_POSTINC
:
3422 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3423 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3425 case OPC1_16_SLR_LD_H
:
3426 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3428 case OPC1_16_SLR_LD_H_POSTINC
:
3429 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3430 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3432 case OPC1_16_SLR_LD_W
:
3433 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3435 case OPC1_16_SLR_LD_W_POSTINC
:
3436 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3437 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3440 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3444 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3449 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3450 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3454 case OPC1_16_SRO_LD_A
:
3455 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3457 case OPC1_16_SRO_LD_BU
:
3458 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3460 case OPC1_16_SRO_LD_H
:
3461 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3463 case OPC1_16_SRO_LD_W
:
3464 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3466 case OPC1_16_SRO_ST_A
:
3467 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3469 case OPC1_16_SRO_ST_B
:
3470 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3472 case OPC1_16_SRO_ST_H
:
3473 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3475 case OPC1_16_SRO_ST_W
:
3476 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3479 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3483 static void decode_sr_system(DisasContext
*ctx
)
3486 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3489 case OPC2_16_SR_NOP
:
3491 case OPC2_16_SR_RET
:
3492 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3494 case OPC2_16_SR_RFE
:
3495 gen_helper_rfe(cpu_env
);
3496 tcg_gen_exit_tb(NULL
, 0);
3497 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3499 case OPC2_16_SR_DEBUG
:
3500 /* raise EXCP_DEBUG */
3502 case OPC2_16_SR_FRET
:
3506 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3510 static void decode_sr_accu(DisasContext
*ctx
)
3515 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3516 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3519 case OPC2_16_SR_RSUB
:
3520 /* calc V bit -- overflow only if r1 = -0x80000000 */
3521 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], -0x80000000);
3522 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3524 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3526 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3528 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3529 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3531 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3533 case OPC2_16_SR_SAT_B
:
3534 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3536 case OPC2_16_SR_SAT_BU
:
3537 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3539 case OPC2_16_SR_SAT_H
:
3540 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3542 case OPC2_16_SR_SAT_HU
:
3543 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3546 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3550 static void decode_16Bit_opc(DisasContext
*ctx
)
3558 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3560 /* handle ADDSC.A opcode only being 6 bit long */
3561 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3562 op1
= OPC1_16_SRRS_ADDSC_A
;
3566 case OPC1_16_SRC_ADD
:
3567 case OPC1_16_SRC_ADD_A15
:
3568 case OPC1_16_SRC_ADD_15A
:
3569 case OPC1_16_SRC_ADD_A
:
3570 case OPC1_16_SRC_CADD
:
3571 case OPC1_16_SRC_CADDN
:
3572 case OPC1_16_SRC_CMOV
:
3573 case OPC1_16_SRC_CMOVN
:
3574 case OPC1_16_SRC_EQ
:
3575 case OPC1_16_SRC_LT
:
3576 case OPC1_16_SRC_MOV
:
3577 case OPC1_16_SRC_MOV_A
:
3578 case OPC1_16_SRC_MOV_E
:
3579 case OPC1_16_SRC_SH
:
3580 case OPC1_16_SRC_SHA
:
3581 decode_src_opc(ctx
, op1
);
3584 case OPC1_16_SRR_ADD
:
3585 case OPC1_16_SRR_ADD_A15
:
3586 case OPC1_16_SRR_ADD_15A
:
3587 case OPC1_16_SRR_ADD_A
:
3588 case OPC1_16_SRR_ADDS
:
3589 case OPC1_16_SRR_AND
:
3590 case OPC1_16_SRR_CMOV
:
3591 case OPC1_16_SRR_CMOVN
:
3592 case OPC1_16_SRR_EQ
:
3593 case OPC1_16_SRR_LT
:
3594 case OPC1_16_SRR_MOV
:
3595 case OPC1_16_SRR_MOV_A
:
3596 case OPC1_16_SRR_MOV_AA
:
3597 case OPC1_16_SRR_MOV_D
:
3598 case OPC1_16_SRR_MUL
:
3599 case OPC1_16_SRR_OR
:
3600 case OPC1_16_SRR_SUB
:
3601 case OPC1_16_SRR_SUB_A15B
:
3602 case OPC1_16_SRR_SUB_15AB
:
3603 case OPC1_16_SRR_SUBS
:
3604 case OPC1_16_SRR_XOR
:
3605 decode_srr_opc(ctx
, op1
);
3608 case OPC1_16_SSR_ST_A
:
3609 case OPC1_16_SSR_ST_A_POSTINC
:
3610 case OPC1_16_SSR_ST_B
:
3611 case OPC1_16_SSR_ST_B_POSTINC
:
3612 case OPC1_16_SSR_ST_H
:
3613 case OPC1_16_SSR_ST_H_POSTINC
:
3614 case OPC1_16_SSR_ST_W
:
3615 case OPC1_16_SSR_ST_W_POSTINC
:
3616 decode_ssr_opc(ctx
, op1
);
3619 case OPC1_16_SRRS_ADDSC_A
:
3620 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3621 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3622 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3623 temp
= tcg_temp_new();
3624 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3625 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3628 case OPC1_16_SLRO_LD_A
:
3629 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3630 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3631 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3633 case OPC1_16_SLRO_LD_BU
:
3634 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3635 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3636 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3638 case OPC1_16_SLRO_LD_H
:
3639 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3640 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3641 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3643 case OPC1_16_SLRO_LD_W
:
3644 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3645 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3646 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3649 case OPC1_16_SB_CALL
:
3651 case OPC1_16_SB_JNZ
:
3653 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3654 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3657 case OPC1_16_SBC_JEQ
:
3658 case OPC1_16_SBC_JNE
:
3659 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3660 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3661 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3663 case OPC1_16_SBC_JEQ2
:
3664 case OPC1_16_SBC_JNE2
:
3665 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3666 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3667 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3668 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3670 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3674 case OPC1_16_SBRN_JNZ_T
:
3675 case OPC1_16_SBRN_JZ_T
:
3676 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3677 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3678 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3681 case OPC1_16_SBR_JEQ2
:
3682 case OPC1_16_SBR_JNE2
:
3683 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3684 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3685 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3686 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3688 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3691 case OPC1_16_SBR_JEQ
:
3692 case OPC1_16_SBR_JGEZ
:
3693 case OPC1_16_SBR_JGTZ
:
3694 case OPC1_16_SBR_JLEZ
:
3695 case OPC1_16_SBR_JLTZ
:
3696 case OPC1_16_SBR_JNE
:
3697 case OPC1_16_SBR_JNZ
:
3698 case OPC1_16_SBR_JNZ_A
:
3699 case OPC1_16_SBR_JZ
:
3700 case OPC1_16_SBR_JZ_A
:
3701 case OPC1_16_SBR_LOOP
:
3702 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3703 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3704 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3707 case OPC1_16_SC_AND
:
3708 case OPC1_16_SC_BISR
:
3709 case OPC1_16_SC_LD_A
:
3710 case OPC1_16_SC_LD_W
:
3711 case OPC1_16_SC_MOV
:
3713 case OPC1_16_SC_ST_A
:
3714 case OPC1_16_SC_ST_W
:
3715 case OPC1_16_SC_SUB_A
:
3716 decode_sc_opc(ctx
, op1
);
3719 case OPC1_16_SLR_LD_A
:
3720 case OPC1_16_SLR_LD_A_POSTINC
:
3721 case OPC1_16_SLR_LD_BU
:
3722 case OPC1_16_SLR_LD_BU_POSTINC
:
3723 case OPC1_16_SLR_LD_H
:
3724 case OPC1_16_SLR_LD_H_POSTINC
:
3725 case OPC1_16_SLR_LD_W
:
3726 case OPC1_16_SLR_LD_W_POSTINC
:
3727 decode_slr_opc(ctx
, op1
);
3730 case OPC1_16_SRO_LD_A
:
3731 case OPC1_16_SRO_LD_BU
:
3732 case OPC1_16_SRO_LD_H
:
3733 case OPC1_16_SRO_LD_W
:
3734 case OPC1_16_SRO_ST_A
:
3735 case OPC1_16_SRO_ST_B
:
3736 case OPC1_16_SRO_ST_H
:
3737 case OPC1_16_SRO_ST_W
:
3738 decode_sro_opc(ctx
, op1
);
3741 case OPC1_16_SSRO_ST_A
:
3742 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3743 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3744 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3746 case OPC1_16_SSRO_ST_B
:
3747 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3748 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3749 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3751 case OPC1_16_SSRO_ST_H
:
3752 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3753 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3754 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3756 case OPC1_16_SSRO_ST_W
:
3757 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3758 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3759 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3762 case OPCM_16_SR_SYSTEM
:
3763 decode_sr_system(ctx
);
3765 case OPCM_16_SR_ACCU
:
3766 decode_sr_accu(ctx
);
3769 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3770 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3772 case OPC1_16_SR_NOT
:
3773 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3774 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3777 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3782 * 32 bit instructions
3786 static void decode_abs_ldw(DisasContext
*ctx
)
3793 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3794 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3795 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3797 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3800 case OPC2_32_ABS_LD_A
:
3801 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3803 case OPC2_32_ABS_LD_D
:
3805 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3807 case OPC2_32_ABS_LD_DA
:
3809 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3811 case OPC2_32_ABS_LD_W
:
3812 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3815 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3819 static void decode_abs_ldb(DisasContext
*ctx
)
3826 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3827 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3828 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3830 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3833 case OPC2_32_ABS_LD_B
:
3834 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3836 case OPC2_32_ABS_LD_BU
:
3837 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3839 case OPC2_32_ABS_LD_H
:
3840 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3842 case OPC2_32_ABS_LD_HU
:
3843 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3846 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3850 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3857 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3858 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3859 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3861 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3864 case OPC2_32_ABS_LDMST
:
3865 gen_ldmst(ctx
, r1
, temp
);
3867 case OPC2_32_ABS_SWAP_W
:
3868 gen_swap(ctx
, r1
, temp
);
3871 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3875 static void decode_abs_ldst_context(DisasContext
*ctx
)
3880 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3881 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3884 case OPC2_32_ABS_LDLCX
:
3885 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3887 case OPC2_32_ABS_LDUCX
:
3888 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3890 case OPC2_32_ABS_STLCX
:
3891 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3893 case OPC2_32_ABS_STUCX
:
3894 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3897 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3901 static void decode_abs_store(DisasContext
*ctx
)
3908 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3909 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3910 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3912 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3915 case OPC2_32_ABS_ST_A
:
3916 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3918 case OPC2_32_ABS_ST_D
:
3920 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3922 case OPC2_32_ABS_ST_DA
:
3924 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3926 case OPC2_32_ABS_ST_W
:
3927 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3930 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3934 static void decode_abs_storeb_h(DisasContext
*ctx
)
3941 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3942 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3943 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3945 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3948 case OPC2_32_ABS_ST_B
:
3949 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3951 case OPC2_32_ABS_ST_H
:
3952 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3955 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3961 static void decode_bit_andacc(DisasContext
*ctx
)
3967 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3968 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3969 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3970 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3971 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3972 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3976 case OPC2_32_BIT_AND_AND_T
:
3977 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3978 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3980 case OPC2_32_BIT_AND_ANDN_T
:
3981 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3982 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3984 case OPC2_32_BIT_AND_NOR_T
:
3985 if (TCG_TARGET_HAS_andc_i32
) {
3986 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3987 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3989 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3990 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3993 case OPC2_32_BIT_AND_OR_T
:
3994 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3995 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3998 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4002 static void decode_bit_logical_t(DisasContext
*ctx
)
4007 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4008 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4009 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4010 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4011 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4012 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4015 case OPC2_32_BIT_AND_T
:
4016 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4017 pos1
, pos2
, &tcg_gen_and_tl
);
4019 case OPC2_32_BIT_ANDN_T
:
4020 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4021 pos1
, pos2
, &tcg_gen_andc_tl
);
4023 case OPC2_32_BIT_NOR_T
:
4024 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4025 pos1
, pos2
, &tcg_gen_nor_tl
);
4027 case OPC2_32_BIT_OR_T
:
4028 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4029 pos1
, pos2
, &tcg_gen_or_tl
);
4032 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4036 static void decode_bit_insert(DisasContext
*ctx
)
4042 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4043 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4044 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4045 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4046 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4047 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4049 temp
= tcg_temp_new();
4051 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4052 if (op2
== OPC2_32_BIT_INSN_T
) {
4053 tcg_gen_not_tl(temp
, temp
);
4055 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4058 static void decode_bit_logical_t2(DisasContext
*ctx
)
4065 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4066 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4067 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4068 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4069 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4070 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4073 case OPC2_32_BIT_NAND_T
:
4074 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4075 pos1
, pos2
, &tcg_gen_nand_tl
);
4077 case OPC2_32_BIT_ORN_T
:
4078 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4079 pos1
, pos2
, &tcg_gen_orc_tl
);
4081 case OPC2_32_BIT_XNOR_T
:
4082 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4083 pos1
, pos2
, &tcg_gen_eqv_tl
);
4085 case OPC2_32_BIT_XOR_T
:
4086 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4087 pos1
, pos2
, &tcg_gen_xor_tl
);
4090 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4094 static void decode_bit_orand(DisasContext
*ctx
)
4101 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4102 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4103 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4104 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4105 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4106 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4109 case OPC2_32_BIT_OR_AND_T
:
4110 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4111 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4113 case OPC2_32_BIT_OR_ANDN_T
:
4114 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4115 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4117 case OPC2_32_BIT_OR_NOR_T
:
4118 if (TCG_TARGET_HAS_orc_i32
) {
4119 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4120 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4122 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4123 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4126 case OPC2_32_BIT_OR_OR_T
:
4127 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4128 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4131 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4135 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4142 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4143 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4144 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4145 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4146 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4147 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4149 temp
= tcg_temp_new();
4152 case OPC2_32_BIT_SH_AND_T
:
4153 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4154 pos1
, pos2
, &tcg_gen_and_tl
);
4156 case OPC2_32_BIT_SH_ANDN_T
:
4157 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4158 pos1
, pos2
, &tcg_gen_andc_tl
);
4160 case OPC2_32_BIT_SH_NOR_T
:
4161 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4162 pos1
, pos2
, &tcg_gen_nor_tl
);
4164 case OPC2_32_BIT_SH_OR_T
:
4165 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4166 pos1
, pos2
, &tcg_gen_or_tl
);
4169 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4171 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4172 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4175 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4182 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4183 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4184 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4185 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4186 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4187 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4189 temp
= tcg_temp_new();
4192 case OPC2_32_BIT_SH_NAND_T
:
4193 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4194 pos1
, pos2
, &tcg_gen_nand_tl
);
4196 case OPC2_32_BIT_SH_ORN_T
:
4197 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4198 pos1
, pos2
, &tcg_gen_orc_tl
);
4200 case OPC2_32_BIT_SH_XNOR_T
:
4201 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4202 pos1
, pos2
, &tcg_gen_eqv_tl
);
4204 case OPC2_32_BIT_SH_XOR_T
:
4205 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4206 pos1
, pos2
, &tcg_gen_xor_tl
);
4209 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4211 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4212 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4218 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4225 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4226 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4227 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4228 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4231 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4232 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4233 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4234 /* instruction to access the cache */
4236 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4237 case OPC2_32_BO_CACHEA_W_POSTINC
:
4238 case OPC2_32_BO_CACHEA_I_POSTINC
:
4239 /* instruction to access the cache, but we still need to handle
4240 the addressing mode */
4241 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4243 case OPC2_32_BO_CACHEA_WI_PREINC
:
4244 case OPC2_32_BO_CACHEA_W_PREINC
:
4245 case OPC2_32_BO_CACHEA_I_PREINC
:
4246 /* instruction to access the cache, but we still need to handle
4247 the addressing mode */
4248 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4250 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4251 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4252 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4253 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4256 case OPC2_32_BO_CACHEI_W_POSTINC
:
4257 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4258 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4259 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4261 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4264 case OPC2_32_BO_CACHEI_W_PREINC
:
4265 case OPC2_32_BO_CACHEI_WI_PREINC
:
4266 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4267 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4269 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4272 case OPC2_32_BO_ST_A_SHORTOFF
:
4273 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4275 case OPC2_32_BO_ST_A_POSTINC
:
4276 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4278 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4280 case OPC2_32_BO_ST_A_PREINC
:
4281 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4283 case OPC2_32_BO_ST_B_SHORTOFF
:
4284 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4286 case OPC2_32_BO_ST_B_POSTINC
:
4287 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4289 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4291 case OPC2_32_BO_ST_B_PREINC
:
4292 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4294 case OPC2_32_BO_ST_D_SHORTOFF
:
4296 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4299 case OPC2_32_BO_ST_D_POSTINC
:
4301 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4302 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4304 case OPC2_32_BO_ST_D_PREINC
:
4306 temp
= tcg_temp_new();
4307 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4308 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4309 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4311 case OPC2_32_BO_ST_DA_SHORTOFF
:
4313 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4316 case OPC2_32_BO_ST_DA_POSTINC
:
4318 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4319 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4321 case OPC2_32_BO_ST_DA_PREINC
:
4323 temp
= tcg_temp_new();
4324 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4325 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4326 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4328 case OPC2_32_BO_ST_H_SHORTOFF
:
4329 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4331 case OPC2_32_BO_ST_H_POSTINC
:
4332 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4334 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4336 case OPC2_32_BO_ST_H_PREINC
:
4337 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4339 case OPC2_32_BO_ST_Q_SHORTOFF
:
4340 temp
= tcg_temp_new();
4341 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4342 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4344 case OPC2_32_BO_ST_Q_POSTINC
:
4345 temp
= tcg_temp_new();
4346 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4347 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4349 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4351 case OPC2_32_BO_ST_Q_PREINC
:
4352 temp
= tcg_temp_new();
4353 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4354 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4356 case OPC2_32_BO_ST_W_SHORTOFF
:
4357 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4359 case OPC2_32_BO_ST_W_POSTINC
:
4360 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4362 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4364 case OPC2_32_BO_ST_W_PREINC
:
4365 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4368 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4372 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4377 TCGv temp
, temp2
, t_off10
;
4379 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4380 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4381 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4382 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4384 temp
= tcg_temp_new();
4385 temp2
= tcg_temp_new();
4386 t_off10
= tcg_constant_i32(off10
);
4388 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4389 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4392 case OPC2_32_BO_CACHEA_WI_BR
:
4393 case OPC2_32_BO_CACHEA_W_BR
:
4394 case OPC2_32_BO_CACHEA_I_BR
:
4395 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4397 case OPC2_32_BO_CACHEA_WI_CIRC
:
4398 case OPC2_32_BO_CACHEA_W_CIRC
:
4399 case OPC2_32_BO_CACHEA_I_CIRC
:
4400 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4402 case OPC2_32_BO_ST_A_BR
:
4403 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4404 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4406 case OPC2_32_BO_ST_A_CIRC
:
4407 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4408 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4410 case OPC2_32_BO_ST_B_BR
:
4411 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4412 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4414 case OPC2_32_BO_ST_B_CIRC
:
4415 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4416 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4418 case OPC2_32_BO_ST_D_BR
:
4420 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4421 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4423 case OPC2_32_BO_ST_D_CIRC
:
4425 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4426 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4427 tcg_gen_addi_tl(temp
, temp
, 4);
4428 tcg_gen_rem_tl(temp
, temp
, temp2
);
4429 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4430 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4431 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4433 case OPC2_32_BO_ST_DA_BR
:
4435 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4436 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4438 case OPC2_32_BO_ST_DA_CIRC
:
4440 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4441 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4442 tcg_gen_addi_tl(temp
, temp
, 4);
4443 tcg_gen_rem_tl(temp
, temp
, temp2
);
4444 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4445 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4446 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4448 case OPC2_32_BO_ST_H_BR
:
4449 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4450 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4452 case OPC2_32_BO_ST_H_CIRC
:
4453 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4454 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4456 case OPC2_32_BO_ST_Q_BR
:
4457 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4458 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4459 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4461 case OPC2_32_BO_ST_Q_CIRC
:
4462 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4463 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4464 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4466 case OPC2_32_BO_ST_W_BR
:
4467 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4468 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4470 case OPC2_32_BO_ST_W_CIRC
:
4471 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4472 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4475 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4479 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4486 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4487 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4488 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4489 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4492 case OPC2_32_BO_LD_A_SHORTOFF
:
4493 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4495 case OPC2_32_BO_LD_A_POSTINC
:
4496 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4498 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4500 case OPC2_32_BO_LD_A_PREINC
:
4501 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4503 case OPC2_32_BO_LD_B_SHORTOFF
:
4504 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4506 case OPC2_32_BO_LD_B_POSTINC
:
4507 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4509 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4511 case OPC2_32_BO_LD_B_PREINC
:
4512 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4514 case OPC2_32_BO_LD_BU_SHORTOFF
:
4515 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4517 case OPC2_32_BO_LD_BU_POSTINC
:
4518 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4520 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4522 case OPC2_32_BO_LD_BU_PREINC
:
4523 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4525 case OPC2_32_BO_LD_D_SHORTOFF
:
4527 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4530 case OPC2_32_BO_LD_D_POSTINC
:
4532 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4533 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4535 case OPC2_32_BO_LD_D_PREINC
:
4537 temp
= tcg_temp_new();
4538 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4539 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4540 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4542 case OPC2_32_BO_LD_DA_SHORTOFF
:
4544 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4547 case OPC2_32_BO_LD_DA_POSTINC
:
4549 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4550 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4552 case OPC2_32_BO_LD_DA_PREINC
:
4554 temp
= tcg_temp_new();
4555 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4556 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4557 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4559 case OPC2_32_BO_LD_H_SHORTOFF
:
4560 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4562 case OPC2_32_BO_LD_H_POSTINC
:
4563 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4565 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4567 case OPC2_32_BO_LD_H_PREINC
:
4568 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4570 case OPC2_32_BO_LD_HU_SHORTOFF
:
4571 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4573 case OPC2_32_BO_LD_HU_POSTINC
:
4574 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4576 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4578 case OPC2_32_BO_LD_HU_PREINC
:
4579 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4581 case OPC2_32_BO_LD_Q_SHORTOFF
:
4582 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4583 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4585 case OPC2_32_BO_LD_Q_POSTINC
:
4586 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4588 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4589 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4591 case OPC2_32_BO_LD_Q_PREINC
:
4592 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4593 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4595 case OPC2_32_BO_LD_W_SHORTOFF
:
4596 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4598 case OPC2_32_BO_LD_W_POSTINC
:
4599 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4601 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4603 case OPC2_32_BO_LD_W_PREINC
:
4604 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4607 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4611 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4616 TCGv temp
, temp2
, t_off10
;
4618 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4619 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4620 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4621 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4623 temp
= tcg_temp_new();
4624 temp2
= tcg_temp_new();
4625 t_off10
= tcg_constant_i32(off10
);
4627 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4628 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4632 case OPC2_32_BO_LD_A_BR
:
4633 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4634 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4636 case OPC2_32_BO_LD_A_CIRC
:
4637 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4638 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4640 case OPC2_32_BO_LD_B_BR
:
4641 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4642 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4644 case OPC2_32_BO_LD_B_CIRC
:
4645 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4646 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4648 case OPC2_32_BO_LD_BU_BR
:
4649 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4650 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4652 case OPC2_32_BO_LD_BU_CIRC
:
4653 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4654 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4656 case OPC2_32_BO_LD_D_BR
:
4658 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4659 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4661 case OPC2_32_BO_LD_D_CIRC
:
4663 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4664 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4665 tcg_gen_addi_tl(temp
, temp
, 4);
4666 tcg_gen_rem_tl(temp
, temp
, temp2
);
4667 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4668 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4669 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4671 case OPC2_32_BO_LD_DA_BR
:
4673 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4674 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4676 case OPC2_32_BO_LD_DA_CIRC
:
4678 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4679 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4680 tcg_gen_addi_tl(temp
, temp
, 4);
4681 tcg_gen_rem_tl(temp
, temp
, temp2
);
4682 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4683 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4684 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4686 case OPC2_32_BO_LD_H_BR
:
4687 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4688 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4690 case OPC2_32_BO_LD_H_CIRC
:
4691 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4692 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4694 case OPC2_32_BO_LD_HU_BR
:
4695 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4696 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4698 case OPC2_32_BO_LD_HU_CIRC
:
4699 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4700 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4702 case OPC2_32_BO_LD_Q_BR
:
4703 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4704 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4705 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4707 case OPC2_32_BO_LD_Q_CIRC
:
4708 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4709 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4710 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4712 case OPC2_32_BO_LD_W_BR
:
4713 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4714 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4716 case OPC2_32_BO_LD_W_CIRC
:
4717 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4718 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4721 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4725 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4733 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4734 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4735 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4736 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4739 temp
= tcg_temp_new();
4742 case OPC2_32_BO_LDLCX_SHORTOFF
:
4743 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4744 gen_helper_ldlcx(cpu_env
, temp
);
4746 case OPC2_32_BO_LDMST_SHORTOFF
:
4747 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4748 gen_ldmst(ctx
, r1
, temp
);
4750 case OPC2_32_BO_LDMST_POSTINC
:
4751 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4752 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4754 case OPC2_32_BO_LDMST_PREINC
:
4755 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4756 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4758 case OPC2_32_BO_LDUCX_SHORTOFF
:
4759 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4760 gen_helper_lducx(cpu_env
, temp
);
4762 case OPC2_32_BO_LEA_SHORTOFF
:
4763 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4765 case OPC2_32_BO_STLCX_SHORTOFF
:
4766 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4767 gen_helper_stlcx(cpu_env
, temp
);
4769 case OPC2_32_BO_STUCX_SHORTOFF
:
4770 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4771 gen_helper_stucx(cpu_env
, temp
);
4773 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4774 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4775 gen_swap(ctx
, r1
, temp
);
4777 case OPC2_32_BO_SWAP_W_POSTINC
:
4778 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4779 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4781 case OPC2_32_BO_SWAP_W_PREINC
:
4782 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4783 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4785 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4786 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4787 gen_cmpswap(ctx
, r1
, temp
);
4789 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4790 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4791 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4793 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4794 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4795 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4797 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4798 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4799 gen_swapmsk(ctx
, r1
, temp
);
4801 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4802 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4803 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4805 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4806 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4807 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4810 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4814 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4819 TCGv temp
, temp2
, t_off10
;
4821 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4822 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4823 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4824 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4826 temp
= tcg_temp_new();
4827 temp2
= tcg_temp_new();
4828 t_off10
= tcg_constant_i32(off10
);
4830 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4831 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4834 case OPC2_32_BO_LDMST_BR
:
4835 gen_ldmst(ctx
, r1
, temp2
);
4836 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4838 case OPC2_32_BO_LDMST_CIRC
:
4839 gen_ldmst(ctx
, r1
, temp2
);
4840 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4842 case OPC2_32_BO_SWAP_W_BR
:
4843 gen_swap(ctx
, r1
, temp2
);
4844 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4846 case OPC2_32_BO_SWAP_W_CIRC
:
4847 gen_swap(ctx
, r1
, temp2
);
4848 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4850 case OPC2_32_BO_CMPSWAP_W_BR
:
4851 gen_cmpswap(ctx
, r1
, temp2
);
4852 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4854 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4855 gen_cmpswap(ctx
, r1
, temp2
);
4856 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4858 case OPC2_32_BO_SWAPMSK_W_BR
:
4859 gen_swapmsk(ctx
, r1
, temp2
);
4860 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4862 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4863 gen_swapmsk(ctx
, r1
, temp2
);
4864 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4867 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4871 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4877 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4878 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4879 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4882 case OPC1_32_BOL_LD_A_LONGOFF
:
4883 temp
= tcg_temp_new();
4884 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4885 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4887 case OPC1_32_BOL_LD_W_LONGOFF
:
4888 temp
= tcg_temp_new();
4889 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4890 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4892 case OPC1_32_BOL_LEA_LONGOFF
:
4893 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4895 case OPC1_32_BOL_ST_A_LONGOFF
:
4896 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4897 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4899 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4902 case OPC1_32_BOL_ST_W_LONGOFF
:
4903 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4905 case OPC1_32_BOL_LD_B_LONGOFF
:
4906 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4907 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4909 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4912 case OPC1_32_BOL_LD_BU_LONGOFF
:
4913 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4914 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4916 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4919 case OPC1_32_BOL_LD_H_LONGOFF
:
4920 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4921 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4923 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4926 case OPC1_32_BOL_LD_HU_LONGOFF
:
4927 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4928 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4930 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4933 case OPC1_32_BOL_ST_B_LONGOFF
:
4934 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4935 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4937 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4940 case OPC1_32_BOL_ST_H_LONGOFF
:
4941 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4942 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4944 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4948 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4953 static void decode_rc_logical_shift(DisasContext
*ctx
)
4960 r2
= MASK_OP_RC_D(ctx
->opcode
);
4961 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4962 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4963 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4965 temp
= tcg_temp_new();
4968 case OPC2_32_RC_AND
:
4969 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4971 case OPC2_32_RC_ANDN
:
4972 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4974 case OPC2_32_RC_NAND
:
4975 tcg_gen_movi_tl(temp
, const9
);
4976 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4978 case OPC2_32_RC_NOR
:
4979 tcg_gen_movi_tl(temp
, const9
);
4980 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4983 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4985 case OPC2_32_RC_ORN
:
4986 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4989 const9
= sextract32(const9
, 0, 6);
4990 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4992 case OPC2_32_RC_SH_H
:
4993 const9
= sextract32(const9
, 0, 5);
4994 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4996 case OPC2_32_RC_SHA
:
4997 const9
= sextract32(const9
, 0, 6);
4998 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5000 case OPC2_32_RC_SHA_H
:
5001 const9
= sextract32(const9
, 0, 5);
5002 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5004 case OPC2_32_RC_SHAS
:
5005 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5007 case OPC2_32_RC_XNOR
:
5008 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5009 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5011 case OPC2_32_RC_XOR
:
5012 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5015 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5019 static void decode_rc_accumulator(DisasContext
*ctx
)
5027 r2
= MASK_OP_RC_D(ctx
->opcode
);
5028 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5029 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5031 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5033 temp
= tcg_temp_new();
5036 case OPC2_32_RC_ABSDIF
:
5037 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5039 case OPC2_32_RC_ABSDIFS
:
5040 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5042 case OPC2_32_RC_ADD
:
5043 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5045 case OPC2_32_RC_ADDC
:
5046 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5048 case OPC2_32_RC_ADDS
:
5049 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5051 case OPC2_32_RC_ADDS_U
:
5052 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5054 case OPC2_32_RC_ADDX
:
5055 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5057 case OPC2_32_RC_AND_EQ
:
5058 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5059 const9
, &tcg_gen_and_tl
);
5061 case OPC2_32_RC_AND_GE
:
5062 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5063 const9
, &tcg_gen_and_tl
);
5065 case OPC2_32_RC_AND_GE_U
:
5066 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5067 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5068 const9
, &tcg_gen_and_tl
);
5070 case OPC2_32_RC_AND_LT
:
5071 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5072 const9
, &tcg_gen_and_tl
);
5074 case OPC2_32_RC_AND_LT_U
:
5075 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5076 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5077 const9
, &tcg_gen_and_tl
);
5079 case OPC2_32_RC_AND_NE
:
5080 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5081 const9
, &tcg_gen_and_tl
);
5084 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5086 case OPC2_32_RC_EQANY_B
:
5087 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5089 case OPC2_32_RC_EQANY_H
:
5090 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5093 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5095 case OPC2_32_RC_GE_U
:
5096 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5097 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5100 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5102 case OPC2_32_RC_LT_U
:
5103 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5104 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5106 case OPC2_32_RC_MAX
:
5107 tcg_gen_movi_tl(temp
, const9
);
5108 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5109 cpu_gpr_d
[r1
], temp
);
5111 case OPC2_32_RC_MAX_U
:
5112 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5113 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5114 cpu_gpr_d
[r1
], temp
);
5116 case OPC2_32_RC_MIN
:
5117 tcg_gen_movi_tl(temp
, const9
);
5118 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5119 cpu_gpr_d
[r1
], temp
);
5121 case OPC2_32_RC_MIN_U
:
5122 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5123 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5124 cpu_gpr_d
[r1
], temp
);
5127 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5129 case OPC2_32_RC_OR_EQ
:
5130 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5131 const9
, &tcg_gen_or_tl
);
5133 case OPC2_32_RC_OR_GE
:
5134 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5135 const9
, &tcg_gen_or_tl
);
5137 case OPC2_32_RC_OR_GE_U
:
5138 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5139 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5140 const9
, &tcg_gen_or_tl
);
5142 case OPC2_32_RC_OR_LT
:
5143 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5144 const9
, &tcg_gen_or_tl
);
5146 case OPC2_32_RC_OR_LT_U
:
5147 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5148 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5149 const9
, &tcg_gen_or_tl
);
5151 case OPC2_32_RC_OR_NE
:
5152 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5153 const9
, &tcg_gen_or_tl
);
5155 case OPC2_32_RC_RSUB
:
5156 tcg_gen_movi_tl(temp
, const9
);
5157 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5159 case OPC2_32_RC_RSUBS
:
5160 tcg_gen_movi_tl(temp
, const9
);
5161 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5163 case OPC2_32_RC_RSUBS_U
:
5164 tcg_gen_movi_tl(temp
, const9
);
5165 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5167 case OPC2_32_RC_SH_EQ
:
5168 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5170 case OPC2_32_RC_SH_GE
:
5171 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5173 case OPC2_32_RC_SH_GE_U
:
5174 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5175 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5177 case OPC2_32_RC_SH_LT
:
5178 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5180 case OPC2_32_RC_SH_LT_U
:
5181 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5182 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5184 case OPC2_32_RC_SH_NE
:
5185 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5187 case OPC2_32_RC_XOR_EQ
:
5188 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5189 const9
, &tcg_gen_xor_tl
);
5191 case OPC2_32_RC_XOR_GE
:
5192 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5193 const9
, &tcg_gen_xor_tl
);
5195 case OPC2_32_RC_XOR_GE_U
:
5196 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5197 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5198 const9
, &tcg_gen_xor_tl
);
5200 case OPC2_32_RC_XOR_LT
:
5201 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5202 const9
, &tcg_gen_xor_tl
);
5204 case OPC2_32_RC_XOR_LT_U
:
5205 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5206 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5207 const9
, &tcg_gen_xor_tl
);
5209 case OPC2_32_RC_XOR_NE
:
5210 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5211 const9
, &tcg_gen_xor_tl
);
5214 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5218 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5223 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5224 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5227 case OPC2_32_RC_BISR
:
5228 gen_helper_1arg(bisr
, const9
);
5230 case OPC2_32_RC_SYSCALL
:
5231 /* TODO: Add exception generation */
5234 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5238 static void decode_rc_mul(DisasContext
*ctx
)
5244 r2
= MASK_OP_RC_D(ctx
->opcode
);
5245 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5246 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5248 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5251 case OPC2_32_RC_MUL_32
:
5252 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5254 case OPC2_32_RC_MUL_64
:
5256 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5258 case OPC2_32_RC_MULS_32
:
5259 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5261 case OPC2_32_RC_MUL_U_64
:
5262 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5264 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5266 case OPC2_32_RC_MULS_U_32
:
5267 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5268 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5271 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5276 static void decode_rcpw_insert(DisasContext
*ctx
)
5280 int32_t pos
, width
, const4
;
5284 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5285 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5286 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5287 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5288 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5289 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5292 case OPC2_32_RCPW_IMASK
:
5294 /* if pos + width > 32 undefined result */
5295 if (pos
+ width
<= 32) {
5296 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5297 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5300 case OPC2_32_RCPW_INSERT
:
5301 /* if pos + width > 32 undefined result */
5302 if (pos
+ width
<= 32) {
5303 temp
= tcg_constant_i32(const4
);
5304 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5308 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5314 static void decode_rcrw_insert(DisasContext
*ctx
)
5318 int32_t width
, const4
;
5320 TCGv temp
, temp2
, temp3
;
5322 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5323 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5324 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5325 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5326 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5327 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5329 temp
= tcg_temp_new();
5330 temp2
= tcg_temp_new();
5333 case OPC2_32_RCRW_IMASK
:
5334 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5335 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5336 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5337 tcg_gen_movi_tl(temp2
, const4
);
5338 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5340 case OPC2_32_RCRW_INSERT
:
5341 temp3
= tcg_temp_new();
5343 tcg_gen_movi_tl(temp
, width
);
5344 tcg_gen_movi_tl(temp2
, const4
);
5345 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5346 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5349 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5355 static void decode_rcr_cond_select(DisasContext
*ctx
)
5363 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5364 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5365 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5366 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5367 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5370 case OPC2_32_RCR_CADD
:
5371 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5374 case OPC2_32_RCR_CADDN
:
5375 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5378 case OPC2_32_RCR_SEL
:
5379 temp
= tcg_constant_i32(0);
5380 temp2
= tcg_constant_i32(const9
);
5381 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5382 cpu_gpr_d
[r1
], temp2
);
5384 case OPC2_32_RCR_SELN
:
5385 temp
= tcg_constant_i32(0);
5386 temp2
= tcg_constant_i32(const9
);
5387 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5388 cpu_gpr_d
[r1
], temp2
);
5391 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5395 static void decode_rcr_madd(DisasContext
*ctx
)
5402 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5403 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5404 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5405 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5406 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5409 case OPC2_32_RCR_MADD_32
:
5410 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5412 case OPC2_32_RCR_MADD_64
:
5415 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5416 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5418 case OPC2_32_RCR_MADDS_32
:
5419 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5421 case OPC2_32_RCR_MADDS_64
:
5424 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5425 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5427 case OPC2_32_RCR_MADD_U_64
:
5430 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5431 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5432 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5434 case OPC2_32_RCR_MADDS_U_32
:
5435 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5436 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5438 case OPC2_32_RCR_MADDS_U_64
:
5441 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5442 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5443 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5446 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5450 static void decode_rcr_msub(DisasContext
*ctx
)
5457 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5458 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5459 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5460 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5461 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5464 case OPC2_32_RCR_MSUB_32
:
5465 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5467 case OPC2_32_RCR_MSUB_64
:
5470 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5471 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5473 case OPC2_32_RCR_MSUBS_32
:
5474 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5476 case OPC2_32_RCR_MSUBS_64
:
5479 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5480 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5482 case OPC2_32_RCR_MSUB_U_64
:
5485 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5486 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5487 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5489 case OPC2_32_RCR_MSUBS_U_32
:
5490 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5491 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5493 case OPC2_32_RCR_MSUBS_U_64
:
5496 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5497 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5498 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5501 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5507 static void decode_rlc_opc(DisasContext
*ctx
,
5513 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5514 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5515 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5518 case OPC1_32_RLC_ADDI
:
5519 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5521 case OPC1_32_RLC_ADDIH
:
5522 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5524 case OPC1_32_RLC_ADDIH_A
:
5525 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5527 case OPC1_32_RLC_MFCR
:
5528 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5529 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5531 case OPC1_32_RLC_MOV
:
5532 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5534 case OPC1_32_RLC_MOV_64
:
5535 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5537 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5538 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5540 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5543 case OPC1_32_RLC_MOV_U
:
5544 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5545 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5547 case OPC1_32_RLC_MOV_H
:
5548 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5550 case OPC1_32_RLC_MOVH_A
:
5551 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5553 case OPC1_32_RLC_MTCR
:
5554 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5555 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5558 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5563 static void decode_rr_accumulator(DisasContext
*ctx
)
5570 r3
= MASK_OP_RR_D(ctx
->opcode
);
5571 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5572 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5573 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5576 case OPC2_32_RR_ABS
:
5577 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5579 case OPC2_32_RR_ABS_B
:
5580 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5582 case OPC2_32_RR_ABS_H
:
5583 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5585 case OPC2_32_RR_ABSDIF
:
5586 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5588 case OPC2_32_RR_ABSDIF_B
:
5589 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5592 case OPC2_32_RR_ABSDIF_H
:
5593 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5596 case OPC2_32_RR_ABSDIFS
:
5597 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5600 case OPC2_32_RR_ABSDIFS_H
:
5601 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5604 case OPC2_32_RR_ABSS
:
5605 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5607 case OPC2_32_RR_ABSS_H
:
5608 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5610 case OPC2_32_RR_ADD
:
5611 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5613 case OPC2_32_RR_ADD_B
:
5614 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5616 case OPC2_32_RR_ADD_H
:
5617 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5619 case OPC2_32_RR_ADDC
:
5620 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5622 case OPC2_32_RR_ADDS
:
5623 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5625 case OPC2_32_RR_ADDS_H
:
5626 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5629 case OPC2_32_RR_ADDS_HU
:
5630 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5633 case OPC2_32_RR_ADDS_U
:
5634 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5637 case OPC2_32_RR_ADDX
:
5638 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5640 case OPC2_32_RR_AND_EQ
:
5641 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5642 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5644 case OPC2_32_RR_AND_GE
:
5645 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5646 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5648 case OPC2_32_RR_AND_GE_U
:
5649 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5650 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5652 case OPC2_32_RR_AND_LT
:
5653 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5654 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5656 case OPC2_32_RR_AND_LT_U
:
5657 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5658 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5660 case OPC2_32_RR_AND_NE
:
5661 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5662 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5665 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5668 case OPC2_32_RR_EQ_B
:
5669 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5671 case OPC2_32_RR_EQ_H
:
5672 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5674 case OPC2_32_RR_EQ_W
:
5675 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5677 case OPC2_32_RR_EQANY_B
:
5678 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5680 case OPC2_32_RR_EQANY_H
:
5681 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5684 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5687 case OPC2_32_RR_GE_U
:
5688 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5692 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5695 case OPC2_32_RR_LT_U
:
5696 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5699 case OPC2_32_RR_LT_B
:
5700 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5702 case OPC2_32_RR_LT_BU
:
5703 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5705 case OPC2_32_RR_LT_H
:
5706 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5708 case OPC2_32_RR_LT_HU
:
5709 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5711 case OPC2_32_RR_LT_W
:
5712 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5714 case OPC2_32_RR_LT_WU
:
5715 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5717 case OPC2_32_RR_MAX
:
5718 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5719 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5721 case OPC2_32_RR_MAX_U
:
5722 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5723 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5725 case OPC2_32_RR_MAX_B
:
5726 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5728 case OPC2_32_RR_MAX_BU
:
5729 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5731 case OPC2_32_RR_MAX_H
:
5732 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5734 case OPC2_32_RR_MAX_HU
:
5735 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5737 case OPC2_32_RR_MIN
:
5738 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5739 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5741 case OPC2_32_RR_MIN_U
:
5742 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5743 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5745 case OPC2_32_RR_MIN_B
:
5746 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5748 case OPC2_32_RR_MIN_BU
:
5749 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5751 case OPC2_32_RR_MIN_H
:
5752 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5754 case OPC2_32_RR_MIN_HU
:
5755 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5757 case OPC2_32_RR_MOV
:
5758 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5760 case OPC2_32_RR_MOV_64
:
5761 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5762 temp
= tcg_temp_new();
5765 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5766 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5767 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5769 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5772 case OPC2_32_RR_MOVS_64
:
5773 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5775 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5776 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5778 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5782 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5785 case OPC2_32_RR_OR_EQ
:
5786 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5787 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5789 case OPC2_32_RR_OR_GE
:
5790 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5791 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5793 case OPC2_32_RR_OR_GE_U
:
5794 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5795 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5797 case OPC2_32_RR_OR_LT
:
5798 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5799 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5801 case OPC2_32_RR_OR_LT_U
:
5802 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5803 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5805 case OPC2_32_RR_OR_NE
:
5806 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5807 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5809 case OPC2_32_RR_SAT_B
:
5810 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5812 case OPC2_32_RR_SAT_BU
:
5813 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5815 case OPC2_32_RR_SAT_H
:
5816 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5818 case OPC2_32_RR_SAT_HU
:
5819 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5821 case OPC2_32_RR_SH_EQ
:
5822 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5825 case OPC2_32_RR_SH_GE
:
5826 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5829 case OPC2_32_RR_SH_GE_U
:
5830 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5833 case OPC2_32_RR_SH_LT
:
5834 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5837 case OPC2_32_RR_SH_LT_U
:
5838 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5841 case OPC2_32_RR_SH_NE
:
5842 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5845 case OPC2_32_RR_SUB
:
5846 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5848 case OPC2_32_RR_SUB_B
:
5849 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5851 case OPC2_32_RR_SUB_H
:
5852 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5854 case OPC2_32_RR_SUBC
:
5855 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5857 case OPC2_32_RR_SUBS
:
5858 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5860 case OPC2_32_RR_SUBS_U
:
5861 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5863 case OPC2_32_RR_SUBS_H
:
5864 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5867 case OPC2_32_RR_SUBS_HU
:
5868 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5871 case OPC2_32_RR_SUBX
:
5872 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5874 case OPC2_32_RR_XOR_EQ
:
5875 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5876 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5878 case OPC2_32_RR_XOR_GE
:
5879 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5880 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5882 case OPC2_32_RR_XOR_GE_U
:
5883 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5884 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5886 case OPC2_32_RR_XOR_LT
:
5887 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5888 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5890 case OPC2_32_RR_XOR_LT_U
:
5891 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5892 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5894 case OPC2_32_RR_XOR_NE
:
5895 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5896 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5899 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5903 static void decode_rr_logical_shift(DisasContext
*ctx
)
5908 r3
= MASK_OP_RR_D(ctx
->opcode
);
5909 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5910 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5911 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5914 case OPC2_32_RR_AND
:
5915 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5917 case OPC2_32_RR_ANDN
:
5918 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5920 case OPC2_32_RR_CLO
:
5921 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5922 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5924 case OPC2_32_RR_CLO_H
:
5925 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5927 case OPC2_32_RR_CLS
:
5928 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5930 case OPC2_32_RR_CLS_H
:
5931 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5933 case OPC2_32_RR_CLZ
:
5934 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5936 case OPC2_32_RR_CLZ_H
:
5937 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5939 case OPC2_32_RR_NAND
:
5940 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5942 case OPC2_32_RR_NOR
:
5943 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5946 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5948 case OPC2_32_RR_ORN
:
5949 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5952 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5954 case OPC2_32_RR_SH_H
:
5955 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5957 case OPC2_32_RR_SHA
:
5958 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5960 case OPC2_32_RR_SHA_H
:
5961 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5963 case OPC2_32_RR_SHAS
:
5964 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5966 case OPC2_32_RR_XNOR
:
5967 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5969 case OPC2_32_RR_XOR
:
5970 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5973 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5977 static void decode_rr_address(DisasContext
*ctx
)
5983 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5984 r3
= MASK_OP_RR_D(ctx
->opcode
);
5985 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5986 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5987 n
= MASK_OP_RR_N(ctx
->opcode
);
5990 case OPC2_32_RR_ADD_A
:
5991 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5993 case OPC2_32_RR_ADDSC_A
:
5994 temp
= tcg_temp_new();
5995 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5996 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
5998 case OPC2_32_RR_ADDSC_AT
:
5999 temp
= tcg_temp_new();
6000 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6001 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6002 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6004 case OPC2_32_RR_EQ_A
:
6005 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6008 case OPC2_32_RR_EQZ
:
6009 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6011 case OPC2_32_RR_GE_A
:
6012 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6015 case OPC2_32_RR_LT_A
:
6016 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6019 case OPC2_32_RR_MOV_A
:
6020 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6022 case OPC2_32_RR_MOV_AA
:
6023 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6025 case OPC2_32_RR_MOV_D
:
6026 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6028 case OPC2_32_RR_NE_A
:
6029 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6032 case OPC2_32_RR_NEZ_A
:
6033 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6035 case OPC2_32_RR_SUB_A
:
6036 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6039 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6043 static void decode_rr_idirect(DisasContext
*ctx
)
6048 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6049 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6053 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6055 case OPC2_32_RR_JLI
:
6056 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6057 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6059 case OPC2_32_RR_CALLI
:
6060 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6061 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6063 case OPC2_32_RR_FCALLI
:
6064 gen_fcall_save_ctx(ctx
);
6065 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6068 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6070 tcg_gen_exit_tb(NULL
, 0);
6071 ctx
->base
.is_jmp
= DISAS_NORETURN
;
6074 static void decode_rr_divide(DisasContext
*ctx
)
6079 TCGv temp
, temp2
, temp3
;
6081 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6082 r3
= MASK_OP_RR_D(ctx
->opcode
);
6083 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6084 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6087 case OPC2_32_RR_BMERGE
:
6088 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6090 case OPC2_32_RR_BSPLIT
:
6092 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6094 case OPC2_32_RR_DVINIT_B
:
6096 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6099 case OPC2_32_RR_DVINIT_BU
:
6100 temp
= tcg_temp_new();
6101 temp2
= tcg_temp_new();
6102 temp3
= tcg_temp_new();
6104 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6106 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6107 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6108 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6109 tcg_gen_abs_tl(temp
, temp3
);
6110 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6111 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6113 /* overflow = (D[b] == 0) */
6114 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6116 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6118 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6120 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6121 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6123 case OPC2_32_RR_DVINIT_H
:
6125 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6128 case OPC2_32_RR_DVINIT_HU
:
6129 temp
= tcg_temp_new();
6130 temp2
= tcg_temp_new();
6131 temp3
= tcg_temp_new();
6133 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6135 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6136 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6137 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6138 tcg_gen_abs_tl(temp
, temp3
);
6139 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6140 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6142 /* overflow = (D[b] == 0) */
6143 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6145 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6147 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6149 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6150 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6152 case OPC2_32_RR_DVINIT
:
6153 temp
= tcg_temp_new();
6154 temp2
= tcg_temp_new();
6156 /* overflow = ((D[b] == 0) ||
6157 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6158 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6159 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6160 tcg_gen_and_tl(temp
, temp
, temp2
);
6161 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6162 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6163 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6165 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6167 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6169 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6170 /* sign extend to high reg */
6171 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6173 case OPC2_32_RR_DVINIT_U
:
6174 /* overflow = (D[b] == 0) */
6175 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6176 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6178 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6180 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6182 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6183 /* zero extend to high reg*/
6184 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6186 case OPC2_32_RR_PARITY
:
6187 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6189 case OPC2_32_RR_UNPACK
:
6191 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6193 case OPC2_32_RR_CRC32
:
6194 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6195 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6197 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6200 case OPC2_32_RR_DIV
:
6201 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6202 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6205 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6208 case OPC2_32_RR_DIV_U
:
6209 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6210 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6211 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6213 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6216 case OPC2_32_RR_MUL_F
:
6217 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6219 case OPC2_32_RR_DIV_F
:
6220 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6222 case OPC2_32_RR_CMP_F
:
6223 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6225 case OPC2_32_RR_FTOI
:
6226 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6228 case OPC2_32_RR_ITOF
:
6229 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6231 case OPC2_32_RR_FTOUZ
:
6232 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6234 case OPC2_32_RR_UPDFL
:
6235 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6237 case OPC2_32_RR_UTOF
:
6238 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6240 case OPC2_32_RR_FTOIZ
:
6241 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6243 case OPC2_32_RR_QSEED_F
:
6244 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6247 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6252 static void decode_rr1_mul(DisasContext
*ctx
)
6260 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6261 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6262 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6263 n
= tcg_constant_i32(MASK_OP_RR1_N(ctx
->opcode
));
6264 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6267 case OPC2_32_RR1_MUL_H_32_LL
:
6268 temp64
= tcg_temp_new_i64();
6270 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6271 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6272 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6274 case OPC2_32_RR1_MUL_H_32_LU
:
6275 temp64
= tcg_temp_new_i64();
6277 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6278 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6279 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6281 case OPC2_32_RR1_MUL_H_32_UL
:
6282 temp64
= tcg_temp_new_i64();
6284 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6285 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6286 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6288 case OPC2_32_RR1_MUL_H_32_UU
:
6289 temp64
= tcg_temp_new_i64();
6291 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6292 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6293 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6295 case OPC2_32_RR1_MULM_H_64_LL
:
6296 temp64
= tcg_temp_new_i64();
6298 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6299 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6301 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6303 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6305 case OPC2_32_RR1_MULM_H_64_LU
:
6306 temp64
= tcg_temp_new_i64();
6308 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6309 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6311 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6313 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6315 case OPC2_32_RR1_MULM_H_64_UL
:
6316 temp64
= tcg_temp_new_i64();
6318 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6319 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6321 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6323 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6325 case OPC2_32_RR1_MULM_H_64_UU
:
6326 temp64
= tcg_temp_new_i64();
6328 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6329 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6331 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6333 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6335 case OPC2_32_RR1_MULR_H_16_LL
:
6336 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6337 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6339 case OPC2_32_RR1_MULR_H_16_LU
:
6340 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6341 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6343 case OPC2_32_RR1_MULR_H_16_UL
:
6344 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6345 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6347 case OPC2_32_RR1_MULR_H_16_UU
:
6348 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6349 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6352 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6356 static void decode_rr1_mulq(DisasContext
*ctx
)
6364 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6365 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6366 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6367 n
= MASK_OP_RR1_N(ctx
->opcode
);
6368 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6370 temp
= tcg_temp_new();
6371 temp2
= tcg_temp_new();
6374 case OPC2_32_RR1_MUL_Q_32
:
6375 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6377 case OPC2_32_RR1_MUL_Q_64
:
6379 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6382 case OPC2_32_RR1_MUL_Q_32_L
:
6383 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6384 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6386 case OPC2_32_RR1_MUL_Q_64_L
:
6388 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6389 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6391 case OPC2_32_RR1_MUL_Q_32_U
:
6392 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6393 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6395 case OPC2_32_RR1_MUL_Q_64_U
:
6397 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6398 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6400 case OPC2_32_RR1_MUL_Q_32_LL
:
6401 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6402 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6403 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6405 case OPC2_32_RR1_MUL_Q_32_UU
:
6406 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6407 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6408 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6410 case OPC2_32_RR1_MULR_Q_32_L
:
6411 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6412 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6413 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6415 case OPC2_32_RR1_MULR_Q_32_U
:
6416 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6417 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6418 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6421 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6426 static void decode_rr2_mul(DisasContext
*ctx
)
6431 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6432 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6433 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6434 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6436 case OPC2_32_RR2_MUL_32
:
6437 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6439 case OPC2_32_RR2_MUL_64
:
6441 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6444 case OPC2_32_RR2_MULS_32
:
6445 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6448 case OPC2_32_RR2_MUL_U_64
:
6450 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6453 case OPC2_32_RR2_MULS_U_32
:
6454 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6458 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6463 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6470 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6471 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6472 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6473 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6474 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6475 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6478 case OPC2_32_RRPW_EXTR
:
6480 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6484 if (pos
+ width
<= 32) {
6485 /* optimize special cases */
6486 if ((pos
== 0) && (width
== 8)) {
6487 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6488 } else if ((pos
== 0) && (width
== 16)) {
6489 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6491 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6492 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6496 case OPC2_32_RRPW_EXTR_U
:
6498 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6500 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6501 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6504 case OPC2_32_RRPW_IMASK
:
6507 if (pos
+ width
<= 32) {
6508 temp
= tcg_temp_new();
6509 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6510 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6511 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6515 case OPC2_32_RRPW_INSERT
:
6516 if (pos
+ width
<= 32) {
6517 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6522 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6527 static void decode_rrr_cond_select(DisasContext
*ctx
)
6533 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6534 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6535 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6536 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6537 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6540 case OPC2_32_RRR_CADD
:
6541 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6542 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6544 case OPC2_32_RRR_CADDN
:
6545 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6548 case OPC2_32_RRR_CSUB
:
6549 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6552 case OPC2_32_RRR_CSUBN
:
6553 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6556 case OPC2_32_RRR_SEL
:
6557 temp
= tcg_constant_i32(0);
6558 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6559 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6561 case OPC2_32_RRR_SELN
:
6562 temp
= tcg_constant_i32(0);
6563 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6564 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6567 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6571 static void decode_rrr_divide(DisasContext
*ctx
)
6577 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6578 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6579 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6580 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6581 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6584 case OPC2_32_RRR_DVADJ
:
6587 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6588 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6590 case OPC2_32_RRR_DVSTEP
:
6593 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6594 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6596 case OPC2_32_RRR_DVSTEP_U
:
6599 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6600 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6602 case OPC2_32_RRR_IXMAX
:
6605 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6606 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6608 case OPC2_32_RRR_IXMAX_U
:
6611 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6612 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6614 case OPC2_32_RRR_IXMIN
:
6617 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6618 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6620 case OPC2_32_RRR_IXMIN_U
:
6623 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6624 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6626 case OPC2_32_RRR_PACK
:
6628 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6629 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6631 case OPC2_32_RRR_ADD_F
:
6632 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6634 case OPC2_32_RRR_SUB_F
:
6635 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6637 case OPC2_32_RRR_MADD_F
:
6638 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6639 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6641 case OPC2_32_RRR_MSUB_F
:
6642 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6643 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6646 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6651 static void decode_rrr2_madd(DisasContext
*ctx
)
6654 uint32_t r1
, r2
, r3
, r4
;
6656 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6657 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6658 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6659 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6660 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6662 case OPC2_32_RRR2_MADD_32
:
6663 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6666 case OPC2_32_RRR2_MADD_64
:
6669 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6670 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6672 case OPC2_32_RRR2_MADDS_32
:
6673 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6674 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6676 case OPC2_32_RRR2_MADDS_64
:
6679 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6680 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6682 case OPC2_32_RRR2_MADD_U_64
:
6685 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6686 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6688 case OPC2_32_RRR2_MADDS_U_32
:
6689 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6690 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6692 case OPC2_32_RRR2_MADDS_U_64
:
6695 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6696 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6699 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6703 static void decode_rrr2_msub(DisasContext
*ctx
)
6706 uint32_t r1
, r2
, r3
, r4
;
6708 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6709 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6710 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6711 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6712 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6715 case OPC2_32_RRR2_MSUB_32
:
6716 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6719 case OPC2_32_RRR2_MSUB_64
:
6722 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6723 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6725 case OPC2_32_RRR2_MSUBS_32
:
6726 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6727 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6729 case OPC2_32_RRR2_MSUBS_64
:
6732 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6733 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6735 case OPC2_32_RRR2_MSUB_U_64
:
6736 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6737 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6739 case OPC2_32_RRR2_MSUBS_U_32
:
6740 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6741 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6743 case OPC2_32_RRR2_MSUBS_U_64
:
6746 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6747 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6750 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6755 static void decode_rrr1_madd(DisasContext
*ctx
)
6758 uint32_t r1
, r2
, r3
, r4
, n
;
6760 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6761 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6762 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6763 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6764 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6765 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6768 case OPC2_32_RRR1_MADD_H_LL
:
6771 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6772 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6774 case OPC2_32_RRR1_MADD_H_LU
:
6777 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6778 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6780 case OPC2_32_RRR1_MADD_H_UL
:
6783 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6784 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6786 case OPC2_32_RRR1_MADD_H_UU
:
6789 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6790 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6792 case OPC2_32_RRR1_MADDS_H_LL
:
6795 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6796 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6798 case OPC2_32_RRR1_MADDS_H_LU
:
6801 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6802 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6804 case OPC2_32_RRR1_MADDS_H_UL
:
6807 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6808 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6810 case OPC2_32_RRR1_MADDS_H_UU
:
6813 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6814 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6816 case OPC2_32_RRR1_MADDM_H_LL
:
6819 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6820 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6822 case OPC2_32_RRR1_MADDM_H_LU
:
6825 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6826 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6828 case OPC2_32_RRR1_MADDM_H_UL
:
6831 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6832 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6834 case OPC2_32_RRR1_MADDM_H_UU
:
6837 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6838 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6840 case OPC2_32_RRR1_MADDMS_H_LL
:
6843 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6844 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6846 case OPC2_32_RRR1_MADDMS_H_LU
:
6849 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6850 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6852 case OPC2_32_RRR1_MADDMS_H_UL
:
6855 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6856 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6858 case OPC2_32_RRR1_MADDMS_H_UU
:
6861 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6862 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6864 case OPC2_32_RRR1_MADDR_H_LL
:
6865 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6866 cpu_gpr_d
[r2
], n
, MODE_LL
);
6868 case OPC2_32_RRR1_MADDR_H_LU
:
6869 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6870 cpu_gpr_d
[r2
], n
, MODE_LU
);
6872 case OPC2_32_RRR1_MADDR_H_UL
:
6873 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6874 cpu_gpr_d
[r2
], n
, MODE_UL
);
6876 case OPC2_32_RRR1_MADDR_H_UU
:
6877 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6878 cpu_gpr_d
[r2
], n
, MODE_UU
);
6880 case OPC2_32_RRR1_MADDRS_H_LL
:
6881 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6882 cpu_gpr_d
[r2
], n
, MODE_LL
);
6884 case OPC2_32_RRR1_MADDRS_H_LU
:
6885 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6886 cpu_gpr_d
[r2
], n
, MODE_LU
);
6888 case OPC2_32_RRR1_MADDRS_H_UL
:
6889 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6890 cpu_gpr_d
[r2
], n
, MODE_UL
);
6892 case OPC2_32_RRR1_MADDRS_H_UU
:
6893 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6894 cpu_gpr_d
[r2
], n
, MODE_UU
);
6897 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6901 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6904 uint32_t r1
, r2
, r3
, r4
, n
;
6907 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6908 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6909 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6910 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6911 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6912 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6914 temp
= tcg_temp_new();
6915 temp2
= tcg_temp_new();
6918 case OPC2_32_RRR1_MADD_Q_32
:
6919 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6920 cpu_gpr_d
[r2
], n
, 32);
6922 case OPC2_32_RRR1_MADD_Q_64
:
6925 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6926 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6929 case OPC2_32_RRR1_MADD_Q_32_L
:
6930 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6931 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6934 case OPC2_32_RRR1_MADD_Q_64_L
:
6937 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6938 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6939 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6942 case OPC2_32_RRR1_MADD_Q_32_U
:
6943 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6944 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6947 case OPC2_32_RRR1_MADD_Q_64_U
:
6950 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6951 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6952 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6955 case OPC2_32_RRR1_MADD_Q_32_LL
:
6956 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6957 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6958 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6960 case OPC2_32_RRR1_MADD_Q_64_LL
:
6963 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6964 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6965 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6966 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6968 case OPC2_32_RRR1_MADD_Q_32_UU
:
6969 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6970 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6971 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6973 case OPC2_32_RRR1_MADD_Q_64_UU
:
6976 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6977 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6978 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6979 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6981 case OPC2_32_RRR1_MADDS_Q_32
:
6982 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6983 cpu_gpr_d
[r2
], n
, 32);
6985 case OPC2_32_RRR1_MADDS_Q_64
:
6988 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6989 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6992 case OPC2_32_RRR1_MADDS_Q_32_L
:
6993 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6994 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6997 case OPC2_32_RRR1_MADDS_Q_64_L
:
7000 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7001 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7002 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7005 case OPC2_32_RRR1_MADDS_Q_32_U
:
7006 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7007 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7010 case OPC2_32_RRR1_MADDS_Q_64_U
:
7013 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7014 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7015 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7018 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7019 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7020 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7021 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7023 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7026 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7027 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7028 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7029 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7031 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7032 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7033 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7034 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7036 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7039 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7040 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7041 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7042 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7044 case OPC2_32_RRR1_MADDR_H_64_UL
:
7046 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7047 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7049 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7051 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7052 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7054 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7055 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7056 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7057 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7059 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7060 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7061 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7062 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7064 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7065 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7066 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7067 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7069 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7070 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7071 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7072 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7075 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7079 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7082 uint32_t r1
, r2
, r3
, r4
, n
;
7084 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7085 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7086 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7087 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7088 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7089 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7092 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7095 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7096 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7098 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7101 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7102 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7104 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7107 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7108 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7110 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7113 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7114 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7116 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7119 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7120 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7123 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7126 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7127 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7130 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7133 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7134 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7137 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7140 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7141 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7144 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7147 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7148 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7151 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7154 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7155 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7158 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7161 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7162 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7165 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7168 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7169 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7172 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7175 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7176 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7179 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7182 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7183 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7186 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7189 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7190 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7193 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7196 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7197 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7200 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7201 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7202 cpu_gpr_d
[r2
], n
, MODE_LL
);
7204 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7205 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7206 cpu_gpr_d
[r2
], n
, MODE_LU
);
7208 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7209 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7210 cpu_gpr_d
[r2
], n
, MODE_UL
);
7212 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7213 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7214 cpu_gpr_d
[r2
], n
, MODE_UU
);
7216 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7217 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7218 cpu_gpr_d
[r2
], n
, MODE_LL
);
7220 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7221 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7222 cpu_gpr_d
[r2
], n
, MODE_LU
);
7224 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7225 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7226 cpu_gpr_d
[r2
], n
, MODE_UL
);
7228 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7229 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7230 cpu_gpr_d
[r2
], n
, MODE_UU
);
7233 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7237 static void decode_rrr1_msub(DisasContext
*ctx
)
7240 uint32_t r1
, r2
, r3
, r4
, n
;
7242 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7243 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7244 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7245 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7246 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7247 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7250 case OPC2_32_RRR1_MSUB_H_LL
:
7253 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7254 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7256 case OPC2_32_RRR1_MSUB_H_LU
:
7259 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7260 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7262 case OPC2_32_RRR1_MSUB_H_UL
:
7265 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7266 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7268 case OPC2_32_RRR1_MSUB_H_UU
:
7271 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7272 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7274 case OPC2_32_RRR1_MSUBS_H_LL
:
7277 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7278 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7280 case OPC2_32_RRR1_MSUBS_H_LU
:
7283 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7284 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7286 case OPC2_32_RRR1_MSUBS_H_UL
:
7289 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7290 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7292 case OPC2_32_RRR1_MSUBS_H_UU
:
7295 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7296 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7298 case OPC2_32_RRR1_MSUBM_H_LL
:
7301 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7302 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7304 case OPC2_32_RRR1_MSUBM_H_LU
:
7307 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7308 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7310 case OPC2_32_RRR1_MSUBM_H_UL
:
7313 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7314 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7316 case OPC2_32_RRR1_MSUBM_H_UU
:
7319 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7320 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7322 case OPC2_32_RRR1_MSUBMS_H_LL
:
7325 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7326 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7328 case OPC2_32_RRR1_MSUBMS_H_LU
:
7331 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7332 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7334 case OPC2_32_RRR1_MSUBMS_H_UL
:
7337 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7338 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7340 case OPC2_32_RRR1_MSUBMS_H_UU
:
7343 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7344 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7346 case OPC2_32_RRR1_MSUBR_H_LL
:
7347 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7348 cpu_gpr_d
[r2
], n
, MODE_LL
);
7350 case OPC2_32_RRR1_MSUBR_H_LU
:
7351 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7352 cpu_gpr_d
[r2
], n
, MODE_LU
);
7354 case OPC2_32_RRR1_MSUBR_H_UL
:
7355 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7356 cpu_gpr_d
[r2
], n
, MODE_UL
);
7358 case OPC2_32_RRR1_MSUBR_H_UU
:
7359 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7360 cpu_gpr_d
[r2
], n
, MODE_UU
);
7362 case OPC2_32_RRR1_MSUBRS_H_LL
:
7363 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7364 cpu_gpr_d
[r2
], n
, MODE_LL
);
7366 case OPC2_32_RRR1_MSUBRS_H_LU
:
7367 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7368 cpu_gpr_d
[r2
], n
, MODE_LU
);
7370 case OPC2_32_RRR1_MSUBRS_H_UL
:
7371 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7372 cpu_gpr_d
[r2
], n
, MODE_UL
);
7374 case OPC2_32_RRR1_MSUBRS_H_UU
:
7375 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7376 cpu_gpr_d
[r2
], n
, MODE_UU
);
7379 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7383 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7386 uint32_t r1
, r2
, r3
, r4
, n
;
7389 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7390 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7391 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7392 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7393 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7394 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7396 temp
= tcg_temp_new();
7397 temp2
= tcg_temp_new();
7400 case OPC2_32_RRR1_MSUB_Q_32
:
7401 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7402 cpu_gpr_d
[r2
], n
, 32);
7404 case OPC2_32_RRR1_MSUB_Q_64
:
7407 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7408 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7411 case OPC2_32_RRR1_MSUB_Q_32_L
:
7412 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7413 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7416 case OPC2_32_RRR1_MSUB_Q_64_L
:
7419 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7420 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7421 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7424 case OPC2_32_RRR1_MSUB_Q_32_U
:
7425 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7426 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7429 case OPC2_32_RRR1_MSUB_Q_64_U
:
7432 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7433 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7434 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7437 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7438 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7439 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7440 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7442 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7445 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7446 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7447 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7448 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7450 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7451 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7452 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7453 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7455 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7458 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7459 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7460 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7461 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7463 case OPC2_32_RRR1_MSUBS_Q_32
:
7464 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7465 cpu_gpr_d
[r2
], n
, 32);
7467 case OPC2_32_RRR1_MSUBS_Q_64
:
7470 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7471 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7474 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7475 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7476 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7479 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7482 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7483 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7484 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7487 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7488 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7489 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7492 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7495 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7496 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7497 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7500 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7501 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7502 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7503 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7505 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7508 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7509 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7510 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7511 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7513 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7514 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7515 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7516 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7518 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7521 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7522 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7523 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7524 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7526 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7528 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7529 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7531 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7533 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7534 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7536 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7537 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7538 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7539 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7541 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7542 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7543 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7544 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7546 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7547 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7548 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7549 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7551 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7552 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7553 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7554 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7557 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7561 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7564 uint32_t r1
, r2
, r3
, r4
, n
;
7566 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7567 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7568 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7569 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7570 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7571 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7574 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7577 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7578 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7580 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7583 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7584 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7586 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7589 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7590 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7592 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7595 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7596 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7598 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7601 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7602 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7605 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7608 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7609 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7612 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7615 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7616 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7619 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7622 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7623 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7626 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7629 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7630 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7633 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7636 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7637 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7640 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7643 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7644 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7647 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7650 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7651 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7654 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7657 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7658 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7661 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7664 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7665 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7668 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7671 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7672 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7675 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7678 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7679 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7682 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7683 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7684 cpu_gpr_d
[r2
], n
, MODE_LL
);
7686 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7687 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7688 cpu_gpr_d
[r2
], n
, MODE_LU
);
7690 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7691 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7692 cpu_gpr_d
[r2
], n
, MODE_UL
);
7694 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7695 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7696 cpu_gpr_d
[r2
], n
, MODE_UU
);
7698 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7699 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7700 cpu_gpr_d
[r2
], n
, MODE_LL
);
7702 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7703 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7704 cpu_gpr_d
[r2
], n
, MODE_LU
);
7706 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7707 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7708 cpu_gpr_d
[r2
], n
, MODE_UL
);
7710 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7711 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7712 cpu_gpr_d
[r2
], n
, MODE_UU
);
7715 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7720 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7724 TCGv tmp_width
, tmp_pos
;
7726 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7727 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7728 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7729 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7730 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7732 tmp_pos
= tcg_temp_new();
7733 tmp_width
= tcg_temp_new();
7736 case OPC2_32_RRRR_DEXTR
:
7737 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7739 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7741 TCGv msw
= tcg_temp_new();
7742 TCGv zero
= tcg_constant_tl(0);
7743 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7744 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7745 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7747 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7748 * behaviour. So check that case here and set the low bits to zero
7749 * which effectivly returns cpu_gpr_d[r1]
7751 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7752 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7755 case OPC2_32_RRRR_EXTR
:
7756 case OPC2_32_RRRR_EXTR_U
:
7758 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7759 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7760 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7761 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7762 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7763 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7764 if (op2
== OPC2_32_RRRR_EXTR
) {
7765 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7767 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7770 case OPC2_32_RRRR_INSERT
:
7772 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7773 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7774 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7778 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7783 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7791 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7792 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7793 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7794 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7795 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7796 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7798 temp
= tcg_temp_new();
7801 case OPC2_32_RRRW_EXTR
:
7802 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7803 tcg_gen_addi_tl(temp
, temp
, width
);
7804 tcg_gen_subfi_tl(temp
, 32, temp
);
7805 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7806 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7808 case OPC2_32_RRRW_EXTR_U
:
7810 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7812 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7813 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7814 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7817 case OPC2_32_RRRW_IMASK
:
7818 temp2
= tcg_temp_new();
7820 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7821 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7822 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7823 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7824 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7826 case OPC2_32_RRRW_INSERT
:
7827 temp2
= tcg_temp_new();
7829 tcg_gen_movi_tl(temp
, width
);
7830 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7831 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7834 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * Decode the SYS-format system/interrupt instructions (DEBUG, DISABLE,
 * ENABLE, DSYNC, ISYNC, NOP, RET, FRET, RFE, RFM, RSLCX, SVLCX,
 * RESTORE, TRAPSV, TRAPV).  Unknown sub-opcodes raise an
 * illegal-opcode trap.
 */
7839 static void decode_sys_interrupts(DisasContext
*ctx
)
7846 op2
= MASK_OP_SYS_OP2(ctx
->opcode
)
7847 r1
= MASK_OP_SYS_S1D(ctx
->opcode
)
7850 case OPC2_32_SYS_DEBUG
:
7851 /* raise EXCP_DEBUG */
/* DISABLE: clear the ICR.IE bit (mask picked per core revision in
 * tricore_tr_init_disas_context). */
7853 case OPC2_32_SYS_DISABLE
:
7854 tcg_gen_andi_tl(cpu_ICR
, cpu_ICR
, ~ctx
->icr_ie_mask
)
7856 case OPC2_32_SYS_DSYNC
:
/* ENABLE: set the ICR.IE bit, enabling interrupts. */
7858 case OPC2_32_SYS_ENABLE
:
7859 tcg_gen_ori_tl(cpu_ICR
, cpu_ICR
, ctx
->icr_ie_mask
)
7861 case OPC2_32_SYS_ISYNC
:
7863 case OPC2_32_SYS_NOP
:
/* RET / FRET: handled by the common branch generator. */
7865 case OPC2_32_SYS_RET
:
7866 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
7868 case OPC2_32_SYS_FRET
:
/* RFE: return from exception via helper; ends the TB. */
7871 case OPC2_32_SYS_RFE
:
7872 gen_helper_rfe(cpu_env
)
7873 tcg_gen_exit_tb(NULL
, 0);
7874 ctx
->base
.is_jmp
= DISAS_NORETURN
;
/* RFM: return from debug monitor.  Only legal in supervisor mode and
 * only acted on when DBGSR.DE == 1; ends the TB. */
7876 case OPC2_32_SYS_RFM
:
7877 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
7878 tmp
= tcg_temp_new();
7879 l1
= gen_new_label();
7881 tcg_gen_ld32u_tl(tmp
, cpu_env
, offsetof(CPUTriCoreState
, DBGSR
));
7882 tcg_gen_andi_tl(tmp
, tmp
, MASK_DBGSR_DE
)
7883 tcg_gen_brcondi_tl(TCG_COND_NE
, tmp
, 1, l1
)
7884 gen_helper_rfm(cpu_env
)
7886 tcg_gen_exit_tb(NULL
, 0);
7887 ctx
->base
.is_jmp
= DISAS_NORETURN
;
7889 /* generate privilege trap */
/* RSLCX / SVLCX: restore / save lower context via helpers. */
7892 case OPC2_32_SYS_RSLCX
:
7893 gen_helper_rslcx(cpu_env
)
7895 case OPC2_32_SYS_SVLCX
:
7896 gen_helper_svlcx(cpu_env
)
/* RESTORE: TC1.6+ only -- copy d[r1] bit 0 into ICR.IE (bit 8),
 * permitted in supervisor or user-1 mode; otherwise illegal opcode. */
7898 case OPC2_32_SYS_RESTORE
:
7899 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
7900 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
||
7901 (ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_UM1
) {
7902 tcg_gen_deposit_tl(cpu_ICR
, cpu_ICR
, cpu_gpr_d
[r1
], 8, 1);
7903 } /* else raise privilege trap */
7905 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
)
/* TRAPSV: trap if the sticky-overflow flag is set (bit 31 of the
 * cached PSW_SV value, i.e. the value is negative). */
7908 case OPC2_32_SYS_TRAPSV
:
7909 l1
= gen_new_label();
7910 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_SV
, 0, l1
)
7911 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_SOVF
)
/* TRAPV: likewise for the overflow flag. */
7914 case OPC2_32_SYS_TRAPV
:
7915 l1
= gen_new_label();
7916 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_V
, 0, l1
)
7917 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_OVF
)
/* Unrecognised op2: illegal-opcode trap. */
7921 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
)
/*
 * Top-level decoder for 32-bit TriCore instructions: dispatch on the
 * major opcode (op1) to the per-format decode_* helpers, handling a
 * few simple formats (ABS store/load .Q, LEA, ST.T, DEXTR, RCRR
 * INSERT, RSTV) inline.  Undecodable opcodes raise an illegal-opcode
 * trap.
 */
7925 static void decode_32Bit_opc(DisasContext
*ctx
)
7929 int32_t address
, const16
;
7932 TCGv temp
, temp2
, temp3
;
7934 op1
= MASK_OP_MAJOR(ctx
->opcode
)
7936 /* handle JNZ.T opcode only being 7 bit long */
7937 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
7938 op1
= OPCM_32_BRN_JTT
;
/* ABS format: absolute-addressed loads/stores. */
7943 case OPCM_32_ABS_LDW
:
7944 decode_abs_ldw(ctx
)
7946 case OPCM_32_ABS_LDB
:
7947 decode_abs_ldb(ctx
)
7949 case OPCM_32_ABS_LDMST_SWAP
:
7950 decode_abs_ldst_swap(ctx
)
7952 case OPCM_32_ABS_LDST_CONTEXT
:
7953 decode_abs_ldst_context(ctx
)
7955 case OPCM_32_ABS_STORE
:
7956 decode_abs_store(ctx
)
7958 case OPCM_32_ABS_STOREB_H
:
7959 decode_abs_storeb_h(ctx
)
/* ST.Q: store the upper halfword of d[r1] to the absolute address. */
7961 case OPC1_32_ABS_STOREQ
:
7962 address
= MASK_OP_ABS_OFF18(ctx
->opcode
)
7963 r1
= MASK_OP_ABS_S1D(ctx
->opcode
)
7964 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7965 temp2
= tcg_temp_new();
7967 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
7968 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
)
/* LD.Q: load a halfword into the upper half of d[r1]. */
7970 case OPC1_32_ABS_LD_Q
:
7971 address
= MASK_OP_ABS_OFF18(ctx
->opcode
)
7972 r1
= MASK_OP_ABS_S1D(ctx
->opcode
)
7973 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7975 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
)
7976 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
/* LEA: load the absolute effective address into a[r1]. */
7978 case OPC1_32_ABS_LEA
:
7979 address
= MASK_OP_ABS_OFF18(ctx
->opcode
)
7980 r1
= MASK_OP_ABS_S1D(ctx
->opcode
)
7981 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
/* ST.T: read-modify-write a single bit <bpos> of the addressed byte. */
7984 case OPC1_32_ABSB_ST_T
:
7985 address
= MASK_OP_ABS_OFF18(ctx
->opcode
)
7986 b
= MASK_OP_ABSB_B(ctx
->opcode
)
7987 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
)
7989 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7990 temp2
= tcg_temp_new();
7992 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
)
7993 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
7994 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
7995 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
)
/* B format: 24-bit displacement calls/jumps go through the common
 * branch generator. */
7998 case OPC1_32_B_CALL
:
7999 case OPC1_32_B_CALLA
:
8000 case OPC1_32_B_FCALL
:
8001 case OPC1_32_B_FCALLA
:
8006 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
)
8007 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
)
/* BIT format: single-bit logic instructions. */
8010 case OPCM_32_BIT_ANDACC
:
8011 decode_bit_andacc(ctx
)
8013 case OPCM_32_BIT_LOGICAL_T1
:
8014 decode_bit_logical_t(ctx
)
8016 case OPCM_32_BIT_INSERT
:
8017 decode_bit_insert(ctx
)
8019 case OPCM_32_BIT_LOGICAL_T2
:
8020 decode_bit_logical_t2(ctx
)
8022 case OPCM_32_BIT_ORAND
:
8023 decode_bit_orand(ctx
)
8025 case OPCM_32_BIT_SH_LOGIC1
:
8026 decode_bit_sh_logic1(ctx
)
8028 case OPCM_32_BIT_SH_LOGIC2
:
8029 decode_bit_sh_logic2(ctx
)
/* BO format: base+offset addressing-mode loads/stores. */
8032 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
8033 decode_bo_addrmode_post_pre_base(ctx
)
8035 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
8036 decode_bo_addrmode_bitreverse_circular(ctx
)
8038 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
8039 decode_bo_addrmode_ld_post_pre_base(ctx
)
8041 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
8042 decode_bo_addrmode_ld_bitreverse_circular(ctx
)
8044 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
8045 decode_bo_addrmode_stctx_post_pre_base(ctx
)
8047 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
8048 decode_bo_addrmode_ldmst_bitreverse_circular(ctx
)
/* BOL format: base + 16-bit long offset loads/stores. */
8051 case OPC1_32_BOL_LD_A_LONGOFF
:
8052 case OPC1_32_BOL_LD_W_LONGOFF
:
8053 case OPC1_32_BOL_LEA_LONGOFF
:
8054 case OPC1_32_BOL_ST_W_LONGOFF
:
8055 case OPC1_32_BOL_ST_A_LONGOFF
:
8056 case OPC1_32_BOL_LD_B_LONGOFF
:
8057 case OPC1_32_BOL_LD_BU_LONGOFF
:
8058 case OPC1_32_BOL_LD_H_LONGOFF
:
8059 case OPC1_32_BOL_LD_HU_LONGOFF
:
8060 case OPC1_32_BOL_ST_B_LONGOFF
:
8061 case OPC1_32_BOL_ST_H_LONGOFF
:
8062 decode_bol_opc(ctx
, op1
)
/* BRC format: compare-register-with-constant conditional branches. */
8065 case OPCM_32_BRC_EQ_NEQ
:
8066 case OPCM_32_BRC_GE
:
8067 case OPCM_32_BRC_JLT
:
8068 case OPCM_32_BRC_JNE
:
8069 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
)
8070 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
)
8071 r1
= MASK_OP_BRC_S1(ctx
->opcode
)
8072 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
)
/* BRN format: jump on single bit (JZ.T/JNZ.T). */
8075 case OPCM_32_BRN_JTT
:
8076 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
)
8077 r1
= MASK_OP_BRN_S1(ctx
->opcode
)
8078 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
)
/* BRR format: compare-register-with-register conditional branches. */
8081 case OPCM_32_BRR_EQ_NEQ
:
8082 case OPCM_32_BRR_ADDR_EQ_NEQ
:
8083 case OPCM_32_BRR_GE
:
8084 case OPCM_32_BRR_JLT
:
8085 case OPCM_32_BRR_JNE
:
8086 case OPCM_32_BRR_JNZ
:
8087 case OPCM_32_BRR_LOOP
:
8088 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
)
8089 r2
= MASK_OP_BRR_S2(ctx
->opcode
)
8090 r1
= MASK_OP_BRR_S1(ctx
->opcode
)
8091 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
)
/* RC format: register + 9-bit constant ALU instructions. */
8094 case OPCM_32_RC_LOGICAL_SHIFT
:
8095 decode_rc_logical_shift(ctx
)
8097 case OPCM_32_RC_ACCUMULATOR
:
8098 decode_rc_accumulator(ctx
)
8100 case OPCM_32_RC_SERVICEROUTINE
:
8101 decode_rc_serviceroutine(ctx
)
8103 case OPCM_32_RC_MUL
:
/* RCPW format: constant-position bit-field insert. */
8107 case OPCM_32_RCPW_MASK_INSERT
:
8108 decode_rcpw_insert(ctx
)
/* RCRR INSERT: width and position come from the register pair e[r3]
 * (d[r3+1] = width, d[r3] = pos), value from the 4-bit constant. */
8111 case OPC1_32_RCRR_INSERT
:
8112 r1
= MASK_OP_RCRR_S1(ctx
->opcode
)
8113 r2
= MASK_OP_RCRR_S3(ctx
->opcode
)
8114 r3
= MASK_OP_RCRR_D(ctx
->opcode
)
8115 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
)
8116 temp
= tcg_constant_i32(const16
)
8117 temp2
= tcg_temp_new(); /* width*/
8118 temp3
= tcg_temp_new(); /* pos */
8122 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
8123 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
8125 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
)
/* RCRW / RCR formats. */
8128 case OPCM_32_RCRW_MASK_INSERT
:
8129 decode_rcrw_insert(ctx
)
8132 case OPCM_32_RCR_COND_SELECT
:
8133 decode_rcr_cond_select(ctx
)
8135 case OPCM_32_RCR_MADD
:
8136 decode_rcr_madd(ctx
)
8138 case OPCM_32_RCR_MSUB
:
8139 decode_rcr_msub(ctx
)
/* RLC format: register + 16-bit long constant. */
8142 case OPC1_32_RLC_ADDI
:
8143 case OPC1_32_RLC_ADDIH
:
8144 case OPC1_32_RLC_ADDIH_A
:
8145 case OPC1_32_RLC_MFCR
:
8146 case OPC1_32_RLC_MOV
:
8147 case OPC1_32_RLC_MOV_64
:
8148 case OPC1_32_RLC_MOV_U
:
8149 case OPC1_32_RLC_MOV_H
:
8150 case OPC1_32_RLC_MOVH_A
:
8151 case OPC1_32_RLC_MTCR
:
8152 decode_rlc_opc(ctx
, op1
)
/* RR format: register-register ALU/address instructions. */
8155 case OPCM_32_RR_ACCUMULATOR
:
8156 decode_rr_accumulator(ctx
)
8158 case OPCM_32_RR_LOGICAL_SHIFT
:
8159 decode_rr_logical_shift(ctx
)
8161 case OPCM_32_RR_ADDRESS
:
8162 decode_rr_address(ctx
)
8164 case OPCM_32_RR_IDIRECT
:
8165 decode_rr_idirect(ctx
)
8167 case OPCM_32_RR_DIVIDE
:
8168 decode_rr_divide(ctx
)
/* RR1 / RR2 formats: multiplies. */
8171 case OPCM_32_RR1_MUL
:
8172 decode_rr1_mul(ctx
)
8174 case OPCM_32_RR1_MULQ
:
8175 decode_rr1_mulq(ctx
)
8178 case OPCM_32_RR2_MUL
:
8179 decode_rr2_mul(ctx
)
/* RRPW format: constant-position extract/insert, plus DEXTR. */
8182 case OPCM_32_RRPW_EXTRACT_INSERT
:
8183 decode_rrpw_extract_insert(ctx
)
/* DEXTR: extract 32 bits from the concatenation d[r1]:d[r2] at a
 * constant position, via the two-operand funnel-shift helper. */
8185 case OPC1_32_RRPW_DEXTR
:
8186 r1
= MASK_OP_RRPW_S1(ctx
->opcode
)
8187 r2
= MASK_OP_RRPW_S2(ctx
->opcode
)
8188 r3
= MASK_OP_RRPW_D(ctx
->opcode
)
8189 const16
= MASK_OP_RRPW_POS(ctx
->opcode
)
8191 tcg_gen_extract2_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
/* RRR / RRR2 / RRR1 formats: conditional selects, divides and the
 * multiply-accumulate families. */
8195 case OPCM_32_RRR_COND_SELECT
:
8196 decode_rrr_cond_select(ctx
)
8198 case OPCM_32_RRR_DIVIDE
:
8199 decode_rrr_divide(ctx
)
8202 case OPCM_32_RRR2_MADD
:
8203 decode_rrr2_madd(ctx
)
8205 case OPCM_32_RRR2_MSUB
:
8206 decode_rrr2_msub(ctx
)
8209 case OPCM_32_RRR1_MADD
:
8210 decode_rrr1_madd(ctx
)
8212 case OPCM_32_RRR1_MADDQ_H
:
8213 decode_rrr1_maddq_h(ctx
)
8215 case OPCM_32_RRR1_MADDSU_H
:
8216 decode_rrr1_maddsu_h(ctx
)
8218 case OPCM_32_RRR1_MSUB_H
:
8219 decode_rrr1_msub(ctx
)
8221 case OPCM_32_RRR1_MSUB_Q
:
8222 decode_rrr1_msubq_h(ctx
)
8224 case OPCM_32_RRR1_MSUBAD_H
:
8225 decode_rrr1_msubad_h(ctx
)
/* RRRR / RRRW formats: register-width extract/insert. */
8228 case OPCM_32_RRRR_EXTRACT_INSERT
:
8229 decode_rrrr_extract_insert(ctx
)
8232 case OPCM_32_RRRW_EXTRACT_INSERT
:
8233 decode_rrrw_extract_insert(ctx
)
/* SYS format: system/interrupt control. */
8236 case OPCM_32_SYS_INTERRUPTS
:
8237 decode_sys_interrupts(ctx
)
/* RSTV: clear all cached PSW overflow/advance flags at once. */
8239 case OPC1_32_SYS_RSTV
:
8240 tcg_gen_movi_tl(cpu_PSW_V
, 0);
8241 tcg_gen_mov_tl(cpu_PSW_SV
, cpu_PSW_V
)
8242 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
)
8243 tcg_gen_mov_tl(cpu_PSW_SAV
, cpu_PSW_V
)
/* Unrecognised major opcode: illegal-opcode trap. */
8246 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
)
/*
 * TriCore encodes the instruction length in bit 0 of the first
 * halfword: a cleared bit 0 marks a 16-bit instruction, a set bit 0
 * a 32-bit one.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    bool is_32bit = insn & 0x1;

    return !is_32bit;
}
/*
 * TranslatorOps hook: set up the per-TB DisasContext -- MMU index,
 * hflags snapshot from the TB flags, CPU feature bits, and the
 * revision-dependent mask used to toggle ICR.IE (bit layout changed
 * in TC1.6.1).
 */
8255 static void tricore_tr_init_disas_context(DisasContextBase
*dcbase
,
8258 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
)
8259 CPUTriCoreState
*env
= cs
->env_ptr
;
8260 ctx
->mem_idx
= cpu_mmu_index(env
, false);
8261 ctx
->hflags
= (uint32_t)ctx
->base
.tb
->flags
;
8262 ctx
->features
= env
->features
;
/* ICR.IE moved between TC1.3 and TC1.6.1; pick the matching mask. */
8263 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
8264 ctx
->icr_ie_mask
= R_ICR_IE_161_MASK
;
8266 ctx
->icr_ie_mask
= R_ICR_IE_13_MASK
;
/*
 * TranslatorOps tb_start hook.  Body not visible in this excerpt --
 * presumably empty (no per-TB prologue work needed); confirm against
 * the full file.
 */
8270 static void tricore_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
/*
 * TranslatorOps insn_start hook: record the guest PC of the
 * instruction about to be translated, for exception restore.
 */
8274 static void tricore_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8276 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
)
8278 tcg_gen_insn_start(ctx
->base
.pc_next
)
8281 static bool insn_crosses_page(CPUTriCoreState
*env
, DisasContext
*ctx
)
8284 * Return true if the insn at ctx->base.pc_next might cross a page boundary.
8285 * (False positives are OK, false negatives are not.)
8286 * Our caller ensures we are only called if dc->base.pc_next is less than
8287 * 4 bytes from the page boundary, so we cross the page if the first
8288 * 16 bits indicate that this is a 32 bit insn.
/* Peek at the first halfword only; its bit 0 encodes the length. */
8290 uint16_t insn
= cpu_lduw_code(env
, ctx
->base
.pc_next
)
8292 return !tricore_insn_is_16bit(insn
)
/*
 * TranslatorOps translate_insn hook: fetch one instruction, decode it
 * as 16- or 32-bit based on bit 0 of the first halfword, advance
 * pc_next, and end the TB early when the next instruction would start
 * on (or straddle into) a new guest page.
 */
8296 static void tricore_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
8298 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
)
8299 CPUTriCoreState
*env
= cpu
->env_ptr
;
/* First halfword decides the instruction length. */
8303 insn_lo
= cpu_lduw_code(env
, ctx
->base
.pc_next
)
8304 is_16bit
= tricore_insn_is_16bit(insn_lo
)
8306 ctx
->opcode
= insn_lo
;
8307 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 2;
8308 decode_16Bit_opc(ctx
)
/* 32-bit insn: fetch the second halfword and combine. */
8310 uint32_t insn_hi
= cpu_lduw_code(env
, ctx
->base
.pc_next
+ 2);
8311 ctx
->opcode
= insn_hi
<< 16 | insn_lo
;
8312 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 4;
8313 decode_32Bit_opc(ctx
)
8315 ctx
->base
.pc_next
= ctx
->pc_succ_insn
;
/* Stop the TB at a page boundary, or within 3 bytes of it when the
 * next insn's first halfword says it is 32-bit and would cross. */
8317 if (ctx
->base
.is_jmp
== DISAS_NEXT
) {
8318 target_ulong page_start
;
8320 page_start
= ctx
->base
.pc_first
& TARGET_PAGE_MASK
;
8321 if (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
8322 || (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
- 3
8323 && insn_crosses_page(env
, ctx
))) {
8324 ctx
->base
.is_jmp
= DISAS_TOO_MANY
;
/*
 * TranslatorOps tb_stop hook: emit the TB epilogue.  DISAS_TOO_MANY
 * chains to the next PC via goto_tb; DISAS_NORETURN needs nothing
 * (the exit was already emitted); any other state is a decoder bug.
 */
8329 static void tricore_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8331 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
)
8333 switch (ctx
->base
.is_jmp
) {
8334 case DISAS_TOO_MANY
:
8335 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
)
8337 case DISAS_NORETURN
:
8340 g_assert_not_reached();
/*
 * TranslatorOps disas_log hook: write the symbol name of the TB start
 * and a disassembly of the whole TB to the QEMU log file.
 */
8344 static void tricore_tr_disas_log(const DisasContextBase
*dcbase
,
8345 CPUState
*cpu
, FILE *logfile
)
8347 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
8348 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
)
/* Hook table wiring this target's translator callbacks into the
 * generic translator_loop(). */
8351 static const TranslatorOps tricore_tr_ops
= {
8352 .init_disas_context
= tricore_tr_init_disas_context
,
8353 .tb_start
= tricore_tr_tb_start
,
8354 .insn_start
= tricore_tr_insn_start
,
8355 .translate_insn
= tricore_tr_translate_insn
,
8356 .tb_stop
= tricore_tr_tb_stop
,
8357 .disas_log
= tricore_tr_disas_log
,
/*
 * Target entry point for TB translation: run the generic translator
 * loop over a stack-allocated DisasContext with this target's ops.
 */
8361 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
8362 target_ulong pc
, void *host_pc
)
8365 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
,
8366 &tricore_tr_ops
, &ctx
.base
)
/*
 * Reset the CPU state to its power-on defaults.  Body largely not
 * visible in this excerpt -- presumably (re)initialises PSW and
 * related registers; confirm against the full file.
 */
8375 void cpu_state_reset(CPUTriCoreState
*env
)
8377 /* Reset Regs to Default Value */
/*
 * Create the TCG globals backing the core special-function registers
 * (PCXI, PSW, PC, ICR) in CPUTriCoreState, so generated code can
 * access them directly.
 */
8382 static void tricore_tcg_init_csfr(void)
8384 cpu_PCXI
= tcg_global_mem_new(cpu_env
,
8385 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
8386 cpu_PSW
= tcg_global_mem_new(cpu_env
,
8387 offsetof(CPUTriCoreState
, PSW
), "PSW");
8388 cpu_PC
= tcg_global_mem_new(cpu_env
,
8389 offsetof(CPUTriCoreState
, PC
), "PC");
8390 cpu_ICR
= tcg_global_mem_new(cpu_env
,
8391 offsetof(CPUTriCoreState
, ICR
), "ICR");
/*
 * One-time TCG initialisation for the TriCore target: create globals
 * for the 16 address and 16 data registers, the core SFRs (via
 * tricore_tcg_init_csfr), and the cached PSW status bits
 * (C/V/SV/AV/SAV).
 */
8394 void tricore_tcg_init(void)
/* Address registers a0..a15 (names from regnames_a, incl. "sp"). */
8399 for (i
= 0 ; i
< 16 ; i
++) {
8400 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8401 offsetof(CPUTriCoreState
, gpr_a
[i
]),
/* Data registers d0..d15. */
8404 for (i
= 0 ; i
< 16 ; i
++) {
8405 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8406 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8409 tricore_tcg_init_csfr();
8410 /* init PSW flag cache */
8411 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8412 offsetof(CPUTriCoreState
, PSW_USB_C
),
8414 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8415 offsetof(CPUTriCoreState
, PSW_USB_V
),
8417 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8418 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8420 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8421 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8423 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8424 offsetof(CPUTriCoreState
, PSW_USB_SAV
),