2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
49 static TCGv cpu_gpr_a
[16];
50 static TCGv cpu_gpr_d
[16];
52 static TCGv cpu_PSW_C
;
53 static TCGv cpu_PSW_V
;
54 static TCGv cpu_PSW_SV
;
55 static TCGv cpu_PSW_AV
;
56 static TCGv cpu_PSW_SAV
;
/* Printable names of the address registers (A10 is the stack pointer). */
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};
/* Printable names of the data registers. */
static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
70 typedef struct DisasContext
{
71 DisasContextBase base
;
72 target_ulong pc_succ_insn
;
74 /* Routine used to access memory */
76 uint32_t hflags
, saved_hflags
;
80 static int has_feature(DisasContext
*ctx
, int feature
)
82 return (ctx
->features
& (1ULL << feature
)) != 0;
92 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
94 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
95 CPUTriCoreState
*env
= &cpu
->env
;
101 qemu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
102 qemu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
103 qemu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
104 qemu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
105 qemu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
106 qemu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
108 for (i
= 0; i
< 16; ++i
) {
110 qemu_fprintf(f
, "\nGPR A%02d:", i
);
112 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
114 for (i
= 0; i
< 16; ++i
) {
116 qemu_fprintf(f
, "\nGPR D%02d:", i
);
118 qemu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
120 qemu_fprintf(f
, "\n");
124 * Functions to generate micro-ops
127 /* Makros for generating helpers */
/* Call helper `name` with cpu_env and one constant i32 argument. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_constant_i32(arg);                  \
    gen_helper_##name(cpu_env, helper_tmp);                       \
} while (0)
/* LL mode: high/low sign-extended halves of arg0, low half of arg1
   (passed twice, since only the low half is used). */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* LU mode: both halves of arg0 and arg1, sign-extended. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* UL mode: both halves of arg0 and arg1, sign-extended
   (arg10/arg11 swapped w.r.t. LU). */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
} while (0)
/* UU mode: high half of arg1 used twice; note arg00/arg01 take the
   opposite halves compared with LL. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
} while (0)
/* Pack the (al1, ah1) register pair into one i64, call the helper,
   and split the 64-bit result back into (rl, rh). */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
} while (0)
/* Call an env-taking helper returning i64; split result into (rl, rh). */
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {        \
    TCGv_i64 ret = tcg_temp_new_i64();                      \
                                                            \
    gen_helper_##name(ret, cpu_env, arg1, arg2);            \
    tcg_gen_extr_i64_i32(rl, rh, ret);                      \
} while (0)
/* Effective-address construction for ABS-format instructions.
   The macro argument is now fully parenthesized (robustness fix). */
#define EA_ABS_FORMAT(con) ((((con) & 0x3C000) << 14) + ((con) & 0x3FFF))
/* NOTE(review): EA_B_ABSOLUT expands against a variable named `offset`
   in the caller's scope, not its own argument — confirm callers rely
   on this before changing it. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
198 /* For two 32-bit registers used a 64-bit register, the first
199 registernumber needs to be even. Otherwise we trap. */
200 static inline void generate_trap(DisasContext
*ctx
, int class, int tin
);
201 #define CHECK_REG_PAIR(reg) do { \
203 generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
207 /* Functions for load/save to/from memory */
209 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
210 int16_t con
, MemOp mop
)
212 TCGv temp
= tcg_temp_new();
213 tcg_gen_addi_tl(temp
, r2
, con
);
214 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
217 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
218 int16_t con
, MemOp mop
)
220 TCGv temp
= tcg_temp_new();
221 tcg_gen_addi_tl(temp
, r2
, con
);
222 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
225 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
227 TCGv_i64 temp
= tcg_temp_new_i64();
229 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
230 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
233 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
236 TCGv temp
= tcg_temp_new();
237 tcg_gen_addi_tl(temp
, base
, con
);
238 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
241 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
243 TCGv_i64 temp
= tcg_temp_new_i64();
245 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
246 /* write back to two 32 bit regs */
247 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
250 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
253 TCGv temp
= tcg_temp_new();
254 tcg_gen_addi_tl(temp
, base
, con
);
255 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
258 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, r2
, off
);
263 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
264 tcg_gen_mov_tl(r2
, temp
);
267 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, r2
, off
);
272 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
273 tcg_gen_mov_tl(r2
, temp
);
276 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
277 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
279 TCGv temp
= tcg_temp_new();
280 TCGv temp2
= tcg_temp_new();
282 CHECK_REG_PAIR(ereg
);
283 /* temp = (M(EA, word) */
284 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
285 /* temp = temp & ~E[a][63:32]) */
286 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
287 /* temp2 = (E[a][31:0] & E[a][63:32]); */
288 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
289 /* temp = temp | temp2; */
290 tcg_gen_or_tl(temp
, temp
, temp2
);
291 /* M(EA, word) = temp; */
292 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
295 /* tmp = M(EA, word);
298 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
300 TCGv temp
= tcg_temp_new();
302 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
303 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
304 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
307 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
309 TCGv temp
= tcg_temp_new();
310 TCGv temp2
= tcg_temp_new();
311 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
312 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
313 cpu_gpr_d
[reg
], temp
);
314 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
315 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
318 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
320 TCGv temp
= tcg_temp_new();
321 TCGv temp2
= tcg_temp_new();
322 TCGv temp3
= tcg_temp_new();
324 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
325 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
326 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
327 tcg_gen_or_tl(temp2
, temp2
, temp3
);
328 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
329 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
333 /* We generate loads and store to core special function register (csfr) through
334 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
335 makros R, A and E, which allow read-only, all and endinit protected access.
336 These makros also specify in which ISA version the csfr was introduced. */
337 #define R(ADDRESS, REG, FEATURE) \
339 if (has_feature(ctx, FEATURE)) { \
340 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
343 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
344 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
345 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
347 /* since we're caching PSW make this a special case */
348 if (offset
== 0xfe04) {
349 gen_helper_psw_read(ret
, cpu_env
);
352 #include "csfr.h.inc"
360 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
361 since no execption occurs */
362 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
364 if (has_feature(ctx, FEATURE)) { \
365 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
368 /* Endinit protected registers
369 TODO: Since the endinit bit is in a register of a not yet implemented
370 watchdog device, we handle endinit protected registers like
371 all-access registers for now. */
372 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
373 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
376 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
377 /* since we're caching PSW make this a special case */
378 if (offset
== 0xfe04) {
379 gen_helper_psw_write(cpu_env
, r1
);
382 #include "csfr.h.inc"
386 /* generate privilege trap */
390 /* Functions for arithmetic instructions */
392 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
394 TCGv t0
= tcg_temp_new_i32();
395 TCGv result
= tcg_temp_new_i32();
396 /* Addition and set V/SV bits */
397 tcg_gen_add_tl(result
, r1
, r2
);
399 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
400 tcg_gen_xor_tl(t0
, r1
, r2
);
401 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
403 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
404 /* Calc AV/SAV bits */
405 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
406 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
408 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
409 /* write back result */
410 tcg_gen_mov_tl(ret
, result
);
414 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
416 TCGv temp
= tcg_temp_new();
417 TCGv_i64 t0
= tcg_temp_new_i64();
418 TCGv_i64 t1
= tcg_temp_new_i64();
419 TCGv_i64 result
= tcg_temp_new_i64();
421 tcg_gen_add_i64(result
, r1
, r2
);
423 tcg_gen_xor_i64(t1
, result
, r1
);
424 tcg_gen_xor_i64(t0
, r1
, r2
);
425 tcg_gen_andc_i64(t1
, t1
, t0
);
426 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
428 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
429 /* calc AV/SAV bits */
430 tcg_gen_extrh_i64_i32(temp
, result
);
431 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
432 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
434 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
435 /* write back result */
436 tcg_gen_mov_i64(ret
, result
);
440 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
441 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
442 void(*op2
)(TCGv
, TCGv
, TCGv
))
444 TCGv temp
= tcg_temp_new();
445 TCGv temp2
= tcg_temp_new();
446 TCGv temp3
= tcg_temp_new();
447 TCGv temp4
= tcg_temp_new();
449 (*op1
)(temp
, r1_low
, r2
);
451 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
452 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
453 if (op1
== tcg_gen_add_tl
) {
454 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
456 tcg_gen_and_tl(temp2
, temp2
, temp3
);
459 (*op2
)(temp3
, r1_high
, r3
);
461 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
462 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
463 if (op2
== tcg_gen_add_tl
) {
464 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
466 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
468 /* combine V0/V1 bits */
469 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
471 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
473 tcg_gen_mov_tl(ret_low
, temp
);
474 tcg_gen_mov_tl(ret_high
, temp3
);
476 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
477 tcg_gen_xor_tl(temp
, temp
, ret_low
);
478 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
479 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
480 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
482 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
485 /* ret = r2 + (r1 * r3); */
486 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
488 TCGv_i64 t1
= tcg_temp_new_i64();
489 TCGv_i64 t2
= tcg_temp_new_i64();
490 TCGv_i64 t3
= tcg_temp_new_i64();
492 tcg_gen_ext_i32_i64(t1
, r1
);
493 tcg_gen_ext_i32_i64(t2
, r2
);
494 tcg_gen_ext_i32_i64(t3
, r3
);
496 tcg_gen_mul_i64(t1
, t1
, t3
);
497 tcg_gen_add_i64(t1
, t2
, t1
);
499 tcg_gen_extrl_i64_i32(ret
, t1
);
502 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
503 /* t1 < -0x80000000 */
504 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
505 tcg_gen_or_i64(t2
, t2
, t3
);
506 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
507 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
509 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
510 /* Calc AV/SAV bits */
511 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
512 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
514 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
517 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
519 TCGv temp
= tcg_constant_i32(con
);
520 gen_madd32_d(ret
, r1
, r2
, temp
);
524 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
527 TCGv t1
= tcg_temp_new();
528 TCGv t2
= tcg_temp_new();
529 TCGv t3
= tcg_temp_new();
530 TCGv t4
= tcg_temp_new();
532 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
533 /* only the add can overflow */
534 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
536 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
537 tcg_gen_xor_tl(t1
, r2_high
, t2
);
538 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
540 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
541 /* Calc AV/SAV bits */
542 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
543 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
545 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
546 /* write back the result */
547 tcg_gen_mov_tl(ret_low
, t3
);
548 tcg_gen_mov_tl(ret_high
, t4
);
552 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
555 TCGv_i64 t1
= tcg_temp_new_i64();
556 TCGv_i64 t2
= tcg_temp_new_i64();
557 TCGv_i64 t3
= tcg_temp_new_i64();
559 tcg_gen_extu_i32_i64(t1
, r1
);
560 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
561 tcg_gen_extu_i32_i64(t3
, r3
);
563 tcg_gen_mul_i64(t1
, t1
, t3
);
564 tcg_gen_add_i64(t2
, t2
, t1
);
565 /* write back result */
566 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
567 /* only the add overflows, if t2 < t1
569 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
570 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
571 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
573 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
574 /* Calc AV/SAV bits */
575 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
576 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
578 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
582 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
585 TCGv temp
= tcg_constant_i32(con
);
586 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
590 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
593 TCGv temp
= tcg_constant_i32(con
);
594 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
598 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
599 TCGv r3
, uint32_t n
, uint32_t mode
)
601 TCGv t_n
= tcg_constant_i32(n
);
602 TCGv temp
= tcg_temp_new();
603 TCGv temp2
= tcg_temp_new();
604 TCGv_i64 temp64
= tcg_temp_new_i64();
607 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
610 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
613 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
616 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
619 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
620 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
621 tcg_gen_add_tl
, tcg_gen_add_tl
);
625 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
626 TCGv r3
, uint32_t n
, uint32_t mode
)
628 TCGv t_n
= tcg_constant_i32(n
);
629 TCGv temp
= tcg_temp_new();
630 TCGv temp2
= tcg_temp_new();
631 TCGv_i64 temp64
= tcg_temp_new_i64();
634 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
637 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
640 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
643 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
646 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
647 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
648 tcg_gen_sub_tl
, tcg_gen_add_tl
);
652 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
653 TCGv r3
, uint32_t n
, uint32_t mode
)
655 TCGv t_n
= tcg_constant_i32(n
);
656 TCGv_i64 temp64
= tcg_temp_new_i64();
657 TCGv_i64 temp64_2
= tcg_temp_new_i64();
658 TCGv_i64 temp64_3
= tcg_temp_new_i64();
661 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
664 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
667 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
670 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
673 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
674 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
675 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
676 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
677 tcg_gen_shli_i64(temp64
, temp64
, 16);
679 gen_add64_d(temp64_2
, temp64_3
, temp64
);
680 /* write back result */
681 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
684 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
687 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
688 TCGv r3
, uint32_t n
, uint32_t mode
)
690 TCGv t_n
= tcg_constant_i32(n
);
691 TCGv temp
= tcg_temp_new();
692 TCGv temp2
= tcg_temp_new();
693 TCGv temp3
= tcg_temp_new();
694 TCGv_i64 temp64
= tcg_temp_new_i64();
698 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
701 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
704 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
707 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
710 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
711 gen_adds(ret_low
, r1_low
, temp
);
712 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
713 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
714 gen_adds(ret_high
, r1_high
, temp2
);
716 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
717 /* combine av bits */
718 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
721 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
724 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
725 TCGv r3
, uint32_t n
, uint32_t mode
)
727 TCGv t_n
= tcg_constant_i32(n
);
728 TCGv temp
= tcg_temp_new();
729 TCGv temp2
= tcg_temp_new();
730 TCGv temp3
= tcg_temp_new();
731 TCGv_i64 temp64
= tcg_temp_new_i64();
735 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
738 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
741 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
744 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
747 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
748 gen_subs(ret_low
, r1_low
, temp
);
749 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
750 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
751 gen_adds(ret_high
, r1_high
, temp2
);
753 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
754 /* combine av bits */
755 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
759 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
760 TCGv r3
, uint32_t n
, uint32_t mode
)
762 TCGv t_n
= tcg_constant_i32(n
);
763 TCGv_i64 temp64
= tcg_temp_new_i64();
764 TCGv_i64 temp64_2
= tcg_temp_new_i64();
768 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
771 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
774 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
777 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
780 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
781 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
782 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
783 tcg_gen_shli_i64(temp64
, temp64
, 16);
784 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
786 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
787 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
792 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
793 TCGv r3
, uint32_t n
, uint32_t mode
)
795 TCGv t_n
= tcg_constant_i32(n
);
796 TCGv_i64 temp64
= tcg_temp_new_i64();
797 TCGv_i64 temp64_2
= tcg_temp_new_i64();
798 TCGv_i64 temp64_3
= tcg_temp_new_i64();
801 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
804 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
807 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
810 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
813 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
814 gen_add64_d(temp64_3
, temp64_2
, temp64
);
815 /* write back result */
816 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
820 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
821 TCGv r3
, uint32_t n
, uint32_t mode
)
823 TCGv t_n
= tcg_constant_i32(n
);
824 TCGv_i64 temp64
= tcg_temp_new_i64();
825 TCGv_i64 temp64_2
= tcg_temp_new_i64();
828 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
831 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
834 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
837 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
840 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
841 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
842 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
846 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
849 TCGv t_n
= tcg_constant_i32(n
);
850 TCGv_i64 temp64
= tcg_temp_new_i64();
853 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
856 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
859 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
862 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
865 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
869 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
871 TCGv temp
= tcg_temp_new();
872 TCGv temp2
= tcg_temp_new();
874 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
875 tcg_gen_shli_tl(temp
, r1
, 16);
876 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
880 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
882 TCGv t_n
= tcg_constant_i32(n
);
883 TCGv temp
= tcg_temp_new();
884 TCGv temp2
= tcg_temp_new();
885 TCGv_i64 temp64
= tcg_temp_new_i64();
888 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
891 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
894 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
897 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
900 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
901 tcg_gen_shli_tl(temp
, r1
, 16);
902 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
907 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
908 uint32_t n
, uint32_t mode
)
910 TCGv t_n
= tcg_constant_i32(n
);
911 TCGv_i64 temp64
= tcg_temp_new_i64();
914 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
917 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
920 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
923 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
926 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
930 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
932 TCGv temp
= tcg_temp_new();
933 TCGv temp2
= tcg_temp_new();
935 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
936 tcg_gen_shli_tl(temp
, r1
, 16);
937 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
941 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
943 TCGv t_n
= tcg_constant_i32(n
);
944 TCGv temp
= tcg_temp_new();
945 TCGv temp2
= tcg_temp_new();
946 TCGv_i64 temp64
= tcg_temp_new_i64();
949 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
952 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
955 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
958 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
961 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
962 tcg_gen_shli_tl(temp
, r1
, 16);
963 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
967 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
969 TCGv t_n
= tcg_constant_i32(n
);
970 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
974 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
976 TCGv t_n
= tcg_constant_i32(n
);
977 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
981 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
984 TCGv temp
= tcg_temp_new();
985 TCGv temp2
= tcg_temp_new();
986 TCGv temp3
= tcg_temp_new();
987 TCGv_i64 t1
= tcg_temp_new_i64();
988 TCGv_i64 t2
= tcg_temp_new_i64();
989 TCGv_i64 t3
= tcg_temp_new_i64();
991 tcg_gen_ext_i32_i64(t2
, arg2
);
992 tcg_gen_ext_i32_i64(t3
, arg3
);
994 tcg_gen_mul_i64(t2
, t2
, t3
);
995 tcg_gen_shli_i64(t2
, t2
, n
);
997 tcg_gen_ext_i32_i64(t1
, arg1
);
998 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1000 tcg_gen_add_i64(t3
, t1
, t2
);
1001 tcg_gen_extrl_i64_i32(temp3
, t3
);
1003 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1004 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1005 tcg_gen_or_i64(t1
, t1
, t2
);
1006 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1007 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1008 /* We produce an overflow on the host if the mul before was
1009 (0x80000000 * 0x80000000) << 1). If this is the
1010 case, we negate the ovf. */
1012 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1013 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1014 tcg_gen_and_tl(temp
, temp
, temp2
);
1015 tcg_gen_shli_tl(temp
, temp
, 31);
1016 /* negate v bit, if special condition */
1017 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1020 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1021 /* Calc AV/SAV bits */
1022 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1023 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1025 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1026 /* write back result */
1027 tcg_gen_mov_tl(ret
, temp3
);
1031 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1033 TCGv temp
= tcg_temp_new();
1034 TCGv temp2
= tcg_temp_new();
1036 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1037 } else { /* n is expected to be 1 */
1038 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1039 tcg_gen_shli_tl(temp
, temp
, 1);
1040 /* catch special case r1 = r2 = 0x8000 */
1041 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1042 tcg_gen_sub_tl(temp
, temp
, temp2
);
1044 gen_add_d(ret
, arg1
, temp
);
1048 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1050 TCGv temp
= tcg_temp_new();
1051 TCGv temp2
= tcg_temp_new();
1053 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1054 } else { /* n is expected to be 1 */
1055 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1056 tcg_gen_shli_tl(temp
, temp
, 1);
1057 /* catch special case r1 = r2 = 0x8000 */
1058 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1059 tcg_gen_sub_tl(temp
, temp
, temp2
);
1061 gen_adds(ret
, arg1
, temp
);
1065 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1066 TCGv arg3
, uint32_t n
)
1068 TCGv temp
= tcg_temp_new();
1069 TCGv temp2
= tcg_temp_new();
1070 TCGv_i64 t1
= tcg_temp_new_i64();
1071 TCGv_i64 t2
= tcg_temp_new_i64();
1072 TCGv_i64 t3
= tcg_temp_new_i64();
1075 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1076 } else { /* n is expected to be 1 */
1077 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1078 tcg_gen_shli_tl(temp
, temp
, 1);
1079 /* catch special case r1 = r2 = 0x8000 */
1080 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1081 tcg_gen_sub_tl(temp
, temp
, temp2
);
1083 tcg_gen_ext_i32_i64(t2
, temp
);
1084 tcg_gen_shli_i64(t2
, t2
, 16);
1085 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1086 gen_add64_d(t3
, t1
, t2
);
1087 /* write back result */
1088 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1092 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1093 TCGv arg3
, uint32_t n
)
1095 TCGv temp
= tcg_temp_new();
1096 TCGv temp2
= tcg_temp_new();
1097 TCGv_i64 t1
= tcg_temp_new_i64();
1098 TCGv_i64 t2
= tcg_temp_new_i64();
1101 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1102 } else { /* n is expected to be 1 */
1103 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1104 tcg_gen_shli_tl(temp
, temp
, 1);
1105 /* catch special case r1 = r2 = 0x8000 */
1106 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1107 tcg_gen_sub_tl(temp
, temp
, temp2
);
1109 tcg_gen_ext_i32_i64(t2
, temp
);
1110 tcg_gen_shli_i64(t2
, t2
, 16);
1111 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1113 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1114 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1118 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1119 TCGv arg3
, uint32_t n
)
1121 TCGv_i64 t1
= tcg_temp_new_i64();
1122 TCGv_i64 t2
= tcg_temp_new_i64();
1123 TCGv_i64 t3
= tcg_temp_new_i64();
1124 TCGv_i64 t4
= tcg_temp_new_i64();
1127 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1128 tcg_gen_ext_i32_i64(t2
, arg2
);
1129 tcg_gen_ext_i32_i64(t3
, arg3
);
1131 tcg_gen_mul_i64(t2
, t2
, t3
);
1133 tcg_gen_shli_i64(t2
, t2
, 1);
1135 tcg_gen_add_i64(t4
, t1
, t2
);
1137 tcg_gen_xor_i64(t3
, t4
, t1
);
1138 tcg_gen_xor_i64(t2
, t1
, t2
);
1139 tcg_gen_andc_i64(t3
, t3
, t2
);
1140 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1141 /* We produce an overflow on the host if the mul before was
1142 (0x80000000 * 0x80000000) << 1). If this is the
1143 case, we negate the ovf. */
1145 temp
= tcg_temp_new();
1146 temp2
= tcg_temp_new();
1147 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1148 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1149 tcg_gen_and_tl(temp
, temp
, temp2
);
1150 tcg_gen_shli_tl(temp
, temp
, 31);
1151 /* negate v bit, if special condition */
1152 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1154 /* write back result */
1155 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1157 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1158 /* Calc AV/SAV bits */
1159 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1160 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1162 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1166 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1169 TCGv_i64 t1
= tcg_temp_new_i64();
1170 TCGv_i64 t2
= tcg_temp_new_i64();
1171 TCGv_i64 t3
= tcg_temp_new_i64();
1173 tcg_gen_ext_i32_i64(t1
, arg1
);
1174 tcg_gen_ext_i32_i64(t2
, arg2
);
1175 tcg_gen_ext_i32_i64(t3
, arg3
);
1177 tcg_gen_mul_i64(t2
, t2
, t3
);
1178 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1180 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1184 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1185 TCGv arg3
, uint32_t n
)
1187 TCGv_i64 r1
= tcg_temp_new_i64();
1188 TCGv t_n
= tcg_constant_i32(n
);
1190 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1191 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1192 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1195 /* ret = r2 - (r1 * r3); */
1196 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1198 TCGv_i64 t1
= tcg_temp_new_i64();
1199 TCGv_i64 t2
= tcg_temp_new_i64();
1200 TCGv_i64 t3
= tcg_temp_new_i64();
1202 tcg_gen_ext_i32_i64(t1
, r1
);
1203 tcg_gen_ext_i32_i64(t2
, r2
);
1204 tcg_gen_ext_i32_i64(t3
, r3
);
1206 tcg_gen_mul_i64(t1
, t1
, t3
);
1207 tcg_gen_sub_i64(t1
, t2
, t1
);
1209 tcg_gen_extrl_i64_i32(ret
, t1
);
1212 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1213 /* result < -0x80000000 */
1214 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1215 tcg_gen_or_i64(t2
, t2
, t3
);
1216 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1217 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1220 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1221 /* Calc AV/SAV bits */
1222 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1223 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1225 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1228 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1230 TCGv temp
= tcg_constant_i32(con
);
1231 gen_msub32_d(ret
, r1
, r2
, temp
);
1235 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1238 TCGv t1
= tcg_temp_new();
1239 TCGv t2
= tcg_temp_new();
1240 TCGv t3
= tcg_temp_new();
1241 TCGv t4
= tcg_temp_new();
1243 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1244 /* only the sub can overflow */
1245 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1247 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1248 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1249 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1251 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1252 /* Calc AV/SAV bits */
1253 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1254 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1256 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1257 /* write back the result */
1258 tcg_gen_mov_tl(ret_low
, t3
);
1259 tcg_gen_mov_tl(ret_high
, t4
);
1263 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1266 TCGv temp
= tcg_constant_i32(con
);
1267 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1271 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1274 TCGv_i64 t1
= tcg_temp_new_i64();
1275 TCGv_i64 t2
= tcg_temp_new_i64();
1276 TCGv_i64 t3
= tcg_temp_new_i64();
1278 tcg_gen_extu_i32_i64(t1
, r1
);
1279 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1280 tcg_gen_extu_i32_i64(t3
, r3
);
1282 tcg_gen_mul_i64(t1
, t1
, t3
);
1283 tcg_gen_sub_i64(t3
, t2
, t1
);
1284 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1285 /* calc V bit, only the sub can overflow, if t1 > t2 */
1286 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1287 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1288 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1290 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1291 /* Calc AV/SAV bits */
1292 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1293 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1295 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1299 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1302 TCGv temp
= tcg_constant_i32(con
);
1303 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1306 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1308 TCGv temp
= tcg_constant_i32(r2
);
1309 gen_add_d(ret
, r1
, temp
);
1312 /* calculate the carry bit too */
1313 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1315 TCGv t0
= tcg_temp_new_i32();
1316 TCGv result
= tcg_temp_new_i32();
1318 tcg_gen_movi_tl(t0
, 0);
1319 /* Addition and set C/V/SV bits */
1320 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1322 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1323 tcg_gen_xor_tl(t0
, r1
, r2
);
1324 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1326 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1327 /* Calc AV/SAV bits */
1328 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1329 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1331 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1332 /* write back result */
1333 tcg_gen_mov_tl(ret
, result
);
1336 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1338 TCGv temp
= tcg_constant_i32(con
);
1339 gen_add_CC(ret
, r1
, temp
);
1342 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1344 TCGv carry
= tcg_temp_new_i32();
1345 TCGv t0
= tcg_temp_new_i32();
1346 TCGv result
= tcg_temp_new_i32();
1348 tcg_gen_movi_tl(t0
, 0);
1349 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1350 /* Addition, carry and set C/V/SV bits */
1351 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1352 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1354 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1355 tcg_gen_xor_tl(t0
, r1
, r2
);
1356 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1358 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1359 /* Calc AV/SAV bits */
1360 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1361 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1363 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1364 /* write back result */
1365 tcg_gen_mov_tl(ret
, result
);
1368 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1370 TCGv temp
= tcg_constant_i32(con
);
1371 gen_addc_CC(ret
, r1
, temp
);
1374 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1377 TCGv temp
= tcg_temp_new();
1378 TCGv temp2
= tcg_temp_new();
1379 TCGv result
= tcg_temp_new();
1380 TCGv mask
= tcg_temp_new();
1381 TCGv t0
= tcg_constant_i32(0);
1383 /* create mask for sticky bits */
1384 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1385 tcg_gen_shli_tl(mask
, mask
, 31);
1387 tcg_gen_add_tl(result
, r1
, r2
);
1389 tcg_gen_xor_tl(temp
, result
, r1
);
1390 tcg_gen_xor_tl(temp2
, r1
, r2
);
1391 tcg_gen_andc_tl(temp
, temp
, temp2
);
1392 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1394 tcg_gen_and_tl(temp
, temp
, mask
);
1395 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1397 tcg_gen_add_tl(temp
, result
, result
);
1398 tcg_gen_xor_tl(temp
, temp
, result
);
1399 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1401 tcg_gen_and_tl(temp
, temp
, mask
);
1402 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1403 /* write back result */
1404 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1407 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1410 TCGv temp
= tcg_constant_i32(r2
);
1411 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1414 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1416 TCGv temp
= tcg_temp_new_i32();
1417 TCGv result
= tcg_temp_new_i32();
1419 tcg_gen_sub_tl(result
, r1
, r2
);
1421 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1422 tcg_gen_xor_tl(temp
, r1
, r2
);
1423 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1425 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1427 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1428 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1430 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1431 /* write back result */
1432 tcg_gen_mov_tl(ret
, result
);
1436 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1438 TCGv temp
= tcg_temp_new();
1439 TCGv_i64 t0
= tcg_temp_new_i64();
1440 TCGv_i64 t1
= tcg_temp_new_i64();
1441 TCGv_i64 result
= tcg_temp_new_i64();
1443 tcg_gen_sub_i64(result
, r1
, r2
);
1445 tcg_gen_xor_i64(t1
, result
, r1
);
1446 tcg_gen_xor_i64(t0
, r1
, r2
);
1447 tcg_gen_and_i64(t1
, t1
, t0
);
1448 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1450 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1451 /* calc AV/SAV bits */
1452 tcg_gen_extrh_i64_i32(temp
, result
);
1453 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1454 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1456 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1457 /* write back result */
1458 tcg_gen_mov_i64(ret
, result
);
1461 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1463 TCGv result
= tcg_temp_new();
1464 TCGv temp
= tcg_temp_new();
1466 tcg_gen_sub_tl(result
, r1
, r2
);
1468 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1470 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1471 tcg_gen_xor_tl(temp
, r1
, r2
);
1472 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1474 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1476 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1477 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1480 /* write back result */
1481 tcg_gen_mov_tl(ret
, result
);
1484 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1486 TCGv temp
= tcg_temp_new();
1487 tcg_gen_not_tl(temp
, r2
);
1488 gen_addc_CC(ret
, r1
, temp
);
1491 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1494 TCGv temp
= tcg_temp_new();
1495 TCGv temp2
= tcg_temp_new();
1496 TCGv result
= tcg_temp_new();
1497 TCGv mask
= tcg_temp_new();
1498 TCGv t0
= tcg_constant_i32(0);
1500 /* create mask for sticky bits */
1501 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1502 tcg_gen_shli_tl(mask
, mask
, 31);
1504 tcg_gen_sub_tl(result
, r1
, r2
);
1506 tcg_gen_xor_tl(temp
, result
, r1
);
1507 tcg_gen_xor_tl(temp2
, r1
, r2
);
1508 tcg_gen_and_tl(temp
, temp
, temp2
);
1509 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1511 tcg_gen_and_tl(temp
, temp
, mask
);
1512 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1514 tcg_gen_add_tl(temp
, result
, result
);
1515 tcg_gen_xor_tl(temp
, temp
, result
);
1516 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1518 tcg_gen_and_tl(temp
, temp
, mask
);
1519 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1520 /* write back result */
1521 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1525 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1526 TCGv r3
, uint32_t n
, uint32_t mode
)
1528 TCGv t_n
= tcg_constant_i32(n
);
1529 TCGv temp
= tcg_temp_new();
1530 TCGv temp2
= tcg_temp_new();
1531 TCGv_i64 temp64
= tcg_temp_new_i64();
1534 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1537 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1540 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1543 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1546 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1547 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1548 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1552 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1553 TCGv r3
, uint32_t n
, uint32_t mode
)
1555 TCGv t_n
= tcg_constant_i32(n
);
1556 TCGv temp
= tcg_temp_new();
1557 TCGv temp2
= tcg_temp_new();
1558 TCGv temp3
= tcg_temp_new();
1559 TCGv_i64 temp64
= tcg_temp_new_i64();
1563 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1566 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1569 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1572 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1575 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1576 gen_subs(ret_low
, r1_low
, temp
);
1577 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1578 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1579 gen_subs(ret_high
, r1_high
, temp2
);
1580 /* combine v bits */
1581 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1582 /* combine av bits */
1583 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1587 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1588 TCGv r3
, uint32_t n
, uint32_t mode
)
1590 TCGv t_n
= tcg_constant_i32(n
);
1591 TCGv_i64 temp64
= tcg_temp_new_i64();
1592 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1593 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1596 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1599 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1602 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1605 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1608 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1609 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1610 /* write back result */
1611 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1615 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1616 TCGv r3
, uint32_t n
, uint32_t mode
)
1618 TCGv t_n
= tcg_constant_i32(n
);
1619 TCGv_i64 temp64
= tcg_temp_new_i64();
1620 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1623 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1626 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1629 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1632 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1635 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1636 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1637 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1641 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1644 TCGv t_n
= tcg_constant_i32(n
);
1645 TCGv_i64 temp64
= tcg_temp_new_i64();
1648 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1651 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1654 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1657 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1660 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1664 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1666 TCGv temp
= tcg_temp_new();
1667 TCGv temp2
= tcg_temp_new();
1669 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1670 tcg_gen_shli_tl(temp
, r1
, 16);
1671 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1675 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1676 uint32_t n
, uint32_t mode
)
1678 TCGv t_n
= tcg_constant_i32(n
);
1679 TCGv_i64 temp64
= tcg_temp_new_i64();
1682 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1685 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1688 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1691 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1694 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1698 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1700 TCGv temp
= tcg_temp_new();
1701 TCGv temp2
= tcg_temp_new();
1703 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1704 tcg_gen_shli_tl(temp
, r1
, 16);
1705 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1709 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1711 TCGv temp
= tcg_constant_i32(n
);
1712 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1716 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1718 TCGv temp
= tcg_constant_i32(n
);
1719 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1723 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1726 TCGv temp3
= tcg_temp_new();
1727 TCGv_i64 t1
= tcg_temp_new_i64();
1728 TCGv_i64 t2
= tcg_temp_new_i64();
1729 TCGv_i64 t3
= tcg_temp_new_i64();
1730 TCGv_i64 t4
= tcg_temp_new_i64();
1732 tcg_gen_ext_i32_i64(t2
, arg2
);
1733 tcg_gen_ext_i32_i64(t3
, arg3
);
1735 tcg_gen_mul_i64(t2
, t2
, t3
);
1737 tcg_gen_ext_i32_i64(t1
, arg1
);
1738 /* if we shift part of the fraction out, we need to round up */
1739 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1740 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1741 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1742 tcg_gen_add_i64(t2
, t2
, t4
);
1744 tcg_gen_sub_i64(t3
, t1
, t2
);
1745 tcg_gen_extrl_i64_i32(temp3
, t3
);
1747 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1748 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1749 tcg_gen_or_i64(t1
, t1
, t2
);
1750 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1751 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1753 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1754 /* Calc AV/SAV bits */
1755 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1756 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1758 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1759 /* write back result */
1760 tcg_gen_mov_tl(ret
, temp3
);
1764 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1766 TCGv temp
= tcg_temp_new();
1767 TCGv temp2
= tcg_temp_new();
1769 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1770 } else { /* n is expected to be 1 */
1771 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1772 tcg_gen_shli_tl(temp
, temp
, 1);
1773 /* catch special case r1 = r2 = 0x8000 */
1774 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1775 tcg_gen_sub_tl(temp
, temp
, temp2
);
1777 gen_sub_d(ret
, arg1
, temp
);
1781 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1783 TCGv temp
= tcg_temp_new();
1784 TCGv temp2
= tcg_temp_new();
1786 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1787 } else { /* n is expected to be 1 */
1788 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1789 tcg_gen_shli_tl(temp
, temp
, 1);
1790 /* catch special case r1 = r2 = 0x8000 */
1791 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1792 tcg_gen_sub_tl(temp
, temp
, temp2
);
1794 gen_subs(ret
, arg1
, temp
);
1798 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1799 TCGv arg3
, uint32_t n
)
1801 TCGv temp
= tcg_temp_new();
1802 TCGv temp2
= tcg_temp_new();
1803 TCGv_i64 t1
= tcg_temp_new_i64();
1804 TCGv_i64 t2
= tcg_temp_new_i64();
1805 TCGv_i64 t3
= tcg_temp_new_i64();
1808 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1809 } else { /* n is expected to be 1 */
1810 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1811 tcg_gen_shli_tl(temp
, temp
, 1);
1812 /* catch special case r1 = r2 = 0x8000 */
1813 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1814 tcg_gen_sub_tl(temp
, temp
, temp2
);
1816 tcg_gen_ext_i32_i64(t2
, temp
);
1817 tcg_gen_shli_i64(t2
, t2
, 16);
1818 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1819 gen_sub64_d(t3
, t1
, t2
);
1820 /* write back result */
1821 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1825 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1826 TCGv arg3
, uint32_t n
)
1828 TCGv temp
= tcg_temp_new();
1829 TCGv temp2
= tcg_temp_new();
1830 TCGv_i64 t1
= tcg_temp_new_i64();
1831 TCGv_i64 t2
= tcg_temp_new_i64();
1834 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1835 } else { /* n is expected to be 1 */
1836 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1837 tcg_gen_shli_tl(temp
, temp
, 1);
1838 /* catch special case r1 = r2 = 0x8000 */
1839 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1840 tcg_gen_sub_tl(temp
, temp
, temp2
);
1842 tcg_gen_ext_i32_i64(t2
, temp
);
1843 tcg_gen_shli_i64(t2
, t2
, 16);
1844 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1846 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
1847 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1851 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1852 TCGv arg3
, uint32_t n
)
1854 TCGv_i64 t1
= tcg_temp_new_i64();
1855 TCGv_i64 t2
= tcg_temp_new_i64();
1856 TCGv_i64 t3
= tcg_temp_new_i64();
1857 TCGv_i64 t4
= tcg_temp_new_i64();
1860 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1861 tcg_gen_ext_i32_i64(t2
, arg2
);
1862 tcg_gen_ext_i32_i64(t3
, arg3
);
1864 tcg_gen_mul_i64(t2
, t2
, t3
);
1866 tcg_gen_shli_i64(t2
, t2
, 1);
1868 tcg_gen_sub_i64(t4
, t1
, t2
);
1870 tcg_gen_xor_i64(t3
, t4
, t1
);
1871 tcg_gen_xor_i64(t2
, t1
, t2
);
1872 tcg_gen_and_i64(t3
, t3
, t2
);
1873 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1874 /* We produce an overflow on the host if the mul before was
1875 (0x80000000 * 0x80000000) << 1). If this is the
1876 case, we negate the ovf. */
1878 temp
= tcg_temp_new();
1879 temp2
= tcg_temp_new();
1880 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1881 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1882 tcg_gen_and_tl(temp
, temp
, temp2
);
1883 tcg_gen_shli_tl(temp
, temp
, 31);
1884 /* negate v bit, if special condition */
1885 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1887 /* write back result */
1888 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1890 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1891 /* Calc AV/SAV bits */
1892 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1893 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1895 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1899 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1902 TCGv_i64 t1
= tcg_temp_new_i64();
1903 TCGv_i64 t2
= tcg_temp_new_i64();
1904 TCGv_i64 t3
= tcg_temp_new_i64();
1905 TCGv_i64 t4
= tcg_temp_new_i64();
1907 tcg_gen_ext_i32_i64(t1
, arg1
);
1908 tcg_gen_ext_i32_i64(t2
, arg2
);
1909 tcg_gen_ext_i32_i64(t3
, arg3
);
1911 tcg_gen_mul_i64(t2
, t2
, t3
);
1912 /* if we shift part of the fraction out, we need to round up */
1913 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1914 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1915 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
1916 tcg_gen_add_i64(t3
, t3
, t4
);
1918 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
1922 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1923 TCGv arg3
, uint32_t n
)
1925 TCGv_i64 r1
= tcg_temp_new_i64();
1926 TCGv t_n
= tcg_constant_i32(n
);
1928 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1929 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1930 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1934 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1935 TCGv r3
, uint32_t n
, uint32_t mode
)
1937 TCGv t_n
= tcg_constant_i32(n
);
1938 TCGv temp
= tcg_temp_new();
1939 TCGv temp2
= tcg_temp_new();
1940 TCGv_i64 temp64
= tcg_temp_new_i64();
1943 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1946 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1949 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1952 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1955 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1956 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1957 tcg_gen_add_tl
, tcg_gen_sub_tl
);
1961 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1962 TCGv r3
, uint32_t n
, uint32_t mode
)
1964 TCGv t_n
= tcg_constant_i32(n
);
1965 TCGv_i64 temp64
= tcg_temp_new_i64();
1966 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1967 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1970 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1973 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1976 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1979 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1982 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
1983 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
1984 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
1985 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
1986 tcg_gen_shli_i64(temp64
, temp64
, 16);
1988 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
1989 /* write back result */
1990 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
1994 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1996 TCGv t_n
= tcg_constant_i32(n
);
1997 TCGv temp
= tcg_temp_new();
1998 TCGv temp2
= tcg_temp_new();
1999 TCGv_i64 temp64
= tcg_temp_new_i64();
2002 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2005 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2008 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2011 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2014 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2015 tcg_gen_shli_tl(temp
, r1
, 16);
2016 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2020 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2021 TCGv r3
, uint32_t n
, uint32_t mode
)
2023 TCGv t_n
= tcg_constant_i32(n
);
2024 TCGv temp
= tcg_temp_new();
2025 TCGv temp2
= tcg_temp_new();
2026 TCGv temp3
= tcg_temp_new();
2027 TCGv_i64 temp64
= tcg_temp_new_i64();
2031 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2034 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2037 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2040 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2043 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2044 gen_adds(ret_low
, r1_low
, temp
);
2045 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2046 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2047 gen_subs(ret_high
, r1_high
, temp2
);
2048 /* combine v bits */
2049 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2050 /* combine av bits */
2051 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2055 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2056 TCGv r3
, uint32_t n
, uint32_t mode
)
2058 TCGv t_n
= tcg_constant_i32(n
);
2059 TCGv_i64 temp64
= tcg_temp_new_i64();
2060 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2064 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2067 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2070 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2073 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2076 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2077 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2078 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2079 tcg_gen_shli_i64(temp64
, temp64
, 16);
2080 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2082 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2083 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2087 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2089 TCGv t_n
= tcg_constant_i32(n
);
2090 TCGv temp
= tcg_temp_new();
2091 TCGv temp2
= tcg_temp_new();
2092 TCGv_i64 temp64
= tcg_temp_new_i64();
2095 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2098 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2101 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2104 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2107 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2108 tcg_gen_shli_tl(temp
, r1
, 16);
2109 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2112 static inline void gen_abs(TCGv ret
, TCGv r1
)
2114 tcg_gen_abs_tl(ret
, r1
);
2115 /* overflow can only happen, if r1 = 0x80000000 */
2116 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2117 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2119 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2121 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2122 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2124 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2127 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2129 TCGv temp
= tcg_temp_new_i32();
2130 TCGv result
= tcg_temp_new_i32();
2132 tcg_gen_sub_tl(result
, r1
, r2
);
2133 tcg_gen_sub_tl(temp
, r2
, r1
);
2134 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2137 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2138 tcg_gen_xor_tl(temp
, result
, r2
);
2139 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2140 tcg_gen_xor_tl(temp
, r1
, r2
);
2141 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2143 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2145 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2146 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2148 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2149 /* write back result */
2150 tcg_gen_mov_tl(ret
, result
);
2153 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2155 TCGv temp
= tcg_constant_i32(con
);
2156 gen_absdif(ret
, r1
, temp
);
2159 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2161 TCGv temp
= tcg_constant_i32(con
);
2162 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2165 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2167 TCGv high
= tcg_temp_new();
2168 TCGv low
= tcg_temp_new();
2170 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2171 tcg_gen_mov_tl(ret
, low
);
2173 tcg_gen_sari_tl(low
, low
, 31);
2174 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2175 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2177 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2179 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2180 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2182 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2185 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2187 TCGv temp
= tcg_constant_i32(con
);
2188 gen_mul_i32s(ret
, r1
, temp
);
2191 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2193 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2195 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2197 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2199 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2200 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2202 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2205 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2208 TCGv temp
= tcg_constant_i32(con
);
2209 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2212 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2214 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2216 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2218 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2220 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2221 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2223 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2226 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2229 TCGv temp
= tcg_constant_i32(con
);
2230 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2233 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2235 TCGv temp
= tcg_constant_i32(con
);
2236 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2239 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2241 TCGv temp
= tcg_constant_i32(con
);
2242 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2245 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2246 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2248 TCGv temp
= tcg_constant_i32(con
);
2249 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2252 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2254 TCGv temp
= tcg_constant_i32(con
);
2255 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2259 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2261 TCGv_i64 temp_64
= tcg_temp_new_i64();
2262 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2265 if (up_shift
== 32) {
2266 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2267 } else if (up_shift
== 16) {
2268 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2269 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2271 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2272 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2273 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2275 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2278 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2279 } else { /* n is expected to be 1 */
2280 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2281 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2283 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2285 if (up_shift
== 0) {
2286 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2288 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2290 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2291 /* overflow only occurs if r1 = r2 = 0x8000 */
2292 if (up_shift
== 0) {/* result is 64 bit */
2293 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2295 } else { /* result is 32 bit */
2296 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2299 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2300 /* calc sv overflow bit */
2301 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2303 /* calc av overflow bit */
2304 if (up_shift
== 0) {
2305 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2306 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2308 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2309 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2311 /* calc sav overflow bit */
2312 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2316 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2318 TCGv temp
= tcg_temp_new();
2320 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2321 } else { /* n is expected to be 1 */
2322 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2323 tcg_gen_shli_tl(ret
, ret
, 1);
2324 /* catch special case r1 = r2 = 0x8000 */
2325 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2326 tcg_gen_sub_tl(ret
, ret
, temp
);
2329 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2330 /* calc av overflow bit */
2331 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2332 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2333 /* calc sav overflow bit */
2334 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2337 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2339 TCGv temp
= tcg_temp_new();
2341 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2342 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2344 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2345 tcg_gen_shli_tl(ret
, ret
, 1);
2346 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2347 /* catch special case r1 = r2 = 0x8000 */
2348 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2349 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2350 tcg_gen_sub_tl(ret
, ret
, temp
);
2353 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2354 /* calc av overflow bit */
2355 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2356 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2357 /* calc sav overflow bit */
2358 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2359 /* cut halfword off */
2360 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2364 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2367 TCGv_i64 temp64
= tcg_temp_new_i64();
2368 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2369 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2370 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2374 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2377 TCGv temp
= tcg_constant_i32(con
);
2378 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2382 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2385 TCGv_i64 temp64
= tcg_temp_new_i64();
2386 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2387 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2388 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2392 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2395 TCGv temp
= tcg_constant_i32(con
);
2396 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2399 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2401 TCGv temp
= tcg_constant_i32(con
);
2402 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2405 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2407 TCGv temp
= tcg_constant_i32(con
);
2408 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2412 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2415 TCGv_i64 temp64
= tcg_temp_new_i64();
2416 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2417 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2418 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2422 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2425 TCGv temp
= tcg_constant_i32(con
);
2426 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2430 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2433 TCGv_i64 temp64
= tcg_temp_new_i64();
2434 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2435 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2436 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2440 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2443 TCGv temp
= tcg_constant_i32(con
);
2444 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2447 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2449 tcg_gen_smax_tl(ret
, arg
, tcg_constant_i32(low
));
2450 tcg_gen_smin_tl(ret
, ret
, tcg_constant_i32(up
));
2453 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2455 tcg_gen_umin_tl(ret
, arg
, tcg_constant_i32(up
));
2458 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2460 if (shift_count
== -32) {
2461 tcg_gen_movi_tl(ret
, 0);
2462 } else if (shift_count
>= 0) {
2463 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2465 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2469 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2471 TCGv temp_low
, temp_high
;
2473 if (shiftcount
== -16) {
2474 tcg_gen_movi_tl(ret
, 0);
2476 temp_high
= tcg_temp_new();
2477 temp_low
= tcg_temp_new();
2479 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2480 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2481 gen_shi(temp_low
, temp_low
, shiftcount
);
2482 gen_shi(ret
, temp_high
, shiftcount
);
2483 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2487 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2489 uint32_t msk
, msk_start
;
2490 TCGv temp
= tcg_temp_new();
2491 TCGv temp2
= tcg_temp_new();
2493 if (shift_count
== 0) {
2494 /* Clear PSW.C and PSW.V */
2495 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2496 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2497 tcg_gen_mov_tl(ret
, r1
);
2498 } else if (shift_count
== -32) {
2500 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2501 /* fill ret completely with sign bit */
2502 tcg_gen_sari_tl(ret
, r1
, 31);
2504 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2505 } else if (shift_count
> 0) {
2506 TCGv t_max
= tcg_constant_i32(0x7FFFFFFF >> shift_count
);
2507 TCGv t_min
= tcg_constant_i32(((int32_t) -0x80000000) >> shift_count
);
2510 msk_start
= 32 - shift_count
;
2511 msk
= ((1 << shift_count
) - 1) << msk_start
;
2512 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2513 /* calc v/sv bits */
2514 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2515 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2516 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2517 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2519 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2521 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2524 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2526 msk
= (1 << -shift_count
) - 1;
2527 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2529 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2531 /* calc av overflow bit */
2532 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2533 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2534 /* calc sav overflow bit */
2535 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2538 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2540 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2543 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2545 TCGv temp
= tcg_constant_i32(con
);
2546 gen_shas(ret
, r1
, temp
);
2549 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2553 if (shift_count
== 0) {
2554 tcg_gen_mov_tl(ret
, r1
);
2555 } else if (shift_count
> 0) {
2556 low
= tcg_temp_new();
2557 high
= tcg_temp_new();
2559 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2560 tcg_gen_shli_tl(low
, r1
, shift_count
);
2561 tcg_gen_shli_tl(ret
, high
, shift_count
);
2562 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2564 low
= tcg_temp_new();
2565 high
= tcg_temp_new();
2567 tcg_gen_ext16s_tl(low
, r1
);
2568 tcg_gen_sari_tl(low
, low
, -shift_count
);
2569 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2570 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2574 /* ret = {ret[30:0], (r1 cond r2)}; */
2575 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2577 TCGv temp
= tcg_temp_new();
2578 TCGv temp2
= tcg_temp_new();
2580 tcg_gen_shli_tl(temp
, ret
, 1);
2581 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2582 tcg_gen_or_tl(ret
, temp
, temp2
);
2585 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2587 TCGv temp
= tcg_constant_i32(con
);
2588 gen_sh_cond(cond
, ret
, r1
, temp
);
2591 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2593 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2596 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2598 TCGv temp
= tcg_constant_i32(con
);
2599 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2602 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2604 TCGv temp
= tcg_constant_i32(con
);
2605 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2608 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2610 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2613 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2615 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2618 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2620 void(*op1
)(TCGv
, TCGv
, TCGv
),
2621 void(*op2
)(TCGv
, TCGv
, TCGv
))
2625 temp1
= tcg_temp_new();
2626 temp2
= tcg_temp_new();
2628 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2629 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2631 (*op1
)(temp1
, temp1
, temp2
);
2632 (*op2
)(temp1
, ret
, temp1
);
2634 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2637 /* ret = r1[pos1] op1 r2[pos2]; */
2638 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2640 void(*op1
)(TCGv
, TCGv
, TCGv
))
2644 temp1
= tcg_temp_new();
2645 temp2
= tcg_temp_new();
2647 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2648 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2650 (*op1
)(ret
, temp1
, temp2
);
2652 tcg_gen_andi_tl(ret
, ret
, 0x1);
2655 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2656 void(*op
)(TCGv
, TCGv
, TCGv
))
2658 TCGv temp
= tcg_temp_new();
2659 TCGv temp2
= tcg_temp_new();
2660 /* temp = (arg1 cond arg2 )*/
2661 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2663 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2664 /* temp = temp insn temp2 */
2665 (*op
)(temp
, temp
, temp2
);
2666 /* ret = {ret[31:1], temp} */
2667 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2671 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2672 void(*op
)(TCGv
, TCGv
, TCGv
))
2674 TCGv temp
= tcg_constant_i32(con
);
2675 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2678 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2679 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2681 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2682 tcg_gen_neg_tl(ret
, ret
);
2685 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2687 TCGv b0
= tcg_temp_new();
2688 TCGv b1
= tcg_temp_new();
2689 TCGv b2
= tcg_temp_new();
2690 TCGv b3
= tcg_temp_new();
2693 tcg_gen_andi_tl(b0
, r1
, 0xff);
2694 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2697 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2698 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2701 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2702 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2705 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2706 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2709 tcg_gen_or_tl(ret
, b0
, b1
);
2710 tcg_gen_or_tl(ret
, ret
, b2
);
2711 tcg_gen_or_tl(ret
, ret
, b3
);
2714 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2716 TCGv h0
= tcg_temp_new();
2717 TCGv h1
= tcg_temp_new();
2720 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2721 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2724 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2725 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2728 tcg_gen_or_tl(ret
, h0
, h1
);
2731 /* mask = ((1 << width) -1) << pos;
2732 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2733 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2735 TCGv mask
= tcg_temp_new();
2736 TCGv temp
= tcg_temp_new();
2737 TCGv temp2
= tcg_temp_new();
2739 tcg_gen_movi_tl(mask
, 1);
2740 tcg_gen_shl_tl(mask
, mask
, width
);
2741 tcg_gen_subi_tl(mask
, mask
, 1);
2742 tcg_gen_shl_tl(mask
, mask
, pos
);
2744 tcg_gen_shl_tl(temp
, r2
, pos
);
2745 tcg_gen_and_tl(temp
, temp
, mask
);
2746 tcg_gen_andc_tl(temp2
, r1
, mask
);
2747 tcg_gen_or_tl(ret
, temp
, temp2
);
2750 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2752 TCGv_i64 temp
= tcg_temp_new_i64();
2754 gen_helper_bsplit(temp
, r1
);
2755 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2758 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2760 TCGv_i64 temp
= tcg_temp_new_i64();
2762 gen_helper_unpack(temp
, r1
);
2763 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2767 gen_dvinit_b(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2769 TCGv_i64 ret
= tcg_temp_new_i64();
2771 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2772 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2774 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2776 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2780 gen_dvinit_h(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2782 TCGv_i64 ret
= tcg_temp_new_i64();
2784 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2785 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2787 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2789 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2792 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2794 TCGv temp
= tcg_temp_new();
2796 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2797 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2798 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2799 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2800 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2802 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2803 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2806 static void gen_calc_usb_mulr_h(TCGv arg
)
2808 TCGv temp
= tcg_temp_new();
2810 tcg_gen_add_tl(temp
, arg
, arg
);
2811 tcg_gen_xor_tl(temp
, temp
, arg
);
2812 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2813 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2815 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2817 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2820 /* helpers for generating program flow micro-ops */
2822 static inline void gen_save_pc(target_ulong pc
)
2824 tcg_gen_movi_tl(cpu_PC
, pc
);
2827 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2829 if (translator_use_goto_tb(&ctx
->base
, dest
)) {
2832 tcg_gen_exit_tb(ctx
->base
.tb
, n
);
2835 tcg_gen_lookup_and_goto_ptr();
2839 static void generate_trap(DisasContext
*ctx
, int class, int tin
)
2841 TCGv_i32 classtemp
= tcg_constant_i32(class);
2842 TCGv_i32 tintemp
= tcg_constant_i32(tin
);
2844 gen_save_pc(ctx
->base
.pc_next
);
2845 gen_helper_raise_exception_sync(cpu_env
, classtemp
, tintemp
);
2846 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2849 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2850 TCGv r2
, int16_t address
)
2852 TCGLabel
*jumpLabel
= gen_new_label();
2853 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2855 gen_goto_tb(ctx
, 1, ctx
->pc_succ_insn
);
2857 gen_set_label(jumpLabel
);
2858 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ address
* 2);
2861 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2862 int r2
, int16_t address
)
2864 TCGv temp
= tcg_constant_i32(r2
);
2865 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2868 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2870 TCGLabel
*l1
= gen_new_label();
2872 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2873 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2874 gen_goto_tb(ctx
, 1, ctx
->base
.pc_next
+ offset
);
2876 gen_goto_tb(ctx
, 0, ctx
->pc_succ_insn
);
2879 static void gen_fcall_save_ctx(DisasContext
*ctx
)
2881 TCGv temp
= tcg_temp_new();
2883 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
2884 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
2885 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
2886 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
2889 static void gen_fret(DisasContext
*ctx
)
2891 TCGv temp
= tcg_temp_new();
2893 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
2894 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
2895 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
2896 tcg_gen_mov_tl(cpu_PC
, temp
);
2897 tcg_gen_exit_tb(NULL
, 0);
2898 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2901 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2902 int r2
, int32_t constant
, int32_t offset
)
2908 /* SB-format jumps */
2911 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2913 case OPC1_32_B_CALL
:
2914 case OPC1_16_SB_CALL
:
2915 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2916 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2919 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2921 case OPC1_16_SB_JNZ
:
2922 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2924 /* SBC-format jumps */
2925 case OPC1_16_SBC_JEQ
:
2926 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2928 case OPC1_16_SBC_JEQ2
:
2929 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2932 case OPC1_16_SBC_JNE
:
2933 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2935 case OPC1_16_SBC_JNE2
:
2936 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2937 constant
, offset
+ 16);
2939 /* SBRN-format jumps */
2940 case OPC1_16_SBRN_JZ_T
:
2941 temp
= tcg_temp_new();
2942 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2943 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2945 case OPC1_16_SBRN_JNZ_T
:
2946 temp
= tcg_temp_new();
2947 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2948 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2950 /* SBR-format jumps */
2951 case OPC1_16_SBR_JEQ
:
2952 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2955 case OPC1_16_SBR_JEQ2
:
2956 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2959 case OPC1_16_SBR_JNE
:
2960 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2963 case OPC1_16_SBR_JNE2
:
2964 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2967 case OPC1_16_SBR_JNZ
:
2968 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2970 case OPC1_16_SBR_JNZ_A
:
2971 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2973 case OPC1_16_SBR_JGEZ
:
2974 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2976 case OPC1_16_SBR_JGTZ
:
2977 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2979 case OPC1_16_SBR_JLEZ
:
2980 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2982 case OPC1_16_SBR_JLTZ
:
2983 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2985 case OPC1_16_SBR_JZ
:
2986 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2988 case OPC1_16_SBR_JZ_A
:
2989 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2991 case OPC1_16_SBR_LOOP
:
2992 gen_loop(ctx
, r1
, offset
* 2 - 32);
2994 /* SR-format jumps */
2996 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2997 tcg_gen_exit_tb(NULL
, 0);
2999 case OPC2_32_SYS_RET
:
3000 case OPC2_16_SR_RET
:
3001 gen_helper_ret(cpu_env
);
3002 tcg_gen_exit_tb(NULL
, 0);
3005 case OPC1_32_B_CALLA
:
3006 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3007 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3009 case OPC1_32_B_FCALL
:
3010 gen_fcall_save_ctx(ctx
);
3011 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3013 case OPC1_32_B_FCALLA
:
3014 gen_fcall_save_ctx(ctx
);
3015 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3018 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3021 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3024 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3025 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3028 case OPCM_32_BRC_EQ_NEQ
:
3029 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3030 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3032 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3035 case OPCM_32_BRC_GE
:
3036 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3037 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3039 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3040 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3044 case OPCM_32_BRC_JLT
:
3045 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3046 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3048 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3049 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3053 case OPCM_32_BRC_JNE
:
3054 temp
= tcg_temp_new();
3055 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3056 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3057 /* subi is unconditional */
3058 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3059 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3061 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3062 /* addi is unconditional */
3063 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3064 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3068 case OPCM_32_BRN_JTT
:
3069 n
= MASK_OP_BRN_N(ctx
->opcode
);
3071 temp
= tcg_temp_new();
3072 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3074 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3075 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3077 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3081 case OPCM_32_BRR_EQ_NEQ
:
3082 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3083 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3086 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3090 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3091 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3092 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3095 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3099 case OPCM_32_BRR_GE
:
3100 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3101 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3104 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3108 case OPCM_32_BRR_JLT
:
3109 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3110 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3113 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3117 case OPCM_32_BRR_LOOP
:
3118 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3119 gen_loop(ctx
, r2
, offset
* 2);
3121 /* OPC2_32_BRR_LOOPU */
3122 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3125 case OPCM_32_BRR_JNE
:
3126 temp
= tcg_temp_new();
3127 temp2
= tcg_temp_new();
3128 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3129 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3130 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3131 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3132 /* subi is unconditional */
3133 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3134 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3136 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3137 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3138 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3139 /* addi is unconditional */
3140 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3141 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3144 case OPCM_32_BRR_JNZ
:
3145 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3146 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3148 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3152 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3154 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3159 * Functions for decoding instructions
3162 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3168 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3169 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3172 case OPC1_16_SRC_ADD
:
3173 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3175 case OPC1_16_SRC_ADD_A15
:
3176 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3178 case OPC1_16_SRC_ADD_15A
:
3179 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3181 case OPC1_16_SRC_ADD_A
:
3182 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3184 case OPC1_16_SRC_CADD
:
3185 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3188 case OPC1_16_SRC_CADDN
:
3189 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3192 case OPC1_16_SRC_CMOV
:
3193 temp
= tcg_constant_tl(0);
3194 temp2
= tcg_constant_tl(const4
);
3195 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3196 temp2
, cpu_gpr_d
[r1
]);
3198 case OPC1_16_SRC_CMOVN
:
3199 temp
= tcg_constant_tl(0);
3200 temp2
= tcg_constant_tl(const4
);
3201 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3202 temp2
, cpu_gpr_d
[r1
]);
3204 case OPC1_16_SRC_EQ
:
3205 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3208 case OPC1_16_SRC_LT
:
3209 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3212 case OPC1_16_SRC_MOV
:
3213 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3215 case OPC1_16_SRC_MOV_A
:
3216 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3217 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3219 case OPC1_16_SRC_MOV_E
:
3220 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3221 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3222 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3224 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3227 case OPC1_16_SRC_SH
:
3228 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3230 case OPC1_16_SRC_SHA
:
3231 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3234 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3238 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3243 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3244 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3247 case OPC1_16_SRR_ADD
:
3248 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3250 case OPC1_16_SRR_ADD_A15
:
3251 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3253 case OPC1_16_SRR_ADD_15A
:
3254 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3256 case OPC1_16_SRR_ADD_A
:
3257 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3259 case OPC1_16_SRR_ADDS
:
3260 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3262 case OPC1_16_SRR_AND
:
3263 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3265 case OPC1_16_SRR_CMOV
:
3266 temp
= tcg_constant_tl(0);
3267 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3268 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3270 case OPC1_16_SRR_CMOVN
:
3271 temp
= tcg_constant_tl(0);
3272 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3273 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3275 case OPC1_16_SRR_EQ
:
3276 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3279 case OPC1_16_SRR_LT
:
3280 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3283 case OPC1_16_SRR_MOV
:
3284 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3286 case OPC1_16_SRR_MOV_A
:
3287 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3289 case OPC1_16_SRR_MOV_AA
:
3290 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3292 case OPC1_16_SRR_MOV_D
:
3293 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3295 case OPC1_16_SRR_MUL
:
3296 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3298 case OPC1_16_SRR_OR
:
3299 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3301 case OPC1_16_SRR_SUB
:
3302 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3304 case OPC1_16_SRR_SUB_A15B
:
3305 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3307 case OPC1_16_SRR_SUB_15AB
:
3308 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3310 case OPC1_16_SRR_SUBS
:
3311 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3313 case OPC1_16_SRR_XOR
:
3314 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3317 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3321 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3325 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3326 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3329 case OPC1_16_SSR_ST_A
:
3330 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3332 case OPC1_16_SSR_ST_A_POSTINC
:
3333 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3334 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3336 case OPC1_16_SSR_ST_B
:
3337 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3339 case OPC1_16_SSR_ST_B_POSTINC
:
3340 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3341 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3343 case OPC1_16_SSR_ST_H
:
3344 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3346 case OPC1_16_SSR_ST_H_POSTINC
:
3347 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3348 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3350 case OPC1_16_SSR_ST_W
:
3351 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3353 case OPC1_16_SSR_ST_W_POSTINC
:
3354 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3355 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3358 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3362 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3366 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3369 case OPC1_16_SC_AND
:
3370 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3372 case OPC1_16_SC_BISR
:
3373 gen_helper_1arg(bisr
, const16
& 0xff);
3375 case OPC1_16_SC_LD_A
:
3376 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3378 case OPC1_16_SC_LD_W
:
3379 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3381 case OPC1_16_SC_MOV
:
3382 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3385 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3387 case OPC1_16_SC_ST_A
:
3388 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3390 case OPC1_16_SC_ST_W
:
3391 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3393 case OPC1_16_SC_SUB_A
:
3394 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3397 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3401 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3405 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3406 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3410 case OPC1_16_SLR_LD_A
:
3411 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3413 case OPC1_16_SLR_LD_A_POSTINC
:
3414 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3415 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3417 case OPC1_16_SLR_LD_BU
:
3418 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3420 case OPC1_16_SLR_LD_BU_POSTINC
:
3421 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3422 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3424 case OPC1_16_SLR_LD_H
:
3425 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3427 case OPC1_16_SLR_LD_H_POSTINC
:
3428 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3429 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3431 case OPC1_16_SLR_LD_W
:
3432 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3434 case OPC1_16_SLR_LD_W_POSTINC
:
3435 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3436 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3439 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3443 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3448 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3449 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3453 case OPC1_16_SRO_LD_A
:
3454 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3456 case OPC1_16_SRO_LD_BU
:
3457 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3459 case OPC1_16_SRO_LD_H
:
3460 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3462 case OPC1_16_SRO_LD_W
:
3463 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3465 case OPC1_16_SRO_ST_A
:
3466 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3468 case OPC1_16_SRO_ST_B
:
3469 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3471 case OPC1_16_SRO_ST_H
:
3472 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3474 case OPC1_16_SRO_ST_W
:
3475 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3478 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3482 static void decode_sr_system(DisasContext
*ctx
)
3485 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3488 case OPC2_16_SR_NOP
:
3490 case OPC2_16_SR_RET
:
3491 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3493 case OPC2_16_SR_RFE
:
3494 gen_helper_rfe(cpu_env
);
3495 tcg_gen_exit_tb(NULL
, 0);
3496 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3498 case OPC2_16_SR_DEBUG
:
3499 /* raise EXCP_DEBUG */
3501 case OPC2_16_SR_FRET
:
3505 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3509 static void decode_sr_accu(DisasContext
*ctx
)
3514 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3515 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3518 case OPC2_16_SR_RSUB
:
3519 /* calc V bit -- overflow only if r1 = -0x80000000 */
3520 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], -0x80000000);
3521 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3523 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3525 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3527 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3528 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3530 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3532 case OPC2_16_SR_SAT_B
:
3533 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3535 case OPC2_16_SR_SAT_BU
:
3536 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3538 case OPC2_16_SR_SAT_H
:
3539 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3541 case OPC2_16_SR_SAT_HU
:
3542 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3545 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3549 static void decode_16Bit_opc(DisasContext
*ctx
)
3557 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3559 /* handle ADDSC.A opcode only being 6 bit long */
3560 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3561 op1
= OPC1_16_SRRS_ADDSC_A
;
3565 case OPC1_16_SRC_ADD
:
3566 case OPC1_16_SRC_ADD_A15
:
3567 case OPC1_16_SRC_ADD_15A
:
3568 case OPC1_16_SRC_ADD_A
:
3569 case OPC1_16_SRC_CADD
:
3570 case OPC1_16_SRC_CADDN
:
3571 case OPC1_16_SRC_CMOV
:
3572 case OPC1_16_SRC_CMOVN
:
3573 case OPC1_16_SRC_EQ
:
3574 case OPC1_16_SRC_LT
:
3575 case OPC1_16_SRC_MOV
:
3576 case OPC1_16_SRC_MOV_A
:
3577 case OPC1_16_SRC_MOV_E
:
3578 case OPC1_16_SRC_SH
:
3579 case OPC1_16_SRC_SHA
:
3580 decode_src_opc(ctx
, op1
);
3583 case OPC1_16_SRR_ADD
:
3584 case OPC1_16_SRR_ADD_A15
:
3585 case OPC1_16_SRR_ADD_15A
:
3586 case OPC1_16_SRR_ADD_A
:
3587 case OPC1_16_SRR_ADDS
:
3588 case OPC1_16_SRR_AND
:
3589 case OPC1_16_SRR_CMOV
:
3590 case OPC1_16_SRR_CMOVN
:
3591 case OPC1_16_SRR_EQ
:
3592 case OPC1_16_SRR_LT
:
3593 case OPC1_16_SRR_MOV
:
3594 case OPC1_16_SRR_MOV_A
:
3595 case OPC1_16_SRR_MOV_AA
:
3596 case OPC1_16_SRR_MOV_D
:
3597 case OPC1_16_SRR_MUL
:
3598 case OPC1_16_SRR_OR
:
3599 case OPC1_16_SRR_SUB
:
3600 case OPC1_16_SRR_SUB_A15B
:
3601 case OPC1_16_SRR_SUB_15AB
:
3602 case OPC1_16_SRR_SUBS
:
3603 case OPC1_16_SRR_XOR
:
3604 decode_srr_opc(ctx
, op1
);
3607 case OPC1_16_SSR_ST_A
:
3608 case OPC1_16_SSR_ST_A_POSTINC
:
3609 case OPC1_16_SSR_ST_B
:
3610 case OPC1_16_SSR_ST_B_POSTINC
:
3611 case OPC1_16_SSR_ST_H
:
3612 case OPC1_16_SSR_ST_H_POSTINC
:
3613 case OPC1_16_SSR_ST_W
:
3614 case OPC1_16_SSR_ST_W_POSTINC
:
3615 decode_ssr_opc(ctx
, op1
);
3618 case OPC1_16_SRRS_ADDSC_A
:
3619 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3620 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3621 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3622 temp
= tcg_temp_new();
3623 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3624 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3627 case OPC1_16_SLRO_LD_A
:
3628 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3629 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3630 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3632 case OPC1_16_SLRO_LD_BU
:
3633 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3634 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3635 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3637 case OPC1_16_SLRO_LD_H
:
3638 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3639 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3640 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3642 case OPC1_16_SLRO_LD_W
:
3643 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3644 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3645 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3648 case OPC1_16_SB_CALL
:
3650 case OPC1_16_SB_JNZ
:
3652 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3653 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3656 case OPC1_16_SBC_JEQ
:
3657 case OPC1_16_SBC_JNE
:
3658 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3659 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3660 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3662 case OPC1_16_SBC_JEQ2
:
3663 case OPC1_16_SBC_JNE2
:
3664 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3665 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3666 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3667 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3669 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3673 case OPC1_16_SBRN_JNZ_T
:
3674 case OPC1_16_SBRN_JZ_T
:
3675 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3676 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3677 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3680 case OPC1_16_SBR_JEQ2
:
3681 case OPC1_16_SBR_JNE2
:
3682 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3683 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3684 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3685 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3687 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3690 case OPC1_16_SBR_JEQ
:
3691 case OPC1_16_SBR_JGEZ
:
3692 case OPC1_16_SBR_JGTZ
:
3693 case OPC1_16_SBR_JLEZ
:
3694 case OPC1_16_SBR_JLTZ
:
3695 case OPC1_16_SBR_JNE
:
3696 case OPC1_16_SBR_JNZ
:
3697 case OPC1_16_SBR_JNZ_A
:
3698 case OPC1_16_SBR_JZ
:
3699 case OPC1_16_SBR_JZ_A
:
3700 case OPC1_16_SBR_LOOP
:
3701 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3702 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3703 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3706 case OPC1_16_SC_AND
:
3707 case OPC1_16_SC_BISR
:
3708 case OPC1_16_SC_LD_A
:
3709 case OPC1_16_SC_LD_W
:
3710 case OPC1_16_SC_MOV
:
3712 case OPC1_16_SC_ST_A
:
3713 case OPC1_16_SC_ST_W
:
3714 case OPC1_16_SC_SUB_A
:
3715 decode_sc_opc(ctx
, op1
);
3718 case OPC1_16_SLR_LD_A
:
3719 case OPC1_16_SLR_LD_A_POSTINC
:
3720 case OPC1_16_SLR_LD_BU
:
3721 case OPC1_16_SLR_LD_BU_POSTINC
:
3722 case OPC1_16_SLR_LD_H
:
3723 case OPC1_16_SLR_LD_H_POSTINC
:
3724 case OPC1_16_SLR_LD_W
:
3725 case OPC1_16_SLR_LD_W_POSTINC
:
3726 decode_slr_opc(ctx
, op1
);
3729 case OPC1_16_SRO_LD_A
:
3730 case OPC1_16_SRO_LD_BU
:
3731 case OPC1_16_SRO_LD_H
:
3732 case OPC1_16_SRO_LD_W
:
3733 case OPC1_16_SRO_ST_A
:
3734 case OPC1_16_SRO_ST_B
:
3735 case OPC1_16_SRO_ST_H
:
3736 case OPC1_16_SRO_ST_W
:
3737 decode_sro_opc(ctx
, op1
);
3740 case OPC1_16_SSRO_ST_A
:
3741 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3742 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3743 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3745 case OPC1_16_SSRO_ST_B
:
3746 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3747 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3748 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3750 case OPC1_16_SSRO_ST_H
:
3751 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3752 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3753 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3755 case OPC1_16_SSRO_ST_W
:
3756 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3757 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3758 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3761 case OPCM_16_SR_SYSTEM
:
3762 decode_sr_system(ctx
);
3764 case OPCM_16_SR_ACCU
:
3765 decode_sr_accu(ctx
);
3768 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3769 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3771 case OPC1_16_SR_NOT
:
3772 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3773 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3776 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * 32 bit instructions
 */
3785 static void decode_abs_ldw(DisasContext
*ctx
)
3792 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3793 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3794 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3796 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3799 case OPC2_32_ABS_LD_A
:
3800 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3802 case OPC2_32_ABS_LD_D
:
3804 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3806 case OPC2_32_ABS_LD_DA
:
3808 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3810 case OPC2_32_ABS_LD_W
:
3811 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3814 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3818 static void decode_abs_ldb(DisasContext
*ctx
)
3825 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3826 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3827 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3829 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3832 case OPC2_32_ABS_LD_B
:
3833 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3835 case OPC2_32_ABS_LD_BU
:
3836 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3838 case OPC2_32_ABS_LD_H
:
3839 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3841 case OPC2_32_ABS_LD_HU
:
3842 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3845 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3849 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3856 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3857 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3858 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3860 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3863 case OPC2_32_ABS_LDMST
:
3864 gen_ldmst(ctx
, r1
, temp
);
3866 case OPC2_32_ABS_SWAP_W
:
3867 gen_swap(ctx
, r1
, temp
);
3870 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3874 static void decode_abs_ldst_context(DisasContext
*ctx
)
3879 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3880 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3883 case OPC2_32_ABS_LDLCX
:
3884 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3886 case OPC2_32_ABS_LDUCX
:
3887 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3889 case OPC2_32_ABS_STLCX
:
3890 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3892 case OPC2_32_ABS_STUCX
:
3893 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3896 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3900 static void decode_abs_store(DisasContext
*ctx
)
3907 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3908 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3909 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3911 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3914 case OPC2_32_ABS_ST_A
:
3915 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3917 case OPC2_32_ABS_ST_D
:
3919 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3921 case OPC2_32_ABS_ST_DA
:
3923 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3925 case OPC2_32_ABS_ST_W
:
3926 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3929 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3933 static void decode_abs_storeb_h(DisasContext
*ctx
)
3940 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3941 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3942 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3944 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3947 case OPC2_32_ABS_ST_B
:
3948 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3950 case OPC2_32_ABS_ST_H
:
3951 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3954 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3960 static void decode_bit_andacc(DisasContext
*ctx
)
3966 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3967 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3968 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3969 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3970 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3971 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3975 case OPC2_32_BIT_AND_AND_T
:
3976 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3977 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3979 case OPC2_32_BIT_AND_ANDN_T
:
3980 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3981 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3983 case OPC2_32_BIT_AND_NOR_T
:
3984 if (TCG_TARGET_HAS_andc_i32
) {
3985 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3986 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3988 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3989 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3992 case OPC2_32_BIT_AND_OR_T
:
3993 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3994 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3997 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4001 static void decode_bit_logical_t(DisasContext
*ctx
)
4006 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4007 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4008 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4009 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4010 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4011 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4014 case OPC2_32_BIT_AND_T
:
4015 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4016 pos1
, pos2
, &tcg_gen_and_tl
);
4018 case OPC2_32_BIT_ANDN_T
:
4019 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4020 pos1
, pos2
, &tcg_gen_andc_tl
);
4022 case OPC2_32_BIT_NOR_T
:
4023 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4024 pos1
, pos2
, &tcg_gen_nor_tl
);
4026 case OPC2_32_BIT_OR_T
:
4027 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4028 pos1
, pos2
, &tcg_gen_or_tl
);
4031 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4035 static void decode_bit_insert(DisasContext
*ctx
)
4041 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4042 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4043 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4044 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4045 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4046 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4048 temp
= tcg_temp_new();
4050 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4051 if (op2
== OPC2_32_BIT_INSN_T
) {
4052 tcg_gen_not_tl(temp
, temp
);
4054 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4057 static void decode_bit_logical_t2(DisasContext
*ctx
)
4064 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4065 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4066 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4067 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4068 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4069 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4072 case OPC2_32_BIT_NAND_T
:
4073 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4074 pos1
, pos2
, &tcg_gen_nand_tl
);
4076 case OPC2_32_BIT_ORN_T
:
4077 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4078 pos1
, pos2
, &tcg_gen_orc_tl
);
4080 case OPC2_32_BIT_XNOR_T
:
4081 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4082 pos1
, pos2
, &tcg_gen_eqv_tl
);
4084 case OPC2_32_BIT_XOR_T
:
4085 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4086 pos1
, pos2
, &tcg_gen_xor_tl
);
4089 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4093 static void decode_bit_orand(DisasContext
*ctx
)
4100 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4101 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4102 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4103 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4104 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4105 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4108 case OPC2_32_BIT_OR_AND_T
:
4109 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4110 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4112 case OPC2_32_BIT_OR_ANDN_T
:
4113 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4114 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4116 case OPC2_32_BIT_OR_NOR_T
:
4117 if (TCG_TARGET_HAS_orc_i32
) {
4118 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4119 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4121 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4122 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4125 case OPC2_32_BIT_OR_OR_T
:
4126 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4127 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4130 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4134 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4141 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4142 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4143 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4144 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4145 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4146 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4148 temp
= tcg_temp_new();
4151 case OPC2_32_BIT_SH_AND_T
:
4152 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4153 pos1
, pos2
, &tcg_gen_and_tl
);
4155 case OPC2_32_BIT_SH_ANDN_T
:
4156 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4157 pos1
, pos2
, &tcg_gen_andc_tl
);
4159 case OPC2_32_BIT_SH_NOR_T
:
4160 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4161 pos1
, pos2
, &tcg_gen_nor_tl
);
4163 case OPC2_32_BIT_SH_OR_T
:
4164 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4165 pos1
, pos2
, &tcg_gen_or_tl
);
4168 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4170 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4171 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4174 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4181 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4182 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4183 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4184 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4185 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4186 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4188 temp
= tcg_temp_new();
4191 case OPC2_32_BIT_SH_NAND_T
:
4192 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4193 pos1
, pos2
, &tcg_gen_nand_tl
);
4195 case OPC2_32_BIT_SH_ORN_T
:
4196 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4197 pos1
, pos2
, &tcg_gen_orc_tl
);
4199 case OPC2_32_BIT_SH_XNOR_T
:
4200 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4201 pos1
, pos2
, &tcg_gen_eqv_tl
);
4203 case OPC2_32_BIT_SH_XOR_T
:
4204 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4205 pos1
, pos2
, &tcg_gen_xor_tl
);
4208 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4210 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4211 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4217 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4224 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4225 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4226 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4227 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4230 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4231 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4232 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4233 /* instruction to access the cache */
4235 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4236 case OPC2_32_BO_CACHEA_W_POSTINC
:
4237 case OPC2_32_BO_CACHEA_I_POSTINC
:
4238 /* instruction to access the cache, but we still need to handle
4239 the addressing mode */
4240 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4242 case OPC2_32_BO_CACHEA_WI_PREINC
:
4243 case OPC2_32_BO_CACHEA_W_PREINC
:
4244 case OPC2_32_BO_CACHEA_I_PREINC
:
4245 /* instruction to access the cache, but we still need to handle
4246 the addressing mode */
4247 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4249 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4250 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4251 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4252 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4255 case OPC2_32_BO_CACHEI_W_POSTINC
:
4256 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4257 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4258 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4260 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4263 case OPC2_32_BO_CACHEI_W_PREINC
:
4264 case OPC2_32_BO_CACHEI_WI_PREINC
:
4265 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4266 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4268 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4271 case OPC2_32_BO_ST_A_SHORTOFF
:
4272 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4274 case OPC2_32_BO_ST_A_POSTINC
:
4275 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4277 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4279 case OPC2_32_BO_ST_A_PREINC
:
4280 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4282 case OPC2_32_BO_ST_B_SHORTOFF
:
4283 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4285 case OPC2_32_BO_ST_B_POSTINC
:
4286 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4288 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4290 case OPC2_32_BO_ST_B_PREINC
:
4291 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4293 case OPC2_32_BO_ST_D_SHORTOFF
:
4295 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4298 case OPC2_32_BO_ST_D_POSTINC
:
4300 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4301 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4303 case OPC2_32_BO_ST_D_PREINC
:
4305 temp
= tcg_temp_new();
4306 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4307 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4308 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4310 case OPC2_32_BO_ST_DA_SHORTOFF
:
4312 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4315 case OPC2_32_BO_ST_DA_POSTINC
:
4317 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4318 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4320 case OPC2_32_BO_ST_DA_PREINC
:
4322 temp
= tcg_temp_new();
4323 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4324 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4325 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4327 case OPC2_32_BO_ST_H_SHORTOFF
:
4328 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4330 case OPC2_32_BO_ST_H_POSTINC
:
4331 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4333 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4335 case OPC2_32_BO_ST_H_PREINC
:
4336 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4338 case OPC2_32_BO_ST_Q_SHORTOFF
:
4339 temp
= tcg_temp_new();
4340 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4341 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4343 case OPC2_32_BO_ST_Q_POSTINC
:
4344 temp
= tcg_temp_new();
4345 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4346 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4348 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4350 case OPC2_32_BO_ST_Q_PREINC
:
4351 temp
= tcg_temp_new();
4352 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4353 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4355 case OPC2_32_BO_ST_W_SHORTOFF
:
4356 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4358 case OPC2_32_BO_ST_W_POSTINC
:
4359 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4361 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4363 case OPC2_32_BO_ST_W_PREINC
:
4364 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4367 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4371 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4376 TCGv temp
, temp2
, t_off10
;
4378 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4379 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4380 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4381 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4383 temp
= tcg_temp_new();
4384 temp2
= tcg_temp_new();
4385 t_off10
= tcg_constant_i32(off10
);
4387 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4388 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4391 case OPC2_32_BO_CACHEA_WI_BR
:
4392 case OPC2_32_BO_CACHEA_W_BR
:
4393 case OPC2_32_BO_CACHEA_I_BR
:
4394 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4396 case OPC2_32_BO_CACHEA_WI_CIRC
:
4397 case OPC2_32_BO_CACHEA_W_CIRC
:
4398 case OPC2_32_BO_CACHEA_I_CIRC
:
4399 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4401 case OPC2_32_BO_ST_A_BR
:
4402 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4403 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4405 case OPC2_32_BO_ST_A_CIRC
:
4406 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4407 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4409 case OPC2_32_BO_ST_B_BR
:
4410 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4411 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4413 case OPC2_32_BO_ST_B_CIRC
:
4414 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4415 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4417 case OPC2_32_BO_ST_D_BR
:
4419 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4420 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4422 case OPC2_32_BO_ST_D_CIRC
:
4424 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4425 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4426 tcg_gen_addi_tl(temp
, temp
, 4);
4427 tcg_gen_rem_tl(temp
, temp
, temp2
);
4428 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4429 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4430 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4432 case OPC2_32_BO_ST_DA_BR
:
4434 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4435 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4437 case OPC2_32_BO_ST_DA_CIRC
:
4439 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4440 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4441 tcg_gen_addi_tl(temp
, temp
, 4);
4442 tcg_gen_rem_tl(temp
, temp
, temp2
);
4443 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4444 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4445 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4447 case OPC2_32_BO_ST_H_BR
:
4448 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4449 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4451 case OPC2_32_BO_ST_H_CIRC
:
4452 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4453 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4455 case OPC2_32_BO_ST_Q_BR
:
4456 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4457 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4458 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4460 case OPC2_32_BO_ST_Q_CIRC
:
4461 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4462 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4463 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4465 case OPC2_32_BO_ST_W_BR
:
4466 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4467 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4469 case OPC2_32_BO_ST_W_CIRC
:
4470 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4471 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4474 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4478 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4485 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4486 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4487 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4488 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4491 case OPC2_32_BO_LD_A_SHORTOFF
:
4492 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4494 case OPC2_32_BO_LD_A_POSTINC
:
4495 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4497 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4499 case OPC2_32_BO_LD_A_PREINC
:
4500 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4502 case OPC2_32_BO_LD_B_SHORTOFF
:
4503 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4505 case OPC2_32_BO_LD_B_POSTINC
:
4506 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4508 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4510 case OPC2_32_BO_LD_B_PREINC
:
4511 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4513 case OPC2_32_BO_LD_BU_SHORTOFF
:
4514 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4516 case OPC2_32_BO_LD_BU_POSTINC
:
4517 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4519 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4521 case OPC2_32_BO_LD_BU_PREINC
:
4522 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4524 case OPC2_32_BO_LD_D_SHORTOFF
:
4526 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4529 case OPC2_32_BO_LD_D_POSTINC
:
4531 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4532 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4534 case OPC2_32_BO_LD_D_PREINC
:
4536 temp
= tcg_temp_new();
4537 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4538 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4539 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4541 case OPC2_32_BO_LD_DA_SHORTOFF
:
4543 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4546 case OPC2_32_BO_LD_DA_POSTINC
:
4548 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4549 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4551 case OPC2_32_BO_LD_DA_PREINC
:
4553 temp
= tcg_temp_new();
4554 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4555 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4556 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4558 case OPC2_32_BO_LD_H_SHORTOFF
:
4559 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4561 case OPC2_32_BO_LD_H_POSTINC
:
4562 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4564 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4566 case OPC2_32_BO_LD_H_PREINC
:
4567 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4569 case OPC2_32_BO_LD_HU_SHORTOFF
:
4570 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4572 case OPC2_32_BO_LD_HU_POSTINC
:
4573 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4575 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4577 case OPC2_32_BO_LD_HU_PREINC
:
4578 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4580 case OPC2_32_BO_LD_Q_SHORTOFF
:
4581 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4582 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4584 case OPC2_32_BO_LD_Q_POSTINC
:
4585 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4587 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4588 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4590 case OPC2_32_BO_LD_Q_PREINC
:
4591 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4592 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4594 case OPC2_32_BO_LD_W_SHORTOFF
:
4595 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4597 case OPC2_32_BO_LD_W_POSTINC
:
4598 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4600 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4602 case OPC2_32_BO_LD_W_PREINC
:
4603 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4606 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4610 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4615 TCGv temp
, temp2
, t_off10
;
4617 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4618 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4619 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4620 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4622 temp
= tcg_temp_new();
4623 temp2
= tcg_temp_new();
4624 t_off10
= tcg_constant_i32(off10
);
4626 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4627 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4631 case OPC2_32_BO_LD_A_BR
:
4632 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4633 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4635 case OPC2_32_BO_LD_A_CIRC
:
4636 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4637 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4639 case OPC2_32_BO_LD_B_BR
:
4640 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4641 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4643 case OPC2_32_BO_LD_B_CIRC
:
4644 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4645 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4647 case OPC2_32_BO_LD_BU_BR
:
4648 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4649 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4651 case OPC2_32_BO_LD_BU_CIRC
:
4652 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4653 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4655 case OPC2_32_BO_LD_D_BR
:
4657 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4658 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4660 case OPC2_32_BO_LD_D_CIRC
:
4662 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4663 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4664 tcg_gen_addi_tl(temp
, temp
, 4);
4665 tcg_gen_rem_tl(temp
, temp
, temp2
);
4666 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4667 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4668 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4670 case OPC2_32_BO_LD_DA_BR
:
4672 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4673 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4675 case OPC2_32_BO_LD_DA_CIRC
:
4677 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4678 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4679 tcg_gen_addi_tl(temp
, temp
, 4);
4680 tcg_gen_rem_tl(temp
, temp
, temp2
);
4681 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4682 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4683 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4685 case OPC2_32_BO_LD_H_BR
:
4686 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4687 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4689 case OPC2_32_BO_LD_H_CIRC
:
4690 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4691 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4693 case OPC2_32_BO_LD_HU_BR
:
4694 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4695 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4697 case OPC2_32_BO_LD_HU_CIRC
:
4698 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4699 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4701 case OPC2_32_BO_LD_Q_BR
:
4702 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4703 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4704 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4706 case OPC2_32_BO_LD_Q_CIRC
:
4707 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4708 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4709 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4711 case OPC2_32_BO_LD_W_BR
:
4712 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4713 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4715 case OPC2_32_BO_LD_W_CIRC
:
4716 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4717 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4720 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4724 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4732 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4733 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4734 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4735 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4738 temp
= tcg_temp_new();
4741 case OPC2_32_BO_LDLCX_SHORTOFF
:
4742 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4743 gen_helper_ldlcx(cpu_env
, temp
);
4745 case OPC2_32_BO_LDMST_SHORTOFF
:
4746 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4747 gen_ldmst(ctx
, r1
, temp
);
4749 case OPC2_32_BO_LDMST_POSTINC
:
4750 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4751 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4753 case OPC2_32_BO_LDMST_PREINC
:
4754 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4755 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4757 case OPC2_32_BO_LDUCX_SHORTOFF
:
4758 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4759 gen_helper_lducx(cpu_env
, temp
);
4761 case OPC2_32_BO_LEA_SHORTOFF
:
4762 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4764 case OPC2_32_BO_STLCX_SHORTOFF
:
4765 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4766 gen_helper_stlcx(cpu_env
, temp
);
4768 case OPC2_32_BO_STUCX_SHORTOFF
:
4769 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4770 gen_helper_stucx(cpu_env
, temp
);
4772 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4773 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4774 gen_swap(ctx
, r1
, temp
);
4776 case OPC2_32_BO_SWAP_W_POSTINC
:
4777 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4778 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4780 case OPC2_32_BO_SWAP_W_PREINC
:
4781 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4782 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4784 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4785 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4786 gen_cmpswap(ctx
, r1
, temp
);
4788 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4789 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4790 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4792 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4793 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4794 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4796 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4797 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4798 gen_swapmsk(ctx
, r1
, temp
);
4800 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4801 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4802 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4804 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4805 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4806 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4809 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4813 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4818 TCGv temp
, temp2
, t_off10
;
4820 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4821 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4822 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4823 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4825 temp
= tcg_temp_new();
4826 temp2
= tcg_temp_new();
4827 t_off10
= tcg_constant_i32(off10
);
4829 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4830 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4833 case OPC2_32_BO_LDMST_BR
:
4834 gen_ldmst(ctx
, r1
, temp2
);
4835 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4837 case OPC2_32_BO_LDMST_CIRC
:
4838 gen_ldmst(ctx
, r1
, temp2
);
4839 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4841 case OPC2_32_BO_SWAP_W_BR
:
4842 gen_swap(ctx
, r1
, temp2
);
4843 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4845 case OPC2_32_BO_SWAP_W_CIRC
:
4846 gen_swap(ctx
, r1
, temp2
);
4847 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4849 case OPC2_32_BO_CMPSWAP_W_BR
:
4850 gen_cmpswap(ctx
, r1
, temp2
);
4851 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4853 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4854 gen_cmpswap(ctx
, r1
, temp2
);
4855 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4857 case OPC2_32_BO_SWAPMSK_W_BR
:
4858 gen_swapmsk(ctx
, r1
, temp2
);
4859 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4861 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4862 gen_swapmsk(ctx
, r1
, temp2
);
4863 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4866 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4870 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4876 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4877 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4878 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4881 case OPC1_32_BOL_LD_A_LONGOFF
:
4882 temp
= tcg_temp_new();
4883 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4884 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4886 case OPC1_32_BOL_LD_W_LONGOFF
:
4887 temp
= tcg_temp_new();
4888 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4889 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4891 case OPC1_32_BOL_LEA_LONGOFF
:
4892 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4894 case OPC1_32_BOL_ST_A_LONGOFF
:
4895 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4896 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4898 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4901 case OPC1_32_BOL_ST_W_LONGOFF
:
4902 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4904 case OPC1_32_BOL_LD_B_LONGOFF
:
4905 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4906 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4908 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4911 case OPC1_32_BOL_LD_BU_LONGOFF
:
4912 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4913 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4915 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4918 case OPC1_32_BOL_LD_H_LONGOFF
:
4919 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4920 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4922 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4925 case OPC1_32_BOL_LD_HU_LONGOFF
:
4926 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4927 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4929 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4932 case OPC1_32_BOL_ST_B_LONGOFF
:
4933 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4934 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4936 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4939 case OPC1_32_BOL_ST_H_LONGOFF
:
4940 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4941 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4943 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4947 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4952 static void decode_rc_logical_shift(DisasContext
*ctx
)
4959 r2
= MASK_OP_RC_D(ctx
->opcode
);
4960 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4961 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4962 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4964 temp
= tcg_temp_new();
4967 case OPC2_32_RC_AND
:
4968 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4970 case OPC2_32_RC_ANDN
:
4971 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4973 case OPC2_32_RC_NAND
:
4974 tcg_gen_movi_tl(temp
, const9
);
4975 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4977 case OPC2_32_RC_NOR
:
4978 tcg_gen_movi_tl(temp
, const9
);
4979 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4982 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4984 case OPC2_32_RC_ORN
:
4985 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4988 const9
= sextract32(const9
, 0, 6);
4989 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4991 case OPC2_32_RC_SH_H
:
4992 const9
= sextract32(const9
, 0, 5);
4993 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4995 case OPC2_32_RC_SHA
:
4996 const9
= sextract32(const9
, 0, 6);
4997 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4999 case OPC2_32_RC_SHA_H
:
5000 const9
= sextract32(const9
, 0, 5);
5001 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5003 case OPC2_32_RC_SHAS
:
5004 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5006 case OPC2_32_RC_XNOR
:
5007 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5008 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5010 case OPC2_32_RC_XOR
:
5011 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5014 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5018 static void decode_rc_accumulator(DisasContext
*ctx
)
5026 r2
= MASK_OP_RC_D(ctx
->opcode
);
5027 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5028 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5030 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5032 temp
= tcg_temp_new();
5035 case OPC2_32_RC_ABSDIF
:
5036 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5038 case OPC2_32_RC_ABSDIFS
:
5039 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5041 case OPC2_32_RC_ADD
:
5042 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5044 case OPC2_32_RC_ADDC
:
5045 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5047 case OPC2_32_RC_ADDS
:
5048 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5050 case OPC2_32_RC_ADDS_U
:
5051 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5053 case OPC2_32_RC_ADDX
:
5054 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5056 case OPC2_32_RC_AND_EQ
:
5057 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5058 const9
, &tcg_gen_and_tl
);
5060 case OPC2_32_RC_AND_GE
:
5061 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5062 const9
, &tcg_gen_and_tl
);
5064 case OPC2_32_RC_AND_GE_U
:
5065 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5066 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5067 const9
, &tcg_gen_and_tl
);
5069 case OPC2_32_RC_AND_LT
:
5070 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5071 const9
, &tcg_gen_and_tl
);
5073 case OPC2_32_RC_AND_LT_U
:
5074 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5075 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5076 const9
, &tcg_gen_and_tl
);
5078 case OPC2_32_RC_AND_NE
:
5079 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5080 const9
, &tcg_gen_and_tl
);
5083 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5085 case OPC2_32_RC_EQANY_B
:
5086 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5088 case OPC2_32_RC_EQANY_H
:
5089 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5092 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5094 case OPC2_32_RC_GE_U
:
5095 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5096 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5099 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5101 case OPC2_32_RC_LT_U
:
5102 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5103 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5105 case OPC2_32_RC_MAX
:
5106 tcg_gen_movi_tl(temp
, const9
);
5107 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5108 cpu_gpr_d
[r1
], temp
);
5110 case OPC2_32_RC_MAX_U
:
5111 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5112 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5113 cpu_gpr_d
[r1
], temp
);
5115 case OPC2_32_RC_MIN
:
5116 tcg_gen_movi_tl(temp
, const9
);
5117 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5118 cpu_gpr_d
[r1
], temp
);
5120 case OPC2_32_RC_MIN_U
:
5121 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5122 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5123 cpu_gpr_d
[r1
], temp
);
5126 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5128 case OPC2_32_RC_OR_EQ
:
5129 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5130 const9
, &tcg_gen_or_tl
);
5132 case OPC2_32_RC_OR_GE
:
5133 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5134 const9
, &tcg_gen_or_tl
);
5136 case OPC2_32_RC_OR_GE_U
:
5137 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5138 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5139 const9
, &tcg_gen_or_tl
);
5141 case OPC2_32_RC_OR_LT
:
5142 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5143 const9
, &tcg_gen_or_tl
);
5145 case OPC2_32_RC_OR_LT_U
:
5146 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5147 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5148 const9
, &tcg_gen_or_tl
);
5150 case OPC2_32_RC_OR_NE
:
5151 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5152 const9
, &tcg_gen_or_tl
);
5154 case OPC2_32_RC_RSUB
:
5155 tcg_gen_movi_tl(temp
, const9
);
5156 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5158 case OPC2_32_RC_RSUBS
:
5159 tcg_gen_movi_tl(temp
, const9
);
5160 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5162 case OPC2_32_RC_RSUBS_U
:
5163 tcg_gen_movi_tl(temp
, const9
);
5164 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5166 case OPC2_32_RC_SH_EQ
:
5167 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5169 case OPC2_32_RC_SH_GE
:
5170 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5172 case OPC2_32_RC_SH_GE_U
:
5173 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5174 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5176 case OPC2_32_RC_SH_LT
:
5177 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5179 case OPC2_32_RC_SH_LT_U
:
5180 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5181 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5183 case OPC2_32_RC_SH_NE
:
5184 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5186 case OPC2_32_RC_XOR_EQ
:
5187 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5188 const9
, &tcg_gen_xor_tl
);
5190 case OPC2_32_RC_XOR_GE
:
5191 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5192 const9
, &tcg_gen_xor_tl
);
5194 case OPC2_32_RC_XOR_GE_U
:
5195 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5196 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5197 const9
, &tcg_gen_xor_tl
);
5199 case OPC2_32_RC_XOR_LT
:
5200 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5201 const9
, &tcg_gen_xor_tl
);
5203 case OPC2_32_RC_XOR_LT_U
:
5204 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5205 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5206 const9
, &tcg_gen_xor_tl
);
5208 case OPC2_32_RC_XOR_NE
:
5209 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5210 const9
, &tcg_gen_xor_tl
);
5213 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5217 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5222 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5223 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5226 case OPC2_32_RC_BISR
:
5227 gen_helper_1arg(bisr
, const9
);
5229 case OPC2_32_RC_SYSCALL
:
5230 /* TODO: Add exception generation */
5233 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5237 static void decode_rc_mul(DisasContext
*ctx
)
5243 r2
= MASK_OP_RC_D(ctx
->opcode
);
5244 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5245 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5247 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5250 case OPC2_32_RC_MUL_32
:
5251 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5253 case OPC2_32_RC_MUL_64
:
5255 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5257 case OPC2_32_RC_MULS_32
:
5258 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5260 case OPC2_32_RC_MUL_U_64
:
5261 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5263 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5265 case OPC2_32_RC_MULS_U_32
:
5266 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5267 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5270 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5275 static void decode_rcpw_insert(DisasContext
*ctx
)
5279 int32_t pos
, width
, const4
;
5283 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5284 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5285 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5286 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5287 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5288 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5291 case OPC2_32_RCPW_IMASK
:
5293 /* if pos + width > 32 undefined result */
5294 if (pos
+ width
<= 32) {
5295 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5296 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5299 case OPC2_32_RCPW_INSERT
:
5300 /* if pos + width > 32 undefined result */
5301 if (pos
+ width
<= 32) {
5302 temp
= tcg_constant_i32(const4
);
5303 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5307 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5313 static void decode_rcrw_insert(DisasContext
*ctx
)
5317 int32_t width
, const4
;
5319 TCGv temp
, temp2
, temp3
;
5321 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5322 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5323 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5324 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5325 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5326 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5328 temp
= tcg_temp_new();
5329 temp2
= tcg_temp_new();
5332 case OPC2_32_RCRW_IMASK
:
5333 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5334 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5335 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5336 tcg_gen_movi_tl(temp2
, const4
);
5337 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5339 case OPC2_32_RCRW_INSERT
:
5340 temp3
= tcg_temp_new();
5342 tcg_gen_movi_tl(temp
, width
);
5343 tcg_gen_movi_tl(temp2
, const4
);
5344 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5345 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5348 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5354 static void decode_rcr_cond_select(DisasContext
*ctx
)
5362 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5363 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5364 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5365 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5366 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5369 case OPC2_32_RCR_CADD
:
5370 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5373 case OPC2_32_RCR_CADDN
:
5374 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5377 case OPC2_32_RCR_SEL
:
5378 temp
= tcg_constant_i32(0);
5379 temp2
= tcg_constant_i32(const9
);
5380 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5381 cpu_gpr_d
[r1
], temp2
);
5383 case OPC2_32_RCR_SELN
:
5384 temp
= tcg_constant_i32(0);
5385 temp2
= tcg_constant_i32(const9
);
5386 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5387 cpu_gpr_d
[r1
], temp2
);
5390 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5394 static void decode_rcr_madd(DisasContext
*ctx
)
5401 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5402 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5403 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5404 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5405 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5408 case OPC2_32_RCR_MADD_32
:
5409 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5411 case OPC2_32_RCR_MADD_64
:
5414 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5415 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5417 case OPC2_32_RCR_MADDS_32
:
5418 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5420 case OPC2_32_RCR_MADDS_64
:
5423 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5424 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5426 case OPC2_32_RCR_MADD_U_64
:
5429 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5430 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5431 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5433 case OPC2_32_RCR_MADDS_U_32
:
5434 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5435 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5437 case OPC2_32_RCR_MADDS_U_64
:
5440 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5441 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5442 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5445 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5449 static void decode_rcr_msub(DisasContext
*ctx
)
5456 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5457 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5458 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5459 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5460 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5463 case OPC2_32_RCR_MSUB_32
:
5464 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5466 case OPC2_32_RCR_MSUB_64
:
5469 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5470 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5472 case OPC2_32_RCR_MSUBS_32
:
5473 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5475 case OPC2_32_RCR_MSUBS_64
:
5478 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5479 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5481 case OPC2_32_RCR_MSUB_U_64
:
5484 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5485 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5486 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5488 case OPC2_32_RCR_MSUBS_U_32
:
5489 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5490 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5492 case OPC2_32_RCR_MSUBS_U_64
:
5495 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5496 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5497 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5500 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5506 static void decode_rlc_opc(DisasContext
*ctx
,
5512 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5513 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5514 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5517 case OPC1_32_RLC_ADDI
:
5518 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5520 case OPC1_32_RLC_ADDIH
:
5521 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5523 case OPC1_32_RLC_ADDIH_A
:
5524 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5526 case OPC1_32_RLC_MFCR
:
5527 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5528 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5530 case OPC1_32_RLC_MOV
:
5531 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5533 case OPC1_32_RLC_MOV_64
:
5534 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5536 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5537 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5539 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5542 case OPC1_32_RLC_MOV_U
:
5543 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5544 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5546 case OPC1_32_RLC_MOV_H
:
5547 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5549 case OPC1_32_RLC_MOVH_A
:
5550 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5552 case OPC1_32_RLC_MTCR
:
5553 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5554 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5557 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5562 static void decode_rr_accumulator(DisasContext
*ctx
)
5569 r3
= MASK_OP_RR_D(ctx
->opcode
);
5570 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5571 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5572 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5575 case OPC2_32_RR_ABS
:
5576 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5578 case OPC2_32_RR_ABS_B
:
5579 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5581 case OPC2_32_RR_ABS_H
:
5582 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5584 case OPC2_32_RR_ABSDIF
:
5585 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5587 case OPC2_32_RR_ABSDIF_B
:
5588 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5591 case OPC2_32_RR_ABSDIF_H
:
5592 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5595 case OPC2_32_RR_ABSDIFS
:
5596 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5599 case OPC2_32_RR_ABSDIFS_H
:
5600 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5603 case OPC2_32_RR_ABSS
:
5604 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5606 case OPC2_32_RR_ABSS_H
:
5607 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5609 case OPC2_32_RR_ADD
:
5610 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5612 case OPC2_32_RR_ADD_B
:
5613 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5615 case OPC2_32_RR_ADD_H
:
5616 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5618 case OPC2_32_RR_ADDC
:
5619 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5621 case OPC2_32_RR_ADDS
:
5622 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5624 case OPC2_32_RR_ADDS_H
:
5625 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5628 case OPC2_32_RR_ADDS_HU
:
5629 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5632 case OPC2_32_RR_ADDS_U
:
5633 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5636 case OPC2_32_RR_ADDX
:
5637 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5639 case OPC2_32_RR_AND_EQ
:
5640 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5641 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5643 case OPC2_32_RR_AND_GE
:
5644 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5645 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5647 case OPC2_32_RR_AND_GE_U
:
5648 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5649 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5651 case OPC2_32_RR_AND_LT
:
5652 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5653 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5655 case OPC2_32_RR_AND_LT_U
:
5656 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5657 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5659 case OPC2_32_RR_AND_NE
:
5660 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5661 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5664 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5667 case OPC2_32_RR_EQ_B
:
5668 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5670 case OPC2_32_RR_EQ_H
:
5671 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5673 case OPC2_32_RR_EQ_W
:
5674 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5676 case OPC2_32_RR_EQANY_B
:
5677 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5679 case OPC2_32_RR_EQANY_H
:
5680 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5683 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5686 case OPC2_32_RR_GE_U
:
5687 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5691 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5694 case OPC2_32_RR_LT_U
:
5695 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5698 case OPC2_32_RR_LT_B
:
5699 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5701 case OPC2_32_RR_LT_BU
:
5702 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5704 case OPC2_32_RR_LT_H
:
5705 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5707 case OPC2_32_RR_LT_HU
:
5708 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5710 case OPC2_32_RR_LT_W
:
5711 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5713 case OPC2_32_RR_LT_WU
:
5714 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5716 case OPC2_32_RR_MAX
:
5717 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5718 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5720 case OPC2_32_RR_MAX_U
:
5721 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5722 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5724 case OPC2_32_RR_MAX_B
:
5725 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5727 case OPC2_32_RR_MAX_BU
:
5728 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5730 case OPC2_32_RR_MAX_H
:
5731 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5733 case OPC2_32_RR_MAX_HU
:
5734 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5736 case OPC2_32_RR_MIN
:
5737 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5738 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5740 case OPC2_32_RR_MIN_U
:
5741 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5742 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5744 case OPC2_32_RR_MIN_B
:
5745 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5747 case OPC2_32_RR_MIN_BU
:
5748 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5750 case OPC2_32_RR_MIN_H
:
5751 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5753 case OPC2_32_RR_MIN_HU
:
5754 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5756 case OPC2_32_RR_MOV
:
5757 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5759 case OPC2_32_RR_MOV_64
:
5760 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5761 temp
= tcg_temp_new();
5764 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5765 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5766 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5768 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5771 case OPC2_32_RR_MOVS_64
:
5772 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5774 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5775 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5777 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5781 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5784 case OPC2_32_RR_OR_EQ
:
5785 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5786 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5788 case OPC2_32_RR_OR_GE
:
5789 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5790 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5792 case OPC2_32_RR_OR_GE_U
:
5793 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5794 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5796 case OPC2_32_RR_OR_LT
:
5797 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5798 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5800 case OPC2_32_RR_OR_LT_U
:
5801 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5802 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5804 case OPC2_32_RR_OR_NE
:
5805 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5806 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5808 case OPC2_32_RR_SAT_B
:
5809 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5811 case OPC2_32_RR_SAT_BU
:
5812 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5814 case OPC2_32_RR_SAT_H
:
5815 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5817 case OPC2_32_RR_SAT_HU
:
5818 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5820 case OPC2_32_RR_SH_EQ
:
5821 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5824 case OPC2_32_RR_SH_GE
:
5825 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5828 case OPC2_32_RR_SH_GE_U
:
5829 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5832 case OPC2_32_RR_SH_LT
:
5833 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5836 case OPC2_32_RR_SH_LT_U
:
5837 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5840 case OPC2_32_RR_SH_NE
:
5841 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5844 case OPC2_32_RR_SUB
:
5845 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5847 case OPC2_32_RR_SUB_B
:
5848 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5850 case OPC2_32_RR_SUB_H
:
5851 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5853 case OPC2_32_RR_SUBC
:
5854 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5856 case OPC2_32_RR_SUBS
:
5857 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5859 case OPC2_32_RR_SUBS_U
:
5860 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5862 case OPC2_32_RR_SUBS_H
:
5863 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5866 case OPC2_32_RR_SUBS_HU
:
5867 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5870 case OPC2_32_RR_SUBX
:
5871 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5873 case OPC2_32_RR_XOR_EQ
:
5874 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5875 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5877 case OPC2_32_RR_XOR_GE
:
5878 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5879 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5881 case OPC2_32_RR_XOR_GE_U
:
5882 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5883 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5885 case OPC2_32_RR_XOR_LT
:
5886 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5887 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5889 case OPC2_32_RR_XOR_LT_U
:
5890 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5891 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5893 case OPC2_32_RR_XOR_NE
:
5894 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5895 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5898 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5902 static void decode_rr_logical_shift(DisasContext
*ctx
)
5907 r3
= MASK_OP_RR_D(ctx
->opcode
);
5908 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5909 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5910 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5913 case OPC2_32_RR_AND
:
5914 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5916 case OPC2_32_RR_ANDN
:
5917 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5919 case OPC2_32_RR_CLO
:
5920 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5921 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5923 case OPC2_32_RR_CLO_H
:
5924 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5926 case OPC2_32_RR_CLS
:
5927 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5929 case OPC2_32_RR_CLS_H
:
5930 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5932 case OPC2_32_RR_CLZ
:
5933 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5935 case OPC2_32_RR_CLZ_H
:
5936 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5938 case OPC2_32_RR_NAND
:
5939 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5941 case OPC2_32_RR_NOR
:
5942 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5945 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5947 case OPC2_32_RR_ORN
:
5948 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5951 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5953 case OPC2_32_RR_SH_H
:
5954 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5956 case OPC2_32_RR_SHA
:
5957 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5959 case OPC2_32_RR_SHA_H
:
5960 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5962 case OPC2_32_RR_SHAS
:
5963 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5965 case OPC2_32_RR_XNOR
:
5966 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5968 case OPC2_32_RR_XOR
:
5969 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5972 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5976 static void decode_rr_address(DisasContext
*ctx
)
5982 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5983 r3
= MASK_OP_RR_D(ctx
->opcode
);
5984 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5985 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5986 n
= MASK_OP_RR_N(ctx
->opcode
);
5989 case OPC2_32_RR_ADD_A
:
5990 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5992 case OPC2_32_RR_ADDSC_A
:
5993 temp
= tcg_temp_new();
5994 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5995 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
5997 case OPC2_32_RR_ADDSC_AT
:
5998 temp
= tcg_temp_new();
5999 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6000 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6001 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6003 case OPC2_32_RR_EQ_A
:
6004 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6007 case OPC2_32_RR_EQZ
:
6008 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6010 case OPC2_32_RR_GE_A
:
6011 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6014 case OPC2_32_RR_LT_A
:
6015 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6018 case OPC2_32_RR_MOV_A
:
6019 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6021 case OPC2_32_RR_MOV_AA
:
6022 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6024 case OPC2_32_RR_MOV_D
:
6025 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6027 case OPC2_32_RR_NE_A
:
6028 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6031 case OPC2_32_RR_NEZ_A
:
6032 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6034 case OPC2_32_RR_SUB_A
:
6035 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6038 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6042 static void decode_rr_idirect(DisasContext
*ctx
)
6047 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6048 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6052 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6054 case OPC2_32_RR_JLI
:
6055 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6056 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6058 case OPC2_32_RR_CALLI
:
6059 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6060 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6062 case OPC2_32_RR_FCALLI
:
6063 gen_fcall_save_ctx(ctx
);
6064 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6067 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6069 tcg_gen_exit_tb(NULL
, 0);
6070 ctx
->base
.is_jmp
= DISAS_NORETURN
;
6073 static void decode_rr_divide(DisasContext
*ctx
)
6078 TCGv temp
, temp2
, temp3
;
6080 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6081 r3
= MASK_OP_RR_D(ctx
->opcode
);
6082 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6083 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6086 case OPC2_32_RR_BMERGE
:
6087 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6089 case OPC2_32_RR_BSPLIT
:
6091 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6093 case OPC2_32_RR_DVINIT_B
:
6095 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6098 case OPC2_32_RR_DVINIT_BU
:
6099 temp
= tcg_temp_new();
6100 temp2
= tcg_temp_new();
6101 temp3
= tcg_temp_new();
6103 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6105 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6106 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6107 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6108 tcg_gen_abs_tl(temp
, temp3
);
6109 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6110 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6112 /* overflow = (D[b] == 0) */
6113 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6115 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6117 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6119 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6120 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6122 case OPC2_32_RR_DVINIT_H
:
6124 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6127 case OPC2_32_RR_DVINIT_HU
:
6128 temp
= tcg_temp_new();
6129 temp2
= tcg_temp_new();
6130 temp3
= tcg_temp_new();
6132 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6134 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6135 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6136 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6137 tcg_gen_abs_tl(temp
, temp3
);
6138 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6139 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6141 /* overflow = (D[b] == 0) */
6142 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6144 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6146 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6148 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6149 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6151 case OPC2_32_RR_DVINIT
:
6152 temp
= tcg_temp_new();
6153 temp2
= tcg_temp_new();
6155 /* overflow = ((D[b] == 0) ||
6156 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6157 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6158 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6159 tcg_gen_and_tl(temp
, temp
, temp2
);
6160 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6161 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6162 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6164 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6166 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6168 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6169 /* sign extend to high reg */
6170 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6172 case OPC2_32_RR_DVINIT_U
:
6173 /* overflow = (D[b] == 0) */
6174 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6175 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6177 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6179 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6181 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6182 /* zero extend to high reg*/
6183 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6185 case OPC2_32_RR_PARITY
:
6186 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6188 case OPC2_32_RR_UNPACK
:
6190 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6192 case OPC2_32_RR_CRC32
:
6193 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6194 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6196 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6199 case OPC2_32_RR_DIV
:
6200 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6201 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6204 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6207 case OPC2_32_RR_DIV_U
:
6208 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6209 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6210 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6212 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6215 case OPC2_32_RR_MUL_F
:
6216 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6218 case OPC2_32_RR_DIV_F
:
6219 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6221 case OPC2_32_RR_CMP_F
:
6222 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6224 case OPC2_32_RR_FTOI
:
6225 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6227 case OPC2_32_RR_ITOF
:
6228 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6230 case OPC2_32_RR_FTOUZ
:
6231 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6233 case OPC2_32_RR_UPDFL
:
6234 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6236 case OPC2_32_RR_UTOF
:
6237 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6239 case OPC2_32_RR_FTOIZ
:
6240 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6242 case OPC2_32_RR_QSEED_F
:
6243 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6246 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6251 static void decode_rr1_mul(DisasContext
*ctx
)
6259 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6260 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6261 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6262 n
= tcg_constant_i32(MASK_OP_RR1_N(ctx
->opcode
));
6263 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6266 case OPC2_32_RR1_MUL_H_32_LL
:
6267 temp64
= tcg_temp_new_i64();
6269 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6270 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6271 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6273 case OPC2_32_RR1_MUL_H_32_LU
:
6274 temp64
= tcg_temp_new_i64();
6276 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6277 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6278 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6280 case OPC2_32_RR1_MUL_H_32_UL
:
6281 temp64
= tcg_temp_new_i64();
6283 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6284 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6285 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6287 case OPC2_32_RR1_MUL_H_32_UU
:
6288 temp64
= tcg_temp_new_i64();
6290 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6291 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6292 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6294 case OPC2_32_RR1_MULM_H_64_LL
:
6295 temp64
= tcg_temp_new_i64();
6297 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6298 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6300 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6302 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6304 case OPC2_32_RR1_MULM_H_64_LU
:
6305 temp64
= tcg_temp_new_i64();
6307 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6308 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6310 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6312 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6314 case OPC2_32_RR1_MULM_H_64_UL
:
6315 temp64
= tcg_temp_new_i64();
6317 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6318 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6320 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6322 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6324 case OPC2_32_RR1_MULM_H_64_UU
:
6325 temp64
= tcg_temp_new_i64();
6327 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6328 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6330 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6332 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6334 case OPC2_32_RR1_MULR_H_16_LL
:
6335 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6336 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6338 case OPC2_32_RR1_MULR_H_16_LU
:
6339 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6340 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6342 case OPC2_32_RR1_MULR_H_16_UL
:
6343 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6344 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6346 case OPC2_32_RR1_MULR_H_16_UU
:
6347 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6348 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6351 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6355 static void decode_rr1_mulq(DisasContext
*ctx
)
6363 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6364 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6365 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6366 n
= MASK_OP_RR1_N(ctx
->opcode
);
6367 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6369 temp
= tcg_temp_new();
6370 temp2
= tcg_temp_new();
6373 case OPC2_32_RR1_MUL_Q_32
:
6374 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6376 case OPC2_32_RR1_MUL_Q_64
:
6378 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6381 case OPC2_32_RR1_MUL_Q_32_L
:
6382 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6383 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6385 case OPC2_32_RR1_MUL_Q_64_L
:
6387 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6388 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6390 case OPC2_32_RR1_MUL_Q_32_U
:
6391 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6392 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6394 case OPC2_32_RR1_MUL_Q_64_U
:
6396 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6397 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6399 case OPC2_32_RR1_MUL_Q_32_LL
:
6400 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6401 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6402 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6404 case OPC2_32_RR1_MUL_Q_32_UU
:
6405 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6406 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6407 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6409 case OPC2_32_RR1_MULR_Q_32_L
:
6410 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6411 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6412 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6414 case OPC2_32_RR1_MULR_Q_32_U
:
6415 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6416 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6417 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6420 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6425 static void decode_rr2_mul(DisasContext
*ctx
)
6430 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6431 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6432 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6433 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6435 case OPC2_32_RR2_MUL_32
:
6436 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6438 case OPC2_32_RR2_MUL_64
:
6440 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6443 case OPC2_32_RR2_MULS_32
:
6444 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6447 case OPC2_32_RR2_MUL_U_64
:
6449 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6452 case OPC2_32_RR2_MULS_U_32
:
6453 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6457 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6462 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6469 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6470 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6471 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6472 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6473 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6474 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6477 case OPC2_32_RRPW_EXTR
:
6479 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6483 if (pos
+ width
<= 32) {
6484 /* optimize special cases */
6485 if ((pos
== 0) && (width
== 8)) {
6486 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6487 } else if ((pos
== 0) && (width
== 16)) {
6488 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6490 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6491 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6495 case OPC2_32_RRPW_EXTR_U
:
6497 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6499 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6500 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6503 case OPC2_32_RRPW_IMASK
:
6506 if (pos
+ width
<= 32) {
6507 temp
= tcg_temp_new();
6508 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6509 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6510 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6514 case OPC2_32_RRPW_INSERT
:
6515 if (pos
+ width
<= 32) {
6516 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6521 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6526 static void decode_rrr_cond_select(DisasContext
*ctx
)
6532 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6533 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6534 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6535 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6536 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6539 case OPC2_32_RRR_CADD
:
6540 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6541 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6543 case OPC2_32_RRR_CADDN
:
6544 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6547 case OPC2_32_RRR_CSUB
:
6548 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6551 case OPC2_32_RRR_CSUBN
:
6552 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6555 case OPC2_32_RRR_SEL
:
6556 temp
= tcg_constant_i32(0);
6557 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6558 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6560 case OPC2_32_RRR_SELN
:
6561 temp
= tcg_constant_i32(0);
6562 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6563 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6566 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6570 static void decode_rrr_divide(DisasContext
*ctx
)
6576 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6577 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6578 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6579 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6580 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6583 case OPC2_32_RRR_DVADJ
:
6586 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6587 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6589 case OPC2_32_RRR_DVSTEP
:
6592 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6593 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6595 case OPC2_32_RRR_DVSTEP_U
:
6598 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6599 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6601 case OPC2_32_RRR_IXMAX
:
6604 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6605 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6607 case OPC2_32_RRR_IXMAX_U
:
6610 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6611 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6613 case OPC2_32_RRR_IXMIN
:
6616 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6617 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6619 case OPC2_32_RRR_IXMIN_U
:
6622 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6623 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6625 case OPC2_32_RRR_PACK
:
6627 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6628 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6630 case OPC2_32_RRR_ADD_F
:
6631 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6633 case OPC2_32_RRR_SUB_F
:
6634 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6636 case OPC2_32_RRR_MADD_F
:
6637 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6638 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6640 case OPC2_32_RRR_MSUB_F
:
6641 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6642 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6645 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6650 static void decode_rrr2_madd(DisasContext
*ctx
)
6653 uint32_t r1
, r2
, r3
, r4
;
6655 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6656 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6657 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6658 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6659 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6661 case OPC2_32_RRR2_MADD_32
:
6662 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6665 case OPC2_32_RRR2_MADD_64
:
6668 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6669 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6671 case OPC2_32_RRR2_MADDS_32
:
6672 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6673 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6675 case OPC2_32_RRR2_MADDS_64
:
6678 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6679 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6681 case OPC2_32_RRR2_MADD_U_64
:
6684 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6685 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6687 case OPC2_32_RRR2_MADDS_U_32
:
6688 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6689 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6691 case OPC2_32_RRR2_MADDS_U_64
:
6694 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6695 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6698 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6702 static void decode_rrr2_msub(DisasContext
*ctx
)
6705 uint32_t r1
, r2
, r3
, r4
;
6707 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6708 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6709 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6710 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6711 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6714 case OPC2_32_RRR2_MSUB_32
:
6715 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6718 case OPC2_32_RRR2_MSUB_64
:
6721 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6722 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6724 case OPC2_32_RRR2_MSUBS_32
:
6725 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6726 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6728 case OPC2_32_RRR2_MSUBS_64
:
6731 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6732 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6734 case OPC2_32_RRR2_MSUB_U_64
:
6735 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6736 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6738 case OPC2_32_RRR2_MSUBS_U_32
:
6739 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6740 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6742 case OPC2_32_RRR2_MSUBS_U_64
:
6745 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6746 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6749 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6754 static void decode_rrr1_madd(DisasContext
*ctx
)
6757 uint32_t r1
, r2
, r3
, r4
, n
;
6759 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6760 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6761 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6762 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6763 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6764 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6767 case OPC2_32_RRR1_MADD_H_LL
:
6770 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6771 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6773 case OPC2_32_RRR1_MADD_H_LU
:
6776 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6777 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6779 case OPC2_32_RRR1_MADD_H_UL
:
6782 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6783 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6785 case OPC2_32_RRR1_MADD_H_UU
:
6788 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6789 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6791 case OPC2_32_RRR1_MADDS_H_LL
:
6794 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6795 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6797 case OPC2_32_RRR1_MADDS_H_LU
:
6800 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6801 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6803 case OPC2_32_RRR1_MADDS_H_UL
:
6806 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6807 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6809 case OPC2_32_RRR1_MADDS_H_UU
:
6812 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6813 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6815 case OPC2_32_RRR1_MADDM_H_LL
:
6818 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6819 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6821 case OPC2_32_RRR1_MADDM_H_LU
:
6824 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6825 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6827 case OPC2_32_RRR1_MADDM_H_UL
:
6830 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6831 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6833 case OPC2_32_RRR1_MADDM_H_UU
:
6836 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6837 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6839 case OPC2_32_RRR1_MADDMS_H_LL
:
6842 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6843 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6845 case OPC2_32_RRR1_MADDMS_H_LU
:
6848 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6849 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6851 case OPC2_32_RRR1_MADDMS_H_UL
:
6854 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6855 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6857 case OPC2_32_RRR1_MADDMS_H_UU
:
6860 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6861 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6863 case OPC2_32_RRR1_MADDR_H_LL
:
6864 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6865 cpu_gpr_d
[r2
], n
, MODE_LL
);
6867 case OPC2_32_RRR1_MADDR_H_LU
:
6868 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6869 cpu_gpr_d
[r2
], n
, MODE_LU
);
6871 case OPC2_32_RRR1_MADDR_H_UL
:
6872 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6873 cpu_gpr_d
[r2
], n
, MODE_UL
);
6875 case OPC2_32_RRR1_MADDR_H_UU
:
6876 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6877 cpu_gpr_d
[r2
], n
, MODE_UU
);
6879 case OPC2_32_RRR1_MADDRS_H_LL
:
6880 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6881 cpu_gpr_d
[r2
], n
, MODE_LL
);
6883 case OPC2_32_RRR1_MADDRS_H_LU
:
6884 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6885 cpu_gpr_d
[r2
], n
, MODE_LU
);
6887 case OPC2_32_RRR1_MADDRS_H_UL
:
6888 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6889 cpu_gpr_d
[r2
], n
, MODE_UL
);
6891 case OPC2_32_RRR1_MADDRS_H_UU
:
6892 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6893 cpu_gpr_d
[r2
], n
, MODE_UU
);
6896 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6900 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6903 uint32_t r1
, r2
, r3
, r4
, n
;
6906 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6907 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6908 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6909 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6910 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6911 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6913 temp
= tcg_temp_new();
6914 temp2
= tcg_temp_new();
6917 case OPC2_32_RRR1_MADD_Q_32
:
6918 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6919 cpu_gpr_d
[r2
], n
, 32);
6921 case OPC2_32_RRR1_MADD_Q_64
:
6924 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6925 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6928 case OPC2_32_RRR1_MADD_Q_32_L
:
6929 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6930 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6933 case OPC2_32_RRR1_MADD_Q_64_L
:
6936 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6937 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6938 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6941 case OPC2_32_RRR1_MADD_Q_32_U
:
6942 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6943 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6946 case OPC2_32_RRR1_MADD_Q_64_U
:
6949 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6950 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6951 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6954 case OPC2_32_RRR1_MADD_Q_32_LL
:
6955 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6956 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6957 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6959 case OPC2_32_RRR1_MADD_Q_64_LL
:
6962 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6963 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6964 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6965 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6967 case OPC2_32_RRR1_MADD_Q_32_UU
:
6968 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6969 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6970 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6972 case OPC2_32_RRR1_MADD_Q_64_UU
:
6975 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6976 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6977 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6978 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6980 case OPC2_32_RRR1_MADDS_Q_32
:
6981 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6982 cpu_gpr_d
[r2
], n
, 32);
6984 case OPC2_32_RRR1_MADDS_Q_64
:
6987 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6988 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6991 case OPC2_32_RRR1_MADDS_Q_32_L
:
6992 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6993 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6996 case OPC2_32_RRR1_MADDS_Q_64_L
:
6999 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7000 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7001 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7004 case OPC2_32_RRR1_MADDS_Q_32_U
:
7005 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7006 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7009 case OPC2_32_RRR1_MADDS_Q_64_U
:
7012 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7013 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7014 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7017 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7018 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7019 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7020 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7022 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7025 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7026 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7027 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7028 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7030 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7031 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7032 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7033 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7035 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7038 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7039 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7040 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7041 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7043 case OPC2_32_RRR1_MADDR_H_64_UL
:
7045 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7046 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7048 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7050 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7051 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7053 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7054 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7055 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7056 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7058 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7059 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7060 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7061 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7063 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7064 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7065 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7066 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7068 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7069 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7070 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7071 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7074 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7078 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7081 uint32_t r1
, r2
, r3
, r4
, n
;
7083 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7084 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7085 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7086 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7087 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7088 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7091 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7094 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7095 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7097 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7100 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7101 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7103 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7106 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7107 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7109 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7112 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7113 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7115 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7118 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7119 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7122 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7125 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7126 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7129 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7132 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7133 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7136 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7139 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7140 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7143 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7146 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7147 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7150 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7153 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7154 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7157 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7160 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7161 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7164 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7167 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7168 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7171 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7174 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7175 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7178 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7181 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7182 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7185 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7188 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7189 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7192 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7195 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7196 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7199 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7200 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7201 cpu_gpr_d
[r2
], n
, MODE_LL
);
7203 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7204 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7205 cpu_gpr_d
[r2
], n
, MODE_LU
);
7207 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7208 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7209 cpu_gpr_d
[r2
], n
, MODE_UL
);
7211 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7212 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7213 cpu_gpr_d
[r2
], n
, MODE_UU
);
7215 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7216 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7217 cpu_gpr_d
[r2
], n
, MODE_LL
);
7219 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7220 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7221 cpu_gpr_d
[r2
], n
, MODE_LU
);
7223 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7224 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7225 cpu_gpr_d
[r2
], n
, MODE_UL
);
7227 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7228 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7229 cpu_gpr_d
[r2
], n
, MODE_UU
);
7232 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7236 static void decode_rrr1_msub(DisasContext
*ctx
)
7239 uint32_t r1
, r2
, r3
, r4
, n
;
7241 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7242 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7243 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7244 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7245 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7246 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7249 case OPC2_32_RRR1_MSUB_H_LL
:
7252 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7253 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7255 case OPC2_32_RRR1_MSUB_H_LU
:
7258 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7259 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7261 case OPC2_32_RRR1_MSUB_H_UL
:
7264 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7265 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7267 case OPC2_32_RRR1_MSUB_H_UU
:
7270 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7271 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7273 case OPC2_32_RRR1_MSUBS_H_LL
:
7276 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7277 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7279 case OPC2_32_RRR1_MSUBS_H_LU
:
7282 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7283 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7285 case OPC2_32_RRR1_MSUBS_H_UL
:
7288 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7289 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7291 case OPC2_32_RRR1_MSUBS_H_UU
:
7294 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7295 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7297 case OPC2_32_RRR1_MSUBM_H_LL
:
7300 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7301 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7303 case OPC2_32_RRR1_MSUBM_H_LU
:
7306 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7307 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7309 case OPC2_32_RRR1_MSUBM_H_UL
:
7312 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7313 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7315 case OPC2_32_RRR1_MSUBM_H_UU
:
7318 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7319 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7321 case OPC2_32_RRR1_MSUBMS_H_LL
:
7324 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7325 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7327 case OPC2_32_RRR1_MSUBMS_H_LU
:
7330 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7331 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7333 case OPC2_32_RRR1_MSUBMS_H_UL
:
7336 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7337 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7339 case OPC2_32_RRR1_MSUBMS_H_UU
:
7342 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7343 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7345 case OPC2_32_RRR1_MSUBR_H_LL
:
7346 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7347 cpu_gpr_d
[r2
], n
, MODE_LL
);
7349 case OPC2_32_RRR1_MSUBR_H_LU
:
7350 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7351 cpu_gpr_d
[r2
], n
, MODE_LU
);
7353 case OPC2_32_RRR1_MSUBR_H_UL
:
7354 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7355 cpu_gpr_d
[r2
], n
, MODE_UL
);
7357 case OPC2_32_RRR1_MSUBR_H_UU
:
7358 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7359 cpu_gpr_d
[r2
], n
, MODE_UU
);
7361 case OPC2_32_RRR1_MSUBRS_H_LL
:
7362 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7363 cpu_gpr_d
[r2
], n
, MODE_LL
);
7365 case OPC2_32_RRR1_MSUBRS_H_LU
:
7366 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7367 cpu_gpr_d
[r2
], n
, MODE_LU
);
7369 case OPC2_32_RRR1_MSUBRS_H_UL
:
7370 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7371 cpu_gpr_d
[r2
], n
, MODE_UL
);
7373 case OPC2_32_RRR1_MSUBRS_H_UU
:
7374 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7375 cpu_gpr_d
[r2
], n
, MODE_UU
);
7378 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7382 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7385 uint32_t r1
, r2
, r3
, r4
, n
;
7388 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7389 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7390 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7391 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7392 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7393 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7395 temp
= tcg_temp_new();
7396 temp2
= tcg_temp_new();
7399 case OPC2_32_RRR1_MSUB_Q_32
:
7400 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7401 cpu_gpr_d
[r2
], n
, 32);
7403 case OPC2_32_RRR1_MSUB_Q_64
:
7406 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7407 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7410 case OPC2_32_RRR1_MSUB_Q_32_L
:
7411 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7412 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7415 case OPC2_32_RRR1_MSUB_Q_64_L
:
7418 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7419 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7420 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7423 case OPC2_32_RRR1_MSUB_Q_32_U
:
7424 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7425 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7428 case OPC2_32_RRR1_MSUB_Q_64_U
:
7431 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7432 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7433 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7436 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7437 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7438 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7439 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7441 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7444 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7445 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7446 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7447 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7449 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7450 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7451 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7452 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7454 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7457 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7458 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7459 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7460 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7462 case OPC2_32_RRR1_MSUBS_Q_32
:
7463 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7464 cpu_gpr_d
[r2
], n
, 32);
7466 case OPC2_32_RRR1_MSUBS_Q_64
:
7469 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7470 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7473 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7474 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7475 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7478 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7481 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7482 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7483 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7486 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7487 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7488 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7491 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7494 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7495 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7496 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7499 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7500 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7501 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7502 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7504 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7507 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7508 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7509 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7510 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7512 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7513 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7514 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7515 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7517 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7520 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7521 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7522 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7523 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7525 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7527 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7528 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7530 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7532 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7533 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7535 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7536 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7537 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7538 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7540 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7541 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7542 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7543 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7545 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7546 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7547 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7548 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7550 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7551 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7552 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7553 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7556 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7560 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7563 uint32_t r1
, r2
, r3
, r4
, n
;
7565 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7566 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7567 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7568 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7569 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7570 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7573 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7576 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7577 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7579 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7582 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7583 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7585 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7588 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7589 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7591 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7594 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7595 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7597 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7600 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7601 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7604 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7607 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7608 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7611 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7614 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7615 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7618 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7621 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7622 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7625 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7628 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7629 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7632 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7635 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7636 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7639 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7642 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7643 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7646 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7649 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7650 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7653 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7656 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7657 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7660 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7663 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7664 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7667 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7670 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7671 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7674 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7677 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7678 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7681 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7682 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7683 cpu_gpr_d
[r2
], n
, MODE_LL
);
7685 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7686 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7687 cpu_gpr_d
[r2
], n
, MODE_LU
);
7689 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7690 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7691 cpu_gpr_d
[r2
], n
, MODE_UL
);
7693 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7694 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7695 cpu_gpr_d
[r2
], n
, MODE_UU
);
7697 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7698 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7699 cpu_gpr_d
[r2
], n
, MODE_LL
);
7701 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7702 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7703 cpu_gpr_d
[r2
], n
, MODE_LU
);
7705 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7706 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7707 cpu_gpr_d
[r2
], n
, MODE_UL
);
7709 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7710 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7711 cpu_gpr_d
[r2
], n
, MODE_UU
);
7714 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7719 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7723 TCGv tmp_width
, tmp_pos
;
7725 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7726 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7727 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7728 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7729 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7731 tmp_pos
= tcg_temp_new();
7732 tmp_width
= tcg_temp_new();
7735 case OPC2_32_RRRR_DEXTR
:
7736 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7738 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7740 TCGv msw
= tcg_temp_new();
7741 TCGv zero
= tcg_constant_tl(0);
7742 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7743 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7744 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7746 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7747 * behaviour. So check that case here and set the low bits to zero
7748 * which effectivly returns cpu_gpr_d[r1]
7750 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7751 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7754 case OPC2_32_RRRR_EXTR
:
7755 case OPC2_32_RRRR_EXTR_U
:
7757 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7758 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7759 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7760 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7761 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7762 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7763 if (op2
== OPC2_32_RRRR_EXTR
) {
7764 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7766 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7769 case OPC2_32_RRRR_INSERT
:
7771 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7772 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7773 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7777 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7782 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7790 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7791 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7792 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7793 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7794 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7795 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7797 temp
= tcg_temp_new();
7800 case OPC2_32_RRRW_EXTR
:
7801 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7802 tcg_gen_addi_tl(temp
, temp
, width
);
7803 tcg_gen_subfi_tl(temp
, 32, temp
);
7804 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7805 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7807 case OPC2_32_RRRW_EXTR_U
:
7809 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7811 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7812 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7813 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7816 case OPC2_32_RRRW_IMASK
:
7817 temp2
= tcg_temp_new();
7819 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7820 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7821 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7822 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7823 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7825 case OPC2_32_RRRW_INSERT
:
7826 temp2
= tcg_temp_new();
7828 tcg_gen_movi_tl(temp
, width
);
7829 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7830 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7833 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7838 static void decode_sys_interrupts(DisasContext
*ctx
)
7845 op2
= MASK_OP_SYS_OP2(ctx
->opcode
);
7846 r1
= MASK_OP_SYS_S1D(ctx
->opcode
);
7849 case OPC2_32_SYS_DEBUG
:
7850 /* raise EXCP_DEBUG */
7852 case OPC2_32_SYS_DISABLE
:
7853 tcg_gen_andi_tl(cpu_ICR
, cpu_ICR
, ~MASK_ICR_IE_1_3
);
7855 case OPC2_32_SYS_DSYNC
:
7857 case OPC2_32_SYS_ENABLE
:
7858 tcg_gen_ori_tl(cpu_ICR
, cpu_ICR
, MASK_ICR_IE_1_3
);
7860 case OPC2_32_SYS_ISYNC
:
7862 case OPC2_32_SYS_NOP
:
7864 case OPC2_32_SYS_RET
:
7865 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
7867 case OPC2_32_SYS_FRET
:
7870 case OPC2_32_SYS_RFE
:
7871 gen_helper_rfe(cpu_env
);
7872 tcg_gen_exit_tb(NULL
, 0);
7873 ctx
->base
.is_jmp
= DISAS_NORETURN
;
7875 case OPC2_32_SYS_RFM
:
7876 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
7877 tmp
= tcg_temp_new();
7878 l1
= gen_new_label();
7880 tcg_gen_ld32u_tl(tmp
, cpu_env
, offsetof(CPUTriCoreState
, DBGSR
));
7881 tcg_gen_andi_tl(tmp
, tmp
, MASK_DBGSR_DE
);
7882 tcg_gen_brcondi_tl(TCG_COND_NE
, tmp
, 1, l1
);
7883 gen_helper_rfm(cpu_env
);
7885 tcg_gen_exit_tb(NULL
, 0);
7886 ctx
->base
.is_jmp
= DISAS_NORETURN
;
7888 /* generate privilege trap */
7891 case OPC2_32_SYS_RSLCX
:
7892 gen_helper_rslcx(cpu_env
);
7894 case OPC2_32_SYS_SVLCX
:
7895 gen_helper_svlcx(cpu_env
);
7897 case OPC2_32_SYS_RESTORE
:
7898 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
7899 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
||
7900 (ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_UM1
) {
7901 tcg_gen_deposit_tl(cpu_ICR
, cpu_ICR
, cpu_gpr_d
[r1
], 8, 1);
7902 } /* else raise privilege trap */
7904 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7907 case OPC2_32_SYS_TRAPSV
:
7908 l1
= gen_new_label();
7909 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_SV
, 0, l1
);
7910 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_SOVF
);
7913 case OPC2_32_SYS_TRAPV
:
7914 l1
= gen_new_label();
7915 tcg_gen_brcondi_tl(TCG_COND_GE
, cpu_PSW_V
, 0, l1
);
7916 generate_trap(ctx
, TRAPC_ASSERT
, TIN5_OVF
);
7920 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7924 static void decode_32Bit_opc(DisasContext
*ctx
)
7928 int32_t address
, const16
;
7931 TCGv temp
, temp2
, temp3
;
7933 op1
= MASK_OP_MAJOR(ctx
->opcode
);
7935 /* handle JNZ.T opcode only being 7 bit long */
7936 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
7937 op1
= OPCM_32_BRN_JTT
;
7942 case OPCM_32_ABS_LDW
:
7943 decode_abs_ldw(ctx
);
7945 case OPCM_32_ABS_LDB
:
7946 decode_abs_ldb(ctx
);
7948 case OPCM_32_ABS_LDMST_SWAP
:
7949 decode_abs_ldst_swap(ctx
);
7951 case OPCM_32_ABS_LDST_CONTEXT
:
7952 decode_abs_ldst_context(ctx
);
7954 case OPCM_32_ABS_STORE
:
7955 decode_abs_store(ctx
);
7957 case OPCM_32_ABS_STOREB_H
:
7958 decode_abs_storeb_h(ctx
);
7960 case OPC1_32_ABS_STOREQ
:
7961 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7962 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7963 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7964 temp2
= tcg_temp_new();
7966 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
7967 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
7969 case OPC1_32_ABS_LD_Q
:
7970 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7971 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7972 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7974 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
7975 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
7977 case OPC1_32_ABS_LEA
:
7978 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7979 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7980 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
7983 case OPC1_32_ABSB_ST_T
:
7984 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7985 b
= MASK_OP_ABSB_B(ctx
->opcode
);
7986 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
7988 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
7989 temp2
= tcg_temp_new();
7991 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7992 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
7993 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
7994 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7997 case OPC1_32_B_CALL
:
7998 case OPC1_32_B_CALLA
:
7999 case OPC1_32_B_FCALL
:
8000 case OPC1_32_B_FCALLA
:
8005 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
8006 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
8009 case OPCM_32_BIT_ANDACC
:
8010 decode_bit_andacc(ctx
);
8012 case OPCM_32_BIT_LOGICAL_T1
:
8013 decode_bit_logical_t(ctx
);
8015 case OPCM_32_BIT_INSERT
:
8016 decode_bit_insert(ctx
);
8018 case OPCM_32_BIT_LOGICAL_T2
:
8019 decode_bit_logical_t2(ctx
);
8021 case OPCM_32_BIT_ORAND
:
8022 decode_bit_orand(ctx
);
8024 case OPCM_32_BIT_SH_LOGIC1
:
8025 decode_bit_sh_logic1(ctx
);
8027 case OPCM_32_BIT_SH_LOGIC2
:
8028 decode_bit_sh_logic2(ctx
);
8031 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
8032 decode_bo_addrmode_post_pre_base(ctx
);
8034 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
8035 decode_bo_addrmode_bitreverse_circular(ctx
);
8037 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
8038 decode_bo_addrmode_ld_post_pre_base(ctx
);
8040 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
8041 decode_bo_addrmode_ld_bitreverse_circular(ctx
);
8043 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
8044 decode_bo_addrmode_stctx_post_pre_base(ctx
);
8046 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
8047 decode_bo_addrmode_ldmst_bitreverse_circular(ctx
);
8050 case OPC1_32_BOL_LD_A_LONGOFF
:
8051 case OPC1_32_BOL_LD_W_LONGOFF
:
8052 case OPC1_32_BOL_LEA_LONGOFF
:
8053 case OPC1_32_BOL_ST_W_LONGOFF
:
8054 case OPC1_32_BOL_ST_A_LONGOFF
:
8055 case OPC1_32_BOL_LD_B_LONGOFF
:
8056 case OPC1_32_BOL_LD_BU_LONGOFF
:
8057 case OPC1_32_BOL_LD_H_LONGOFF
:
8058 case OPC1_32_BOL_LD_HU_LONGOFF
:
8059 case OPC1_32_BOL_ST_B_LONGOFF
:
8060 case OPC1_32_BOL_ST_H_LONGOFF
:
8061 decode_bol_opc(ctx
, op1
);
8064 case OPCM_32_BRC_EQ_NEQ
:
8065 case OPCM_32_BRC_GE
:
8066 case OPCM_32_BRC_JLT
:
8067 case OPCM_32_BRC_JNE
:
8068 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
8069 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
8070 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
8071 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
8074 case OPCM_32_BRN_JTT
:
8075 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
8076 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
8077 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
8080 case OPCM_32_BRR_EQ_NEQ
:
8081 case OPCM_32_BRR_ADDR_EQ_NEQ
:
8082 case OPCM_32_BRR_GE
:
8083 case OPCM_32_BRR_JLT
:
8084 case OPCM_32_BRR_JNE
:
8085 case OPCM_32_BRR_JNZ
:
8086 case OPCM_32_BRR_LOOP
:
8087 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
8088 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
8089 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
8090 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
8093 case OPCM_32_RC_LOGICAL_SHIFT
:
8094 decode_rc_logical_shift(ctx
);
8096 case OPCM_32_RC_ACCUMULATOR
:
8097 decode_rc_accumulator(ctx
);
8099 case OPCM_32_RC_SERVICEROUTINE
:
8100 decode_rc_serviceroutine(ctx
);
8102 case OPCM_32_RC_MUL
:
8106 case OPCM_32_RCPW_MASK_INSERT
:
8107 decode_rcpw_insert(ctx
);
8110 case OPC1_32_RCRR_INSERT
:
8111 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
8112 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
8113 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
8114 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
8115 temp
= tcg_constant_i32(const16
);
8116 temp2
= tcg_temp_new(); /* width*/
8117 temp3
= tcg_temp_new(); /* pos */
8121 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
8122 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
8124 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
8127 case OPCM_32_RCRW_MASK_INSERT
:
8128 decode_rcrw_insert(ctx
);
8131 case OPCM_32_RCR_COND_SELECT
:
8132 decode_rcr_cond_select(ctx
);
8134 case OPCM_32_RCR_MADD
:
8135 decode_rcr_madd(ctx
);
8137 case OPCM_32_RCR_MSUB
:
8138 decode_rcr_msub(ctx
);
8141 case OPC1_32_RLC_ADDI
:
8142 case OPC1_32_RLC_ADDIH
:
8143 case OPC1_32_RLC_ADDIH_A
:
8144 case OPC1_32_RLC_MFCR
:
8145 case OPC1_32_RLC_MOV
:
8146 case OPC1_32_RLC_MOV_64
:
8147 case OPC1_32_RLC_MOV_U
:
8148 case OPC1_32_RLC_MOV_H
:
8149 case OPC1_32_RLC_MOVH_A
:
8150 case OPC1_32_RLC_MTCR
:
8151 decode_rlc_opc(ctx
, op1
);
8154 case OPCM_32_RR_ACCUMULATOR
:
8155 decode_rr_accumulator(ctx
);
8157 case OPCM_32_RR_LOGICAL_SHIFT
:
8158 decode_rr_logical_shift(ctx
);
8160 case OPCM_32_RR_ADDRESS
:
8161 decode_rr_address(ctx
);
8163 case OPCM_32_RR_IDIRECT
:
8164 decode_rr_idirect(ctx
);
8166 case OPCM_32_RR_DIVIDE
:
8167 decode_rr_divide(ctx
);
8170 case OPCM_32_RR1_MUL
:
8171 decode_rr1_mul(ctx
);
8173 case OPCM_32_RR1_MULQ
:
8174 decode_rr1_mulq(ctx
);
8177 case OPCM_32_RR2_MUL
:
8178 decode_rr2_mul(ctx
);
8181 case OPCM_32_RRPW_EXTRACT_INSERT
:
8182 decode_rrpw_extract_insert(ctx
);
8184 case OPC1_32_RRPW_DEXTR
:
8185 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
8186 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
8187 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
8188 const16
= MASK_OP_RRPW_POS(ctx
->opcode
);
8190 tcg_gen_extract2_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
8194 case OPCM_32_RRR_COND_SELECT
:
8195 decode_rrr_cond_select(ctx
);
8197 case OPCM_32_RRR_DIVIDE
:
8198 decode_rrr_divide(ctx
);
8201 case OPCM_32_RRR2_MADD
:
8202 decode_rrr2_madd(ctx
);
8204 case OPCM_32_RRR2_MSUB
:
8205 decode_rrr2_msub(ctx
);
8208 case OPCM_32_RRR1_MADD
:
8209 decode_rrr1_madd(ctx
);
8211 case OPCM_32_RRR1_MADDQ_H
:
8212 decode_rrr1_maddq_h(ctx
);
8214 case OPCM_32_RRR1_MADDSU_H
:
8215 decode_rrr1_maddsu_h(ctx
);
8217 case OPCM_32_RRR1_MSUB_H
:
8218 decode_rrr1_msub(ctx
);
8220 case OPCM_32_RRR1_MSUB_Q
:
8221 decode_rrr1_msubq_h(ctx
);
8223 case OPCM_32_RRR1_MSUBAD_H
:
8224 decode_rrr1_msubad_h(ctx
);
8227 case OPCM_32_RRRR_EXTRACT_INSERT
:
8228 decode_rrrr_extract_insert(ctx
);
8231 case OPCM_32_RRRW_EXTRACT_INSERT
:
8232 decode_rrrw_extract_insert(ctx
);
8235 case OPCM_32_SYS_INTERRUPTS
:
8236 decode_sys_interrupts(ctx
);
8238 case OPC1_32_SYS_RSTV
:
8239 tcg_gen_movi_tl(cpu_PSW_V
, 0);
8240 tcg_gen_mov_tl(cpu_PSW_SV
, cpu_PSW_V
);
8241 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
8242 tcg_gen_mov_tl(cpu_PSW_SAV
, cpu_PSW_V
);
8245 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * A TriCore instruction is 16 bit wide iff bit 0 of its first halfword is
 * clear; otherwise it is a 32 bit instruction.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return !(insn & 0x1);
}
8254 static void tricore_tr_init_disas_context(DisasContextBase
*dcbase
,
8257 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8258 CPUTriCoreState
*env
= cs
->env_ptr
;
8259 ctx
->mem_idx
= cpu_mmu_index(env
, false);
8260 ctx
->hflags
= (uint32_t)ctx
->base
.tb
->flags
;
8261 ctx
->features
= env
->features
;
/* No per-TB setup is required for TriCore. */
static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
8268 static void tricore_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8270 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8272 tcg_gen_insn_start(ctx
->base
.pc_next
);
8275 static bool insn_crosses_page(CPUTriCoreState
*env
, DisasContext
*ctx
)
8278 * Return true if the insn at ctx->base.pc_next might cross a page boundary.
8279 * (False positives are OK, false negatives are not.)
8280 * Our caller ensures we are only called if dc->base.pc_next is less than
8281 * 4 bytes from the page boundary, so we cross the page if the first
8282 * 16 bits indicate that this is a 32 bit insn.
8284 uint16_t insn
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8286 return !tricore_insn_is_16bit(insn
);
8290 static void tricore_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
8292 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8293 CPUTriCoreState
*env
= cpu
->env_ptr
;
8297 insn_lo
= cpu_lduw_code(env
, ctx
->base
.pc_next
);
8298 is_16bit
= tricore_insn_is_16bit(insn_lo
);
8300 ctx
->opcode
= insn_lo
;
8301 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 2;
8302 decode_16Bit_opc(ctx
);
8304 uint32_t insn_hi
= cpu_lduw_code(env
, ctx
->base
.pc_next
+ 2);
8305 ctx
->opcode
= insn_hi
<< 16 | insn_lo
;
8306 ctx
->pc_succ_insn
= ctx
->base
.pc_next
+ 4;
8307 decode_32Bit_opc(ctx
);
8309 ctx
->base
.pc_next
= ctx
->pc_succ_insn
;
8311 if (ctx
->base
.is_jmp
== DISAS_NEXT
) {
8312 target_ulong page_start
;
8314 page_start
= ctx
->base
.pc_first
& TARGET_PAGE_MASK
;
8315 if (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
8316 || (ctx
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
- 3
8317 && insn_crosses_page(env
, ctx
))) {
8318 ctx
->base
.is_jmp
= DISAS_TOO_MANY
;
8323 static void tricore_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8325 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
8327 switch (ctx
->base
.is_jmp
) {
8328 case DISAS_TOO_MANY
:
8329 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
);
8331 case DISAS_NORETURN
:
8334 g_assert_not_reached();
8338 static void tricore_tr_disas_log(const DisasContextBase
*dcbase
,
8339 CPUState
*cpu
, FILE *logfile
)
8341 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
8342 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/* Translator hooks wired into the generic translator_loop(). */
static const TranslatorOps tricore_tr_ops = {
    .init_disas_context = tricore_tr_init_disas_context,
    .tb_start           = tricore_tr_tb_start,
    .insn_start         = tricore_tr_insn_start,
    .translate_insn     = tricore_tr_translate_insn,
    .tb_stop            = tricore_tr_tb_stop,
    .disas_log          = tricore_tr_disas_log,
};
8355 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
8356 target_ulong pc
, void *host_pc
)
8359 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
,
8360 &tricore_tr_ops
, &ctx
.base
);
/*
 * Put the architectural state back into its after-reset configuration.
 * NOTE(review): only the PSW write is visible here -- confirm no further
 * register initialisation is expected by callers.
 */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    psw_write(env, 0xb80);
}
8376 static void tricore_tcg_init_csfr(void)
8378 cpu_PCXI
= tcg_global_mem_new(cpu_env
,
8379 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
8380 cpu_PSW
= tcg_global_mem_new(cpu_env
,
8381 offsetof(CPUTriCoreState
, PSW
), "PSW");
8382 cpu_PC
= tcg_global_mem_new(cpu_env
,
8383 offsetof(CPUTriCoreState
, PC
), "PC");
8384 cpu_ICR
= tcg_global_mem_new(cpu_env
,
8385 offsetof(CPUTriCoreState
, ICR
), "ICR");
8388 void tricore_tcg_init(void)
8393 for (i
= 0 ; i
< 16 ; i
++) {
8394 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8395 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8398 for (i
= 0 ; i
< 16 ; i
++) {
8399 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8400 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8403 tricore_tcg_init_csfr();
8404 /* init PSW flag cache */
8405 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8406 offsetof(CPUTriCoreState
, PSW_USB_C
),
8408 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8409 offsetof(CPUTriCoreState
, PSW_USB_V
),
8411 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8412 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8414 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8415 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8417 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8418 offsetof(CPUTriCoreState
, PSW_USB_SAV
),