2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "tricore-opcodes.h"
33 #include "exec/translator.h"
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
40 #define DISAS_EXIT DISAS_TARGET_0
41 #define DISAS_EXIT_UPDATE DISAS_TARGET_1
42 #define DISAS_JUMP DISAS_TARGET_2
/* TCG global variables mapped onto fields of CPUTriCoreState */
static TCGv cpu_gpr_a[16];   /* address registers A[0..15] */
static TCGv cpu_gpr_d[16];   /* data registers D[0..15] */
/* cached PSW flag bits, kept in separate globals for cheap flag codegen */
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
/* Register names used when creating the A-register TCG globals;
   A10 is the stack pointer and is therefore shown as "sp". */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Register names used when creating the D-register TCG globals. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9"  , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
73 typedef struct DisasContext
{
74 DisasContextBase base
;
75 target_ulong pc_succ_insn
;
77 /* Routine used to access memory */
81 uint32_t icr_ie_mask
, icr_ie_offset
;
84 static int has_feature(DisasContext
*ctx
, int feature
)
86 return (ctx
->features
& (1ULL << feature
)) != 0;
/*
 * Dump architectural state (PC, status/context registers and all 32 GPRs)
 * to stream f, for QEMU's "info registers"-style output.
 */
void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    uint32_t psw;
    int i;

    /* PSW flag bits are cached in TCG globals; fold them back first. */
    psw = psw_read(env);

    qemu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    qemu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    qemu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    qemu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    qemu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    qemu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    for (i = 0; i < 16; ++i) {
        /* NOTE(review): 4-registers-per-row grouping reconstructed from
           upstream QEMU — confirm against the original file. */
        if ((i & 3) == 0) {
            qemu_fprintf(f, "\nGPR A%02d:", i);
        }
        qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, "\nGPR D%02d:", i);
        }
        qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
    }
    qemu_fprintf(f, "\n");
}
128 * Functions to generate micro-ops
131 /* Macros for generating helpers */
133 #define gen_helper_1arg(name, arg) do { \
134 TCGv_i32 helper_tmp = tcg_constant_i32(arg); \
135 gen_helper_##name(cpu_env, helper_tmp); \
138 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
139 TCGv arg00 = tcg_temp_new(); \
140 TCGv arg01 = tcg_temp_new(); \
141 TCGv arg11 = tcg_temp_new(); \
142 tcg_gen_sari_tl(arg00, arg0, 16); \
143 tcg_gen_ext16s_tl(arg01, arg0); \
144 tcg_gen_ext16s_tl(arg11, arg1); \
145 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
148 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
149 TCGv arg00 = tcg_temp_new(); \
150 TCGv arg01 = tcg_temp_new(); \
151 TCGv arg10 = tcg_temp_new(); \
152 TCGv arg11 = tcg_temp_new(); \
153 tcg_gen_sari_tl(arg00, arg0, 16); \
154 tcg_gen_ext16s_tl(arg01, arg0); \
155 tcg_gen_sari_tl(arg11, arg1, 16); \
156 tcg_gen_ext16s_tl(arg10, arg1); \
157 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
160 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
161 TCGv arg00 = tcg_temp_new(); \
162 TCGv arg01 = tcg_temp_new(); \
163 TCGv arg10 = tcg_temp_new(); \
164 TCGv arg11 = tcg_temp_new(); \
165 tcg_gen_sari_tl(arg00, arg0, 16); \
166 tcg_gen_ext16s_tl(arg01, arg0); \
167 tcg_gen_sari_tl(arg10, arg1, 16); \
168 tcg_gen_ext16s_tl(arg11, arg1); \
169 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
172 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
173 TCGv arg00 = tcg_temp_new(); \
174 TCGv arg01 = tcg_temp_new(); \
175 TCGv arg11 = tcg_temp_new(); \
176 tcg_gen_sari_tl(arg01, arg0, 16); \
177 tcg_gen_ext16s_tl(arg00, arg0); \
178 tcg_gen_sari_tl(arg11, arg1, 16); \
179 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
182 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
183 TCGv_i64 ret = tcg_temp_new_i64(); \
184 TCGv_i64 arg1 = tcg_temp_new_i64(); \
186 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
187 gen_helper_##name(ret, arg1, arg2); \
188 tcg_gen_extr_i64_i32(rl, rh, ret); \
191 #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
192 TCGv_i64 ret = tcg_temp_new_i64(); \
194 gen_helper_##name(ret, cpu_env, arg1, arg2); \
195 tcg_gen_extr_i64_i32(rl, rh, ret); \
198 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
199 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
200 ((offset & 0x0fffff) << 1))
/* For two 32-bit registers used a 64-bit register, the first
   registernumber needs to be even. Otherwise we trap. */
static inline void generate_trap(DisasContext *ctx, int class, int tin);
/* Raise an opcode-error trap when reg is odd, i.e. not a valid
   64-bit (E-register) pair base. */
#define CHECK_REG_PAIR(reg) do {                      \
    if (reg & 0x1) {                                  \
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
    }                                                 \
} while (0)
211 /* Functions for load/save to/from memory */
213 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
214 int16_t con
, MemOp mop
)
216 TCGv temp
= tcg_temp_new();
217 tcg_gen_addi_tl(temp
, r2
, con
);
218 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
221 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
222 int16_t con
, MemOp mop
)
224 TCGv temp
= tcg_temp_new();
225 tcg_gen_addi_tl(temp
, r2
, con
);
226 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
229 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
231 TCGv_i64 temp
= tcg_temp_new_i64();
233 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
234 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
/* Store the pair rh:rl as a 64-bit value at address base + con. */
static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_st_2regs_64(rh, rl, temp, ctx);
}
245 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
247 TCGv_i64 temp
= tcg_temp_new_i64();
249 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEUQ
);
250 /* write back to two 32 bit regs */
251 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
/* Load a 64-bit value from address base + con into the pair rh:rl. */
static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_ld_2regs_64(rh, rl, temp, ctx);
}
/* Pre-increment store: EA = r2 + off; M(EA, mop) = r1; r2 = EA. */
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    /* write the updated address back to the base register */
    tcg_gen_mov_tl(r2, temp);
}
/* Pre-increment load: EA = r2 + off; r1 = M(EA, mop); r2 = EA. */
static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    /* write the updated address back to the base register */
    tcg_gen_mov_tl(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]);
   i.e. a load-modify-store where the high word of the E-register selects
   which bits of memory are replaced by the low word. */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* ereg must be an even (pair-base) register number */
    CHECK_REG_PAIR(ereg);
    /* temp = (M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32]) */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
    /* temp2 = (E[a][31:0] & E[a][63:32]); */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* temp = temp | temp2; */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp; */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
}
299 /* tmp = M(EA, word);
302 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
304 TCGv temp
= tcg_temp_new();
306 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
307 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
308 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
/* Compare-and-swap: store D[reg] to M(EA) only when M(EA) == D[reg+1];
   D[reg] always receives the old memory value. */
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* temp = old memory word */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp2 = (old == D[reg+1]) ? D[reg] : old */
    tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
                       cpu_gpr_d[reg], temp);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    /* return the old memory value in D[reg] */
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
/* Masked swap: memory gets (D[reg] & D[reg+1]) | (M(EA) & ~D[reg+1]);
   D[reg] receives the old memory word. D[reg+1] acts as the bit mask. */
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* bits selected by the mask come from D[reg] ... */
    tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
    /* ... all other bits keep their old memory value */
    tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
    tcg_gen_or_tl(temp2, temp2, temp3);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
337 /* We generate loads and store to core special function register (csfr) through
338 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
339 macros R, A and E, which allow read-only, all and endinit protected access.
340 These macros also specify in which ISA version the csfr was introduced. */
341 #define R(ADDRESS, REG, FEATURE) \
343 if (has_feature(ctx, FEATURE)) { \
344 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
347 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
348 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
349 static inline void gen_mfcr(DisasContext
*ctx
, TCGv ret
, int32_t offset
)
351 /* since we're caching PSW make this a special case */
352 if (offset
== 0xfe04) {
353 gen_helper_psw_read(ret
, cpu_env
);
356 #include "csfr.h.inc"
364 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
365 since no exception occurs */
366 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
368 if (has_feature(ctx, FEATURE)) { \
369 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
372 /* Endinit protected registers
373 TODO: Since the endinit bit is in a register of a not yet implemented
374 watchdog device, we handle endinit protected registers like
375 all-access registers for now. */
376 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
377 static inline void gen_mtcr(DisasContext
*ctx
, TCGv r1
,
380 if (ctx
->priv
== TRICORE_PRIV_SM
) {
381 /* since we're caching PSW make this a special case */
382 if (offset
== 0xfe04) {
383 gen_helper_psw_write(cpu_env
, r1
);
384 ctx
->base
.is_jmp
= DISAS_EXIT_UPDATE
;
387 #include "csfr.h.inc"
391 generate_trap(ctx
, TRAPC_PROT
, TIN1_PRIV
);
395 /* Functions for arithmetic instructions */
/* 32-bit add that also computes the TriCore PSW V/SV/AV/SAV flag bits. */
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V: overflow iff operands agree in sign but result differs */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* SV is the sticky accumulation of V */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits: AV = bit31 ^ bit30 of the result */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* SAV is the sticky accumulation of AV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
/* 64-bit signed add with TriCore PSW V/SV/AV/SAV flag computation;
   flags are derived from the high word of the 64-bit result. */
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_add_i64(result, r1, r2);
    /* calc v bit: overflow iff operands agree in sign but result differs */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit (sticky V) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits from the high word of the result */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV (sticky AV) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
}
/* Packed-halfword add/sub on a 32:32 pair: ret_low = op1(r1_low, r2),
   ret_high = op2(r1_high, r3). op1/op2 are tcg_gen_add_tl or
   tcg_gen_sub_tl; V is computed per half and combined, AV from both
   result halves. */
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();

    (*op1)(temp, r1_low, r2);
    /* calc V0 bit for the low half; the overflow test differs for
       add (andc) vs. sub (and) */
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }
    (*op2)(temp3, r1_high, r3);
    /* calc V1 bit for the high half */
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    /* calc sv bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* write back results */
    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);
    /* calc AV from both halves */
    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
490 /* ret = r2 + (r1 * r3); */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* widen to 64-bit so the multiply-accumulate cannot wrap */
    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);
    /* truncated low word is the architectural result */
    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V: set iff the 64-bit sum is outside the int32 range
       t1 > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    /* V lives in bit 31 of cpu_PSW_V */
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV (sticky V) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
522 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
524 TCGv temp
= tcg_constant_i32(con
);
525 gen_madd32_d(ret
, r1
, r2
, temp
);
/* E-register MADD: ret_high:ret_low = r2_high:r2_low + (r1 * r3),
   signed, with V/SV/AV/SAV computation. */
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    /* t2:t1 = full 64-bit signed product r1 * r3 */
    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit from the high words of the addition */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* calc SV (sticky V) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits from the high result word */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}
/* Unsigned E-register MADD: ret = r2 + (r1 * r3);
   the add overflows exactly when the sum is below the product. */
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1
       calc V bit */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    /* V lives in bit 31 of cpu_PSW_V */
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV (sticky V) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits from the high result word */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
587 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
590 TCGv temp
= tcg_constant_i32(con
);
591 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
595 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
598 TCGv temp
= tcg_constant_i32(con
);
599 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
603 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
604 TCGv r3
, uint32_t n
, uint32_t mode
)
606 TCGv t_n
= tcg_constant_i32(n
);
607 TCGv temp
= tcg_temp_new();
608 TCGv temp2
= tcg_temp_new();
609 TCGv_i64 temp64
= tcg_temp_new_i64();
612 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
615 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
618 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
621 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
624 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
625 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
626 tcg_gen_add_tl
, tcg_gen_add_tl
);
630 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
631 TCGv r3
, uint32_t n
, uint32_t mode
)
633 TCGv t_n
= tcg_constant_i32(n
);
634 TCGv temp
= tcg_temp_new();
635 TCGv temp2
= tcg_temp_new();
636 TCGv_i64 temp64
= tcg_temp_new_i64();
639 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
642 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
645 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
648 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
651 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
652 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
653 tcg_gen_sub_tl
, tcg_gen_add_tl
);
657 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
658 TCGv r3
, uint32_t n
, uint32_t mode
)
660 TCGv t_n
= tcg_constant_i32(n
);
661 TCGv_i64 temp64
= tcg_temp_new_i64();
662 TCGv_i64 temp64_2
= tcg_temp_new_i64();
663 TCGv_i64 temp64_3
= tcg_temp_new_i64();
666 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
669 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
672 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
675 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
678 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
679 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
680 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
681 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
682 tcg_gen_shli_i64(temp64
, temp64
, 16);
684 gen_add64_d(temp64_2
, temp64_3
, temp64
);
685 /* write back result */
686 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
689 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
692 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
693 TCGv r3
, uint32_t n
, uint32_t mode
)
695 TCGv t_n
= tcg_constant_i32(n
);
696 TCGv temp
= tcg_temp_new();
697 TCGv temp2
= tcg_temp_new();
698 TCGv temp3
= tcg_temp_new();
699 TCGv_i64 temp64
= tcg_temp_new_i64();
703 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
706 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
709 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
712 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
715 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
716 gen_adds(ret_low
, r1_low
, temp
);
717 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
718 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
719 gen_adds(ret_high
, r1_high
, temp2
);
721 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
722 /* combine av bits */
723 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
726 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
729 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
730 TCGv r3
, uint32_t n
, uint32_t mode
)
732 TCGv t_n
= tcg_constant_i32(n
);
733 TCGv temp
= tcg_temp_new();
734 TCGv temp2
= tcg_temp_new();
735 TCGv temp3
= tcg_temp_new();
736 TCGv_i64 temp64
= tcg_temp_new_i64();
740 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
743 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
746 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
749 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
752 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
753 gen_subs(ret_low
, r1_low
, temp
);
754 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
755 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
756 gen_adds(ret_high
, r1_high
, temp2
);
758 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
759 /* combine av bits */
760 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
764 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
765 TCGv r3
, uint32_t n
, uint32_t mode
)
767 TCGv t_n
= tcg_constant_i32(n
);
768 TCGv_i64 temp64
= tcg_temp_new_i64();
769 TCGv_i64 temp64_2
= tcg_temp_new_i64();
773 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
776 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
779 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
782 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
785 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
786 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
787 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
788 tcg_gen_shli_i64(temp64
, temp64
, 16);
789 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
791 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
792 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
797 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
798 TCGv r3
, uint32_t n
, uint32_t mode
)
800 TCGv t_n
= tcg_constant_i32(n
);
801 TCGv_i64 temp64
= tcg_temp_new_i64();
802 TCGv_i64 temp64_2
= tcg_temp_new_i64();
803 TCGv_i64 temp64_3
= tcg_temp_new_i64();
806 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
809 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
812 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
815 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
818 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
819 gen_add64_d(temp64_3
, temp64_2
, temp64
);
820 /* write back result */
821 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
825 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
826 TCGv r3
, uint32_t n
, uint32_t mode
)
828 TCGv t_n
= tcg_constant_i32(n
);
829 TCGv_i64 temp64
= tcg_temp_new_i64();
830 TCGv_i64 temp64_2
= tcg_temp_new_i64();
833 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
836 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
839 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
842 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
845 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
846 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
847 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
851 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
854 TCGv t_n
= tcg_constant_i32(n
);
855 TCGv_i64 temp64
= tcg_temp_new_i64();
858 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
861 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
864 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
867 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
870 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
/* 32-bit MADDR.H: split r1 into the two halfword accumulators expected
   by the 64-bit variant and delegate to it. */
static inline void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* high accumulator = r1 & 0xffff0000, low accumulator = r1 << 16 */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
885 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
887 TCGv t_n
= tcg_constant_i32(n
);
888 TCGv temp
= tcg_temp_new();
889 TCGv temp2
= tcg_temp_new();
890 TCGv_i64 temp64
= tcg_temp_new_i64();
893 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
896 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
899 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
902 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
905 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
906 tcg_gen_shli_tl(temp
, r1
, 16);
907 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
912 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
913 uint32_t n
, uint32_t mode
)
915 TCGv t_n
= tcg_constant_i32(n
);
916 TCGv_i64 temp64
= tcg_temp_new_i64();
919 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
922 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
925 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
928 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
931 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
/* Saturating 32-bit MADDR.H: same halfword split as gen_maddr32_h but
   delegates to the saturating (ssov) 64-bit variant. */
static inline void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* high accumulator = r1 & 0xffff0000, low accumulator = r1 << 16 */
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
946 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
948 TCGv t_n
= tcg_constant_i32(n
);
949 TCGv temp
= tcg_temp_new();
950 TCGv temp2
= tcg_temp_new();
951 TCGv_i64 temp64
= tcg_temp_new_i64();
954 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
957 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
960 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
963 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
966 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
967 tcg_gen_shli_tl(temp
, r1
, 16);
968 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
972 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
974 TCGv t_n
= tcg_constant_i32(n
);
975 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
979 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
981 TCGv t_n
= tcg_constant_i32(n
);
982 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, t_n
);
986 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
989 TCGv temp
= tcg_temp_new();
990 TCGv temp2
= tcg_temp_new();
991 TCGv temp3
= tcg_temp_new();
992 TCGv_i64 t1
= tcg_temp_new_i64();
993 TCGv_i64 t2
= tcg_temp_new_i64();
994 TCGv_i64 t3
= tcg_temp_new_i64();
996 tcg_gen_ext_i32_i64(t2
, arg2
);
997 tcg_gen_ext_i32_i64(t3
, arg3
);
999 tcg_gen_mul_i64(t2
, t2
, t3
);
1000 tcg_gen_shli_i64(t2
, t2
, n
);
1002 tcg_gen_ext_i32_i64(t1
, arg1
);
1003 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1005 tcg_gen_add_i64(t3
, t1
, t2
);
1006 tcg_gen_extrl_i64_i32(temp3
, t3
);
1008 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1009 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1010 tcg_gen_or_i64(t1
, t1
, t2
);
1011 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1012 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1013 /* We produce an overflow on the host if the mul before was
1014 (0x80000000 * 0x80000000) << 1). If this is the
1015 case, we negate the ovf. */
1017 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1018 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1019 tcg_gen_and_tl(temp
, temp
, temp2
);
1020 tcg_gen_shli_tl(temp
, temp
, 31);
1021 /* negate v bit, if special condition */
1022 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1025 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1026 /* Calc AV/SAV bits */
1027 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1028 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1030 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1031 /* write back result */
1032 tcg_gen_mov_tl(ret
, temp3
);
1036 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1038 TCGv temp
= tcg_temp_new();
1039 TCGv temp2
= tcg_temp_new();
1041 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1042 } else { /* n is expected to be 1 */
1043 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1044 tcg_gen_shli_tl(temp
, temp
, 1);
1045 /* catch special case r1 = r2 = 0x8000 */
1046 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1047 tcg_gen_sub_tl(temp
, temp
, temp2
);
1049 gen_add_d(ret
, arg1
, temp
);
1053 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1055 TCGv temp
= tcg_temp_new();
1056 TCGv temp2
= tcg_temp_new();
1058 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1059 } else { /* n is expected to be 1 */
1060 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1061 tcg_gen_shli_tl(temp
, temp
, 1);
1062 /* catch special case r1 = r2 = 0x8000 */
1063 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1064 tcg_gen_sub_tl(temp
, temp
, temp2
);
1066 gen_adds(ret
, arg1
, temp
);
1070 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1071 TCGv arg3
, uint32_t n
)
1073 TCGv temp
= tcg_temp_new();
1074 TCGv temp2
= tcg_temp_new();
1075 TCGv_i64 t1
= tcg_temp_new_i64();
1076 TCGv_i64 t2
= tcg_temp_new_i64();
1077 TCGv_i64 t3
= tcg_temp_new_i64();
1080 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1081 } else { /* n is expected to be 1 */
1082 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1083 tcg_gen_shli_tl(temp
, temp
, 1);
1084 /* catch special case r1 = r2 = 0x8000 */
1085 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1086 tcg_gen_sub_tl(temp
, temp
, temp2
);
1088 tcg_gen_ext_i32_i64(t2
, temp
);
1089 tcg_gen_shli_i64(t2
, t2
, 16);
1090 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1091 gen_add64_d(t3
, t1
, t2
);
1092 /* write back result */
1093 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1097 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1098 TCGv arg3
, uint32_t n
)
1100 TCGv temp
= tcg_temp_new();
1101 TCGv temp2
= tcg_temp_new();
1102 TCGv_i64 t1
= tcg_temp_new_i64();
1103 TCGv_i64 t2
= tcg_temp_new_i64();
1106 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1107 } else { /* n is expected to be 1 */
1108 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1109 tcg_gen_shli_tl(temp
, temp
, 1);
1110 /* catch special case r1 = r2 = 0x8000 */
1111 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1112 tcg_gen_sub_tl(temp
, temp
, temp2
);
1114 tcg_gen_ext_i32_i64(t2
, temp
);
1115 tcg_gen_shli_i64(t2
, t2
, 16);
1116 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1118 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1119 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1123 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1124 TCGv arg3
, uint32_t n
)
1126 TCGv_i64 t1
= tcg_temp_new_i64();
1127 TCGv_i64 t2
= tcg_temp_new_i64();
1128 TCGv_i64 t3
= tcg_temp_new_i64();
1129 TCGv_i64 t4
= tcg_temp_new_i64();
1132 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1133 tcg_gen_ext_i32_i64(t2
, arg2
);
1134 tcg_gen_ext_i32_i64(t3
, arg3
);
1136 tcg_gen_mul_i64(t2
, t2
, t3
);
1138 tcg_gen_shli_i64(t2
, t2
, 1);
1140 tcg_gen_add_i64(t4
, t1
, t2
);
1142 tcg_gen_xor_i64(t3
, t4
, t1
);
1143 tcg_gen_xor_i64(t2
, t1
, t2
);
1144 tcg_gen_andc_i64(t3
, t3
, t2
);
1145 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1146 /* We produce an overflow on the host if the mul before was
1147 (0x80000000 * 0x80000000) << 1). If this is the
1148 case, we negate the ovf. */
1150 temp
= tcg_temp_new();
1151 temp2
= tcg_temp_new();
1152 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1153 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1154 tcg_gen_and_tl(temp
, temp
, temp2
);
1155 tcg_gen_shli_tl(temp
, temp
, 31);
1156 /* negate v bit, if special condition */
1157 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1159 /* write back result */
1160 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1162 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1163 /* Calc AV/SAV bits */
1164 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1165 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1167 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1171 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1174 TCGv_i64 t1
= tcg_temp_new_i64();
1175 TCGv_i64 t2
= tcg_temp_new_i64();
1176 TCGv_i64 t3
= tcg_temp_new_i64();
1178 tcg_gen_ext_i32_i64(t1
, arg1
);
1179 tcg_gen_ext_i32_i64(t2
, arg2
);
1180 tcg_gen_ext_i32_i64(t3
, arg3
);
1182 tcg_gen_mul_i64(t2
, t2
, t3
);
1183 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1185 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1189 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1190 TCGv arg3
, uint32_t n
)
1192 TCGv_i64 r1
= tcg_temp_new_i64();
1193 TCGv t_n
= tcg_constant_i32(n
);
1195 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1196 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1197 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1200 /* ret = r2 - (r1 * r3); */
1201 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1203 TCGv_i64 t1
= tcg_temp_new_i64();
1204 TCGv_i64 t2
= tcg_temp_new_i64();
1205 TCGv_i64 t3
= tcg_temp_new_i64();
1207 tcg_gen_ext_i32_i64(t1
, r1
);
1208 tcg_gen_ext_i32_i64(t2
, r2
);
1209 tcg_gen_ext_i32_i64(t3
, r3
);
1211 tcg_gen_mul_i64(t1
, t1
, t3
);
1212 tcg_gen_sub_i64(t1
, t2
, t1
);
1214 tcg_gen_extrl_i64_i32(ret
, t1
);
1217 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1218 /* result < -0x80000000 */
1219 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1220 tcg_gen_or_i64(t2
, t2
, t3
);
1221 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t2
);
1222 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1225 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1226 /* Calc AV/SAV bits */
1227 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1228 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1230 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1233 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1235 TCGv temp
= tcg_constant_i32(con
);
1236 gen_msub32_d(ret
, r1
, r2
, temp
);
1240 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1243 TCGv t1
= tcg_temp_new();
1244 TCGv t2
= tcg_temp_new();
1245 TCGv t3
= tcg_temp_new();
1246 TCGv t4
= tcg_temp_new();
1248 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1249 /* only the sub can overflow */
1250 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1252 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1253 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1254 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1256 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1257 /* Calc AV/SAV bits */
1258 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1259 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1261 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1262 /* write back the result */
1263 tcg_gen_mov_tl(ret_low
, t3
);
1264 tcg_gen_mov_tl(ret_high
, t4
);
1268 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1271 TCGv temp
= tcg_constant_i32(con
);
1272 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1276 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1279 TCGv_i64 t1
= tcg_temp_new_i64();
1280 TCGv_i64 t2
= tcg_temp_new_i64();
1281 TCGv_i64 t3
= tcg_temp_new_i64();
1283 tcg_gen_extu_i32_i64(t1
, r1
);
1284 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1285 tcg_gen_extu_i32_i64(t3
, r3
);
1287 tcg_gen_mul_i64(t1
, t1
, t3
);
1288 tcg_gen_sub_i64(t3
, t2
, t1
);
1289 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1290 /* calc V bit, only the sub can overflow, if t1 > t2 */
1291 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1292 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1293 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1295 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1296 /* Calc AV/SAV bits */
1297 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1298 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1300 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1304 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1307 TCGv temp
= tcg_constant_i32(con
);
1308 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1311 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1313 TCGv temp
= tcg_constant_i32(r2
);
1314 gen_add_d(ret
, r1
, temp
);
1317 /* calculate the carry bit too */
1318 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1320 TCGv t0
= tcg_temp_new_i32();
1321 TCGv result
= tcg_temp_new_i32();
1323 tcg_gen_movi_tl(t0
, 0);
1324 /* Addition and set C/V/SV bits */
1325 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1327 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1328 tcg_gen_xor_tl(t0
, r1
, r2
);
1329 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1331 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1332 /* Calc AV/SAV bits */
1333 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1334 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1336 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1337 /* write back result */
1338 tcg_gen_mov_tl(ret
, result
);
1341 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1343 TCGv temp
= tcg_constant_i32(con
);
1344 gen_add_CC(ret
, r1
, temp
);
1347 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1349 TCGv carry
= tcg_temp_new_i32();
1350 TCGv t0
= tcg_temp_new_i32();
1351 TCGv result
= tcg_temp_new_i32();
1353 tcg_gen_movi_tl(t0
, 0);
1354 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1355 /* Addition, carry and set C/V/SV bits */
1356 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1357 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1359 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1360 tcg_gen_xor_tl(t0
, r1
, r2
);
1361 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1363 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1364 /* Calc AV/SAV bits */
1365 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1366 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1368 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1369 /* write back result */
1370 tcg_gen_mov_tl(ret
, result
);
1373 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1375 TCGv temp
= tcg_constant_i32(con
);
1376 gen_addc_CC(ret
, r1
, temp
);
1379 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1382 TCGv temp
= tcg_temp_new();
1383 TCGv temp2
= tcg_temp_new();
1384 TCGv result
= tcg_temp_new();
1385 TCGv mask
= tcg_temp_new();
1386 TCGv t0
= tcg_constant_i32(0);
1388 /* create mask for sticky bits */
1389 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1390 tcg_gen_shli_tl(mask
, mask
, 31);
1392 tcg_gen_add_tl(result
, r1
, r2
);
1394 tcg_gen_xor_tl(temp
, result
, r1
);
1395 tcg_gen_xor_tl(temp2
, r1
, r2
);
1396 tcg_gen_andc_tl(temp
, temp
, temp2
);
1397 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1399 tcg_gen_and_tl(temp
, temp
, mask
);
1400 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1402 tcg_gen_add_tl(temp
, result
, result
);
1403 tcg_gen_xor_tl(temp
, temp
, result
);
1404 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1406 tcg_gen_and_tl(temp
, temp
, mask
);
1407 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1408 /* write back result */
1409 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1412 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1415 TCGv temp
= tcg_constant_i32(r2
);
1416 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1419 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1421 TCGv temp
= tcg_temp_new_i32();
1422 TCGv result
= tcg_temp_new_i32();
1424 tcg_gen_sub_tl(result
, r1
, r2
);
1426 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1427 tcg_gen_xor_tl(temp
, r1
, r2
);
1428 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1430 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1432 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1433 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1435 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1436 /* write back result */
1437 tcg_gen_mov_tl(ret
, result
);
1441 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1443 TCGv temp
= tcg_temp_new();
1444 TCGv_i64 t0
= tcg_temp_new_i64();
1445 TCGv_i64 t1
= tcg_temp_new_i64();
1446 TCGv_i64 result
= tcg_temp_new_i64();
1448 tcg_gen_sub_i64(result
, r1
, r2
);
1450 tcg_gen_xor_i64(t1
, result
, r1
);
1451 tcg_gen_xor_i64(t0
, r1
, r2
);
1452 tcg_gen_and_i64(t1
, t1
, t0
);
1453 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t1
);
1455 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1456 /* calc AV/SAV bits */
1457 tcg_gen_extrh_i64_i32(temp
, result
);
1458 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1459 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1461 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1462 /* write back result */
1463 tcg_gen_mov_i64(ret
, result
);
1466 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1468 TCGv result
= tcg_temp_new();
1469 TCGv temp
= tcg_temp_new();
1471 tcg_gen_sub_tl(result
, r1
, r2
);
1473 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1475 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1476 tcg_gen_xor_tl(temp
, r1
, r2
);
1477 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1479 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1481 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1482 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1484 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1485 /* write back result */
1486 tcg_gen_mov_tl(ret
, result
);
1489 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1491 TCGv temp
= tcg_temp_new();
1492 tcg_gen_not_tl(temp
, r2
);
1493 gen_addc_CC(ret
, r1
, temp
);
1496 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1499 TCGv temp
= tcg_temp_new();
1500 TCGv temp2
= tcg_temp_new();
1501 TCGv result
= tcg_temp_new();
1502 TCGv mask
= tcg_temp_new();
1503 TCGv t0
= tcg_constant_i32(0);
1505 /* create mask for sticky bits */
1506 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1507 tcg_gen_shli_tl(mask
, mask
, 31);
1509 tcg_gen_sub_tl(result
, r1
, r2
);
1511 tcg_gen_xor_tl(temp
, result
, r1
);
1512 tcg_gen_xor_tl(temp2
, r1
, r2
);
1513 tcg_gen_and_tl(temp
, temp
, temp2
);
1514 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1516 tcg_gen_and_tl(temp
, temp
, mask
);
1517 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1519 tcg_gen_add_tl(temp
, result
, result
);
1520 tcg_gen_xor_tl(temp
, temp
, result
);
1521 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1523 tcg_gen_and_tl(temp
, temp
, mask
);
1524 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1525 /* write back result */
1526 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1530 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1531 TCGv r3
, uint32_t n
, uint32_t mode
)
1533 TCGv t_n
= tcg_constant_i32(n
);
1534 TCGv temp
= tcg_temp_new();
1535 TCGv temp2
= tcg_temp_new();
1536 TCGv_i64 temp64
= tcg_temp_new_i64();
1539 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1542 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1545 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1548 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1551 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1552 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1553 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1557 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1558 TCGv r3
, uint32_t n
, uint32_t mode
)
1560 TCGv t_n
= tcg_constant_i32(n
);
1561 TCGv temp
= tcg_temp_new();
1562 TCGv temp2
= tcg_temp_new();
1563 TCGv temp3
= tcg_temp_new();
1564 TCGv_i64 temp64
= tcg_temp_new_i64();
1568 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1571 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1574 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1577 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1580 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1581 gen_subs(ret_low
, r1_low
, temp
);
1582 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1583 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1584 gen_subs(ret_high
, r1_high
, temp2
);
1585 /* combine v bits */
1586 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1587 /* combine av bits */
1588 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1592 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1593 TCGv r3
, uint32_t n
, uint32_t mode
)
1595 TCGv t_n
= tcg_constant_i32(n
);
1596 TCGv_i64 temp64
= tcg_temp_new_i64();
1597 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1598 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1601 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1604 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1607 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1610 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1613 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1614 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1615 /* write back result */
1616 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1620 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1621 TCGv r3
, uint32_t n
, uint32_t mode
)
1623 TCGv t_n
= tcg_constant_i32(n
);
1624 TCGv_i64 temp64
= tcg_temp_new_i64();
1625 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1628 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, t_n
);
1631 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, t_n
);
1634 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, t_n
);
1637 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, t_n
);
1640 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1641 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1642 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1646 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1649 TCGv t_n
= tcg_constant_i32(n
);
1650 TCGv_i64 temp64
= tcg_temp_new_i64();
1653 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1656 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1659 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1662 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1665 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1669 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1671 TCGv temp
= tcg_temp_new();
1672 TCGv temp2
= tcg_temp_new();
1674 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1675 tcg_gen_shli_tl(temp
, r1
, 16);
1676 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1680 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1681 uint32_t n
, uint32_t mode
)
1683 TCGv t_n
= tcg_constant_i32(n
);
1684 TCGv_i64 temp64
= tcg_temp_new_i64();
1687 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1690 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1693 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1696 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1699 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1703 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1705 TCGv temp
= tcg_temp_new();
1706 TCGv temp2
= tcg_temp_new();
1708 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1709 tcg_gen_shli_tl(temp
, r1
, 16);
1710 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1714 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1716 TCGv temp
= tcg_constant_i32(n
);
1717 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1721 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1723 TCGv temp
= tcg_constant_i32(n
);
1724 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1728 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1731 TCGv temp3
= tcg_temp_new();
1732 TCGv_i64 t1
= tcg_temp_new_i64();
1733 TCGv_i64 t2
= tcg_temp_new_i64();
1734 TCGv_i64 t3
= tcg_temp_new_i64();
1735 TCGv_i64 t4
= tcg_temp_new_i64();
1737 tcg_gen_ext_i32_i64(t2
, arg2
);
1738 tcg_gen_ext_i32_i64(t3
, arg3
);
1740 tcg_gen_mul_i64(t2
, t2
, t3
);
1742 tcg_gen_ext_i32_i64(t1
, arg1
);
1743 /* if we shift part of the fraction out, we need to round up */
1744 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1745 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1746 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1747 tcg_gen_add_i64(t2
, t2
, t4
);
1749 tcg_gen_sub_i64(t3
, t1
, t2
);
1750 tcg_gen_extrl_i64_i32(temp3
, t3
);
1752 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1753 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1754 tcg_gen_or_i64(t1
, t1
, t2
);
1755 tcg_gen_extrl_i64_i32(cpu_PSW_V
, t1
);
1756 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1758 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1759 /* Calc AV/SAV bits */
1760 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1761 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1763 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1764 /* write back result */
1765 tcg_gen_mov_tl(ret
, temp3
);
1769 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1771 TCGv temp
= tcg_temp_new();
1772 TCGv temp2
= tcg_temp_new();
1774 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1775 } else { /* n is expected to be 1 */
1776 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1777 tcg_gen_shli_tl(temp
, temp
, 1);
1778 /* catch special case r1 = r2 = 0x8000 */
1779 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1780 tcg_gen_sub_tl(temp
, temp
, temp2
);
1782 gen_sub_d(ret
, arg1
, temp
);
1786 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1788 TCGv temp
= tcg_temp_new();
1789 TCGv temp2
= tcg_temp_new();
1791 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1792 } else { /* n is expected to be 1 */
1793 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1794 tcg_gen_shli_tl(temp
, temp
, 1);
1795 /* catch special case r1 = r2 = 0x8000 */
1796 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1797 tcg_gen_sub_tl(temp
, temp
, temp2
);
1799 gen_subs(ret
, arg1
, temp
);
1803 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1804 TCGv arg3
, uint32_t n
)
1806 TCGv temp
= tcg_temp_new();
1807 TCGv temp2
= tcg_temp_new();
1808 TCGv_i64 t1
= tcg_temp_new_i64();
1809 TCGv_i64 t2
= tcg_temp_new_i64();
1810 TCGv_i64 t3
= tcg_temp_new_i64();
1813 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1814 } else { /* n is expected to be 1 */
1815 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1816 tcg_gen_shli_tl(temp
, temp
, 1);
1817 /* catch special case r1 = r2 = 0x8000 */
1818 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1819 tcg_gen_sub_tl(temp
, temp
, temp2
);
1821 tcg_gen_ext_i32_i64(t2
, temp
);
1822 tcg_gen_shli_i64(t2
, t2
, 16);
1823 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1824 gen_sub64_d(t3
, t1
, t2
);
1825 /* write back result */
1826 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1830 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1831 TCGv arg3
, uint32_t n
)
1833 TCGv temp
= tcg_temp_new();
1834 TCGv temp2
= tcg_temp_new();
1835 TCGv_i64 t1
= tcg_temp_new_i64();
1836 TCGv_i64 t2
= tcg_temp_new_i64();
1839 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1840 } else { /* n is expected to be 1 */
1841 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1842 tcg_gen_shli_tl(temp
, temp
, 1);
1843 /* catch special case r1 = r2 = 0x8000 */
1844 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1845 tcg_gen_sub_tl(temp
, temp
, temp2
);
1847 tcg_gen_ext_i32_i64(t2
, temp
);
1848 tcg_gen_shli_i64(t2
, t2
, 16);
1849 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1851 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
1852 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1856 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1857 TCGv arg3
, uint32_t n
)
1859 TCGv_i64 t1
= tcg_temp_new_i64();
1860 TCGv_i64 t2
= tcg_temp_new_i64();
1861 TCGv_i64 t3
= tcg_temp_new_i64();
1862 TCGv_i64 t4
= tcg_temp_new_i64();
1865 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1866 tcg_gen_ext_i32_i64(t2
, arg2
);
1867 tcg_gen_ext_i32_i64(t3
, arg3
);
1869 tcg_gen_mul_i64(t2
, t2
, t3
);
1871 tcg_gen_shli_i64(t2
, t2
, 1);
1873 tcg_gen_sub_i64(t4
, t1
, t2
);
1875 tcg_gen_xor_i64(t3
, t4
, t1
);
1876 tcg_gen_xor_i64(t2
, t1
, t2
);
1877 tcg_gen_and_i64(t3
, t3
, t2
);
1878 tcg_gen_extrh_i64_i32(cpu_PSW_V
, t3
);
1879 /* We produce an overflow on the host if the mul before was
1880 (0x80000000 * 0x80000000) << 1). If this is the
1881 case, we negate the ovf. */
1883 temp
= tcg_temp_new();
1884 temp2
= tcg_temp_new();
1885 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1886 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1887 tcg_gen_and_tl(temp
, temp
, temp2
);
1888 tcg_gen_shli_tl(temp
, temp
, 31);
1889 /* negate v bit, if special condition */
1890 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1892 /* write back result */
1893 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1895 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1896 /* Calc AV/SAV bits */
1897 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1898 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1900 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1904 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1907 TCGv_i64 t1
= tcg_temp_new_i64();
1908 TCGv_i64 t2
= tcg_temp_new_i64();
1909 TCGv_i64 t3
= tcg_temp_new_i64();
1910 TCGv_i64 t4
= tcg_temp_new_i64();
1912 tcg_gen_ext_i32_i64(t1
, arg1
);
1913 tcg_gen_ext_i32_i64(t2
, arg2
);
1914 tcg_gen_ext_i32_i64(t3
, arg3
);
1916 tcg_gen_mul_i64(t2
, t2
, t3
);
1917 /* if we shift part of the fraction out, we need to round up */
1918 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1919 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1920 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
1921 tcg_gen_add_i64(t3
, t3
, t4
);
1923 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
1927 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1928 TCGv arg3
, uint32_t n
)
1930 TCGv_i64 r1
= tcg_temp_new_i64();
1931 TCGv t_n
= tcg_constant_i32(n
);
1933 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1934 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, t_n
);
1935 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1939 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1940 TCGv r3
, uint32_t n
, uint32_t mode
)
1942 TCGv t_n
= tcg_constant_i32(n
);
1943 TCGv temp
= tcg_temp_new();
1944 TCGv temp2
= tcg_temp_new();
1945 TCGv_i64 temp64
= tcg_temp_new_i64();
1948 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1951 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1954 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1957 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1960 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1961 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1962 tcg_gen_add_tl
, tcg_gen_sub_tl
);
1966 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1967 TCGv r3
, uint32_t n
, uint32_t mode
)
1969 TCGv t_n
= tcg_constant_i32(n
);
1970 TCGv_i64 temp64
= tcg_temp_new_i64();
1971 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1972 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1975 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
1978 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
1981 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
1984 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
1987 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
1988 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
1989 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
1990 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
1991 tcg_gen_shli_i64(temp64
, temp64
, 16);
1993 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
1994 /* write back result */
1995 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
1999 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2001 TCGv t_n
= tcg_constant_i32(n
);
2002 TCGv temp
= tcg_temp_new();
2003 TCGv temp2
= tcg_temp_new();
2004 TCGv_i64 temp64
= tcg_temp_new_i64();
2007 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2010 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2013 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2016 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2019 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2020 tcg_gen_shli_tl(temp
, r1
, 16);
2021 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2025 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2026 TCGv r3
, uint32_t n
, uint32_t mode
)
2028 TCGv t_n
= tcg_constant_i32(n
);
2029 TCGv temp
= tcg_temp_new();
2030 TCGv temp2
= tcg_temp_new();
2031 TCGv temp3
= tcg_temp_new();
2032 TCGv_i64 temp64
= tcg_temp_new_i64();
2036 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2039 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2042 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2045 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2048 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2049 gen_adds(ret_low
, r1_low
, temp
);
2050 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2051 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2052 gen_subs(ret_high
, r1_high
, temp2
);
2053 /* combine v bits */
2054 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2055 /* combine av bits */
2056 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2060 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2061 TCGv r3
, uint32_t n
, uint32_t mode
)
2063 TCGv t_n
= tcg_constant_i32(n
);
2064 TCGv_i64 temp64
= tcg_temp_new_i64();
2065 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2069 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2072 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2075 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2078 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2081 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2082 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2083 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2084 tcg_gen_shli_i64(temp64
, temp64
, 16);
2085 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2087 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2088 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2092 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2094 TCGv t_n
= tcg_constant_i32(n
);
2095 TCGv temp
= tcg_temp_new();
2096 TCGv temp2
= tcg_temp_new();
2097 TCGv_i64 temp64
= tcg_temp_new_i64();
2100 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, t_n
);
2103 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, t_n
);
2106 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, t_n
);
2109 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, t_n
);
2112 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2113 tcg_gen_shli_tl(temp
, r1
, 16);
2114 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2117 static inline void gen_abs(TCGv ret
, TCGv r1
)
2119 tcg_gen_abs_tl(ret
, r1
);
2120 /* overflow can only happen, if r1 = 0x80000000 */
2121 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2122 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2124 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2126 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2127 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2129 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2132 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2134 TCGv temp
= tcg_temp_new_i32();
2135 TCGv result
= tcg_temp_new_i32();
2137 tcg_gen_sub_tl(result
, r1
, r2
);
2138 tcg_gen_sub_tl(temp
, r2
, r1
);
2139 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2142 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2143 tcg_gen_xor_tl(temp
, result
, r2
);
2144 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2145 tcg_gen_xor_tl(temp
, r1
, r2
);
2146 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2148 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2150 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2151 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2153 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2154 /* write back result */
2155 tcg_gen_mov_tl(ret
, result
);
2158 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2160 TCGv temp
= tcg_constant_i32(con
);
2161 gen_absdif(ret
, r1
, temp
);
2164 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2166 TCGv temp
= tcg_constant_i32(con
);
2167 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2170 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2172 TCGv high
= tcg_temp_new();
2173 TCGv low
= tcg_temp_new();
2175 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2176 tcg_gen_mov_tl(ret
, low
);
2178 tcg_gen_sari_tl(low
, low
, 31);
2179 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2180 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2182 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2184 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2185 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2187 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2190 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2192 TCGv temp
= tcg_constant_i32(con
);
2193 gen_mul_i32s(ret
, r1
, temp
);
2196 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2198 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2200 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2202 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2204 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2205 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2207 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2210 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2213 TCGv temp
= tcg_constant_i32(con
);
2214 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2217 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2219 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2221 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2223 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2225 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2226 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2228 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2231 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2234 TCGv temp
= tcg_constant_i32(con
);
2235 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2238 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2240 TCGv temp
= tcg_constant_i32(con
);
2241 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2244 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2246 TCGv temp
= tcg_constant_i32(con
);
2247 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2250 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2251 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2253 TCGv temp
= tcg_constant_i32(con
);
2254 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2257 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2259 TCGv temp
= tcg_constant_i32(con
);
2260 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2264 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2266 TCGv_i64 temp_64
= tcg_temp_new_i64();
2267 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2270 if (up_shift
== 32) {
2271 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2272 } else if (up_shift
== 16) {
2273 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2274 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2276 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2277 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2278 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2280 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2283 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2284 } else { /* n is expected to be 1 */
2285 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2286 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2288 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2290 if (up_shift
== 0) {
2291 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2293 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2295 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2296 /* overflow only occurs if r1 = r2 = 0x8000 */
2297 if (up_shift
== 0) {/* result is 64 bit */
2298 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2300 } else { /* result is 32 bit */
2301 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2304 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2305 /* calc sv overflow bit */
2306 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2308 /* calc av overflow bit */
2309 if (up_shift
== 0) {
2310 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2311 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2313 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2314 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2316 /* calc sav overflow bit */
2317 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2321 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2323 TCGv temp
= tcg_temp_new();
2325 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2326 } else { /* n is expected to be 1 */
2327 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2328 tcg_gen_shli_tl(ret
, ret
, 1);
2329 /* catch special case r1 = r2 = 0x8000 */
2330 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2331 tcg_gen_sub_tl(ret
, ret
, temp
);
2334 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2335 /* calc av overflow bit */
2336 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2337 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2338 /* calc sav overflow bit */
2339 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2342 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2344 TCGv temp
= tcg_temp_new();
2346 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2347 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2349 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2350 tcg_gen_shli_tl(ret
, ret
, 1);
2351 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2352 /* catch special case r1 = r2 = 0x8000 */
2353 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2354 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2355 tcg_gen_sub_tl(ret
, ret
, temp
);
2358 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2359 /* calc av overflow bit */
2360 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2361 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2362 /* calc sav overflow bit */
2363 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2364 /* cut halfword off */
2365 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2369 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2372 TCGv_i64 temp64
= tcg_temp_new_i64();
2373 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2374 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2375 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2379 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2382 TCGv temp
= tcg_constant_i32(con
);
2383 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2387 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2390 TCGv_i64 temp64
= tcg_temp_new_i64();
2391 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2392 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2393 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2397 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2400 TCGv temp
= tcg_constant_i32(con
);
2401 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2404 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2406 TCGv temp
= tcg_constant_i32(con
);
2407 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2410 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2412 TCGv temp
= tcg_constant_i32(con
);
2413 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2417 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2420 TCGv_i64 temp64
= tcg_temp_new_i64();
2421 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2422 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2423 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2427 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2430 TCGv temp
= tcg_constant_i32(con
);
2431 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2435 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2438 TCGv_i64 temp64
= tcg_temp_new_i64();
2439 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2440 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2441 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2445 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2448 TCGv temp
= tcg_constant_i32(con
);
2449 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2452 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2454 tcg_gen_smax_tl(ret
, arg
, tcg_constant_i32(low
));
2455 tcg_gen_smin_tl(ret
, ret
, tcg_constant_i32(up
));
2458 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2460 tcg_gen_umin_tl(ret
, arg
, tcg_constant_i32(up
));
2463 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2465 if (shift_count
== -32) {
2466 tcg_gen_movi_tl(ret
, 0);
2467 } else if (shift_count
>= 0) {
2468 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2470 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2474 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2476 TCGv temp_low
, temp_high
;
2478 if (shiftcount
== -16) {
2479 tcg_gen_movi_tl(ret
, 0);
2481 temp_high
= tcg_temp_new();
2482 temp_low
= tcg_temp_new();
2484 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2485 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2486 gen_shi(temp_low
, temp_low
, shiftcount
);
2487 gen_shi(ret
, temp_high
, shiftcount
);
2488 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2492 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2494 uint32_t msk
, msk_start
;
2495 TCGv temp
= tcg_temp_new();
2496 TCGv temp2
= tcg_temp_new();
2498 if (shift_count
== 0) {
2499 /* Clear PSW.C and PSW.V */
2500 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2501 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2502 tcg_gen_mov_tl(ret
, r1
);
2503 } else if (shift_count
== -32) {
2505 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2506 /* fill ret completely with sign bit */
2507 tcg_gen_sari_tl(ret
, r1
, 31);
2509 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2510 } else if (shift_count
> 0) {
2511 TCGv t_max
= tcg_constant_i32(0x7FFFFFFF >> shift_count
);
2512 TCGv t_min
= tcg_constant_i32(((int32_t) -0x80000000) >> shift_count
);
2515 msk_start
= 32 - shift_count
;
2516 msk
= ((1 << shift_count
) - 1) << msk_start
;
2517 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2518 /* calc v/sv bits */
2519 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2520 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2521 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2522 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2524 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2526 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2529 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2531 msk
= (1 << -shift_count
) - 1;
2532 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2534 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2536 /* calc av overflow bit */
2537 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2538 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2539 /* calc sav overflow bit */
2540 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2543 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2545 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2548 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2550 TCGv temp
= tcg_constant_i32(con
);
2551 gen_shas(ret
, r1
, temp
);
2554 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2558 if (shift_count
== 0) {
2559 tcg_gen_mov_tl(ret
, r1
);
2560 } else if (shift_count
> 0) {
2561 low
= tcg_temp_new();
2562 high
= tcg_temp_new();
2564 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2565 tcg_gen_shli_tl(low
, r1
, shift_count
);
2566 tcg_gen_shli_tl(ret
, high
, shift_count
);
2567 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2569 low
= tcg_temp_new();
2570 high
= tcg_temp_new();
2572 tcg_gen_ext16s_tl(low
, r1
);
2573 tcg_gen_sari_tl(low
, low
, -shift_count
);
2574 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2575 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2579 /* ret = {ret[30:0], (r1 cond r2)}; */
2580 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2582 TCGv temp
= tcg_temp_new();
2583 TCGv temp2
= tcg_temp_new();
2585 tcg_gen_shli_tl(temp
, ret
, 1);
2586 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2587 tcg_gen_or_tl(ret
, temp
, temp2
);
2590 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2592 TCGv temp
= tcg_constant_i32(con
);
2593 gen_sh_cond(cond
, ret
, r1
, temp
);
2596 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2598 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2601 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2603 TCGv temp
= tcg_constant_i32(con
);
2604 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2607 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2609 TCGv temp
= tcg_constant_i32(con
);
2610 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2613 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2615 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2618 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2620 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2623 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2625 void(*op1
)(TCGv
, TCGv
, TCGv
),
2626 void(*op2
)(TCGv
, TCGv
, TCGv
))
2630 temp1
= tcg_temp_new();
2631 temp2
= tcg_temp_new();
2633 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2634 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2636 (*op1
)(temp1
, temp1
, temp2
);
2637 (*op2
)(temp1
, ret
, temp1
);
2639 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2642 /* ret = r1[pos1] op1 r2[pos2]; */
2643 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2645 void(*op1
)(TCGv
, TCGv
, TCGv
))
2649 temp1
= tcg_temp_new();
2650 temp2
= tcg_temp_new();
2652 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2653 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2655 (*op1
)(ret
, temp1
, temp2
);
2657 tcg_gen_andi_tl(ret
, ret
, 0x1);
2660 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2661 void(*op
)(TCGv
, TCGv
, TCGv
))
2663 TCGv temp
= tcg_temp_new();
2664 TCGv temp2
= tcg_temp_new();
2665 /* temp = (arg1 cond arg2 )*/
2666 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2668 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2669 /* temp = temp insn temp2 */
2670 (*op
)(temp
, temp
, temp2
);
2671 /* ret = {ret[31:1], temp} */
2672 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2676 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2677 void(*op
)(TCGv
, TCGv
, TCGv
))
2679 TCGv temp
= tcg_constant_i32(con
);
2680 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2683 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2684 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2686 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2687 tcg_gen_neg_tl(ret
, ret
);
2690 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2692 TCGv b0
= tcg_temp_new();
2693 TCGv b1
= tcg_temp_new();
2694 TCGv b2
= tcg_temp_new();
2695 TCGv b3
= tcg_temp_new();
2698 tcg_gen_andi_tl(b0
, r1
, 0xff);
2699 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2702 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2703 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2706 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2707 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2710 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2711 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2714 tcg_gen_or_tl(ret
, b0
, b1
);
2715 tcg_gen_or_tl(ret
, ret
, b2
);
2716 tcg_gen_or_tl(ret
, ret
, b3
);
2719 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2721 TCGv h0
= tcg_temp_new();
2722 TCGv h1
= tcg_temp_new();
2725 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2726 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2729 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2730 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2733 tcg_gen_or_tl(ret
, h0
, h1
);
2736 /* mask = ((1 << width) -1) << pos;
2737 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2738 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2740 TCGv mask
= tcg_temp_new();
2741 TCGv temp
= tcg_temp_new();
2742 TCGv temp2
= tcg_temp_new();
2744 tcg_gen_movi_tl(mask
, 1);
2745 tcg_gen_shl_tl(mask
, mask
, width
);
2746 tcg_gen_subi_tl(mask
, mask
, 1);
2747 tcg_gen_shl_tl(mask
, mask
, pos
);
2749 tcg_gen_shl_tl(temp
, r2
, pos
);
2750 tcg_gen_and_tl(temp
, temp
, mask
);
2751 tcg_gen_andc_tl(temp2
, r1
, mask
);
2752 tcg_gen_or_tl(ret
, temp
, temp2
);
2755 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2757 TCGv_i64 temp
= tcg_temp_new_i64();
2759 gen_helper_bsplit(temp
, r1
);
2760 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2763 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2765 TCGv_i64 temp
= tcg_temp_new_i64();
2767 gen_helper_unpack(temp
, r1
);
2768 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2772 gen_dvinit_b(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2774 TCGv_i64 ret
= tcg_temp_new_i64();
2776 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2777 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2779 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2781 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2785 gen_dvinit_h(DisasContext
*ctx
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2787 TCGv_i64 ret
= tcg_temp_new_i64();
2789 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
2790 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2792 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2794 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2797 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2799 TCGv temp
= tcg_temp_new();
2801 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2802 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2803 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2804 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2805 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2807 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2808 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2811 static void gen_calc_usb_mulr_h(TCGv arg
)
2813 TCGv temp
= tcg_temp_new();
2815 tcg_gen_add_tl(temp
, arg
, arg
);
2816 tcg_gen_xor_tl(temp
, temp
, arg
);
2817 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2818 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2820 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2822 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2825 /* helpers for generating program flow micro-ops */
2827 static inline void gen_save_pc(target_ulong pc
)
2829 tcg_gen_movi_tl(cpu_PC
, pc
);
2832 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2834 if (translator_use_goto_tb(&ctx
->base
, dest
)) {
2837 tcg_gen_exit_tb(ctx
->base
.tb
, n
);
2840 tcg_gen_lookup_and_goto_ptr();
2842 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2845 static void generate_trap(DisasContext
*ctx
, int class, int tin
)
2847 TCGv_i32 classtemp
= tcg_constant_i32(class);
2848 TCGv_i32 tintemp
= tcg_constant_i32(tin
);
2850 gen_save_pc(ctx
->base
.pc_next
);
2851 gen_helper_raise_exception_sync(cpu_env
, classtemp
, tintemp
);
2852 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2855 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2856 TCGv r2
, int16_t address
)
2858 TCGLabel
*jumpLabel
= gen_new_label();
2859 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2861 gen_goto_tb(ctx
, 1, ctx
->pc_succ_insn
);
2863 gen_set_label(jumpLabel
);
2864 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ address
* 2);
2867 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2868 int r2
, int16_t address
)
2870 TCGv temp
= tcg_constant_i32(r2
);
2871 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2874 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2876 TCGLabel
*l1
= gen_new_label();
2878 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2879 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2880 gen_goto_tb(ctx
, 1, ctx
->base
.pc_next
+ offset
);
2882 gen_goto_tb(ctx
, 0, ctx
->pc_succ_insn
);
2885 static void gen_fcall_save_ctx(DisasContext
*ctx
)
2887 TCGv temp
= tcg_temp_new();
2889 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
2890 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
2891 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
2892 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
2895 static void gen_fret(DisasContext
*ctx
)
2897 TCGv temp
= tcg_temp_new();
2899 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
2900 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
2901 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
2902 tcg_gen_mov_tl(cpu_PC
, temp
);
2903 ctx
->base
.is_jmp
= DISAS_EXIT
;
2906 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2907 int r2
, int32_t constant
, int32_t offset
)
2913 /* SB-format jumps */
2916 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2918 case OPC1_32_B_CALL
:
2919 case OPC1_16_SB_CALL
:
2920 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2921 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2924 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2926 case OPC1_16_SB_JNZ
:
2927 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2929 /* SBC-format jumps */
2930 case OPC1_16_SBC_JEQ
:
2931 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2933 case OPC1_16_SBC_JEQ2
:
2934 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2937 case OPC1_16_SBC_JNE
:
2938 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2940 case OPC1_16_SBC_JNE2
:
2941 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2942 constant
, offset
+ 16);
2944 /* SBRN-format jumps */
2945 case OPC1_16_SBRN_JZ_T
:
2946 temp
= tcg_temp_new();
2947 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2948 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2950 case OPC1_16_SBRN_JNZ_T
:
2951 temp
= tcg_temp_new();
2952 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2953 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2955 /* SBR-format jumps */
2956 case OPC1_16_SBR_JEQ
:
2957 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2960 case OPC1_16_SBR_JEQ2
:
2961 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2964 case OPC1_16_SBR_JNE
:
2965 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2968 case OPC1_16_SBR_JNE2
:
2969 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2972 case OPC1_16_SBR_JNZ
:
2973 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2975 case OPC1_16_SBR_JNZ_A
:
2976 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2978 case OPC1_16_SBR_JGEZ
:
2979 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2981 case OPC1_16_SBR_JGTZ
:
2982 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2984 case OPC1_16_SBR_JLEZ
:
2985 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2987 case OPC1_16_SBR_JLTZ
:
2988 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2990 case OPC1_16_SBR_JZ
:
2991 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2993 case OPC1_16_SBR_JZ_A
:
2994 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2996 case OPC1_16_SBR_LOOP
:
2997 gen_loop(ctx
, r1
, offset
* 2 - 32);
2999 /* SR-format jumps */
3001 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3002 ctx
->base
.is_jmp
= DISAS_EXIT
;
3004 case OPC2_32_SYS_RET
:
3005 case OPC2_16_SR_RET
:
3006 gen_helper_ret(cpu_env
);
3007 ctx
->base
.is_jmp
= DISAS_EXIT
;
3010 case OPC1_32_B_CALLA
:
3011 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3012 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3014 case OPC1_32_B_FCALL
:
3015 gen_fcall_save_ctx(ctx
);
3016 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3018 case OPC1_32_B_FCALLA
:
3019 gen_fcall_save_ctx(ctx
);
3020 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3023 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3026 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3029 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3030 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3033 case OPCM_32_BRC_EQ_NEQ
:
3034 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3035 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3037 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3040 case OPCM_32_BRC_GE
:
3041 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3042 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3044 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3045 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3049 case OPCM_32_BRC_JLT
:
3050 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3051 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3053 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3054 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3058 case OPCM_32_BRC_JNE
:
3059 temp
= tcg_temp_new();
3060 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3061 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3062 /* subi is unconditional */
3063 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3064 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3066 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3067 /* addi is unconditional */
3068 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3069 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3073 case OPCM_32_BRN_JTT
:
3074 n
= MASK_OP_BRN_N(ctx
->opcode
);
3076 temp
= tcg_temp_new();
3077 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3079 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3080 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3082 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3086 case OPCM_32_BRR_EQ_NEQ
:
3087 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3088 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3091 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3095 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3096 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3097 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3100 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3104 case OPCM_32_BRR_GE
:
3105 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3106 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3109 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3113 case OPCM_32_BRR_JLT
:
3114 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3115 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3118 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3122 case OPCM_32_BRR_LOOP
:
3123 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3124 gen_loop(ctx
, r2
, offset
* 2);
3126 /* OPC2_32_BRR_LOOPU */
3127 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3130 case OPCM_32_BRR_JNE
:
3131 temp
= tcg_temp_new();
3132 temp2
= tcg_temp_new();
3133 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3134 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3135 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3136 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3137 /* subi is unconditional */
3138 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3139 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3141 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3142 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3143 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3144 /* addi is unconditional */
3145 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3146 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3149 case OPCM_32_BRR_JNZ
:
3150 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3151 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3153 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3157 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3163 * Functions for decoding instructions
3166 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3172 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3173 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3176 case OPC1_16_SRC_ADD
:
3177 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3179 case OPC1_16_SRC_ADD_A15
:
3180 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3182 case OPC1_16_SRC_ADD_15A
:
3183 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3185 case OPC1_16_SRC_ADD_A
:
3186 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3188 case OPC1_16_SRC_CADD
:
3189 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3192 case OPC1_16_SRC_CADDN
:
3193 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3196 case OPC1_16_SRC_CMOV
:
3197 temp
= tcg_constant_tl(0);
3198 temp2
= tcg_constant_tl(const4
);
3199 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3200 temp2
, cpu_gpr_d
[r1
]);
3202 case OPC1_16_SRC_CMOVN
:
3203 temp
= tcg_constant_tl(0);
3204 temp2
= tcg_constant_tl(const4
);
3205 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3206 temp2
, cpu_gpr_d
[r1
]);
3208 case OPC1_16_SRC_EQ
:
3209 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3212 case OPC1_16_SRC_LT
:
3213 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3216 case OPC1_16_SRC_MOV
:
3217 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3219 case OPC1_16_SRC_MOV_A
:
3220 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3221 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3223 case OPC1_16_SRC_MOV_E
:
3224 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3226 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3227 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3229 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3232 case OPC1_16_SRC_SH
:
3233 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3235 case OPC1_16_SRC_SHA
:
3236 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3239 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3243 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3248 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3249 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3252 case OPC1_16_SRR_ADD
:
3253 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3255 case OPC1_16_SRR_ADD_A15
:
3256 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3258 case OPC1_16_SRR_ADD_15A
:
3259 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3261 case OPC1_16_SRR_ADD_A
:
3262 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3264 case OPC1_16_SRR_ADDS
:
3265 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3267 case OPC1_16_SRR_AND
:
3268 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3270 case OPC1_16_SRR_CMOV
:
3271 temp
= tcg_constant_tl(0);
3272 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3273 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3275 case OPC1_16_SRR_CMOVN
:
3276 temp
= tcg_constant_tl(0);
3277 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3278 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3280 case OPC1_16_SRR_EQ
:
3281 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3284 case OPC1_16_SRR_LT
:
3285 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3288 case OPC1_16_SRR_MOV
:
3289 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3291 case OPC1_16_SRR_MOV_A
:
3292 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3294 case OPC1_16_SRR_MOV_AA
:
3295 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3297 case OPC1_16_SRR_MOV_D
:
3298 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3300 case OPC1_16_SRR_MUL
:
3301 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3303 case OPC1_16_SRR_OR
:
3304 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3306 case OPC1_16_SRR_SUB
:
3307 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3309 case OPC1_16_SRR_SUB_A15B
:
3310 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3312 case OPC1_16_SRR_SUB_15AB
:
3313 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3315 case OPC1_16_SRR_SUBS
:
3316 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3318 case OPC1_16_SRR_XOR
:
3319 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3322 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3326 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3330 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3331 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3334 case OPC1_16_SSR_ST_A
:
3335 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3337 case OPC1_16_SSR_ST_A_POSTINC
:
3338 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3339 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3341 case OPC1_16_SSR_ST_B
:
3342 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3344 case OPC1_16_SSR_ST_B_POSTINC
:
3345 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3346 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3348 case OPC1_16_SSR_ST_H
:
3349 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3351 case OPC1_16_SSR_ST_H_POSTINC
:
3352 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3353 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3355 case OPC1_16_SSR_ST_W
:
3356 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3358 case OPC1_16_SSR_ST_W_POSTINC
:
3359 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3360 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3363 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3367 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3371 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3374 case OPC1_16_SC_AND
:
3375 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3377 case OPC1_16_SC_BISR
:
3378 if (ctx
->priv
== TRICORE_PRIV_SM
) {
3379 gen_helper_1arg(bisr
, const16
& 0xff);
3381 generate_trap(ctx
, TRAPC_PROT
, TIN1_PRIV
);
3384 case OPC1_16_SC_LD_A
:
3385 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3387 case OPC1_16_SC_LD_W
:
3388 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3390 case OPC1_16_SC_MOV
:
3391 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3394 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3396 case OPC1_16_SC_ST_A
:
3397 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3399 case OPC1_16_SC_ST_W
:
3400 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3402 case OPC1_16_SC_SUB_A
:
3403 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3406 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3410 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3414 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3415 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3419 case OPC1_16_SLR_LD_A
:
3420 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3422 case OPC1_16_SLR_LD_A_POSTINC
:
3423 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3424 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3426 case OPC1_16_SLR_LD_BU
:
3427 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3429 case OPC1_16_SLR_LD_BU_POSTINC
:
3430 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3431 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3433 case OPC1_16_SLR_LD_H
:
3434 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3436 case OPC1_16_SLR_LD_H_POSTINC
:
3437 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3438 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3440 case OPC1_16_SLR_LD_W
:
3441 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3443 case OPC1_16_SLR_LD_W_POSTINC
:
3444 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3445 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3448 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3452 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3457 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3458 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3462 case OPC1_16_SRO_LD_A
:
3463 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3465 case OPC1_16_SRO_LD_BU
:
3466 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3468 case OPC1_16_SRO_LD_H
:
3469 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3471 case OPC1_16_SRO_LD_W
:
3472 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3474 case OPC1_16_SRO_ST_A
:
3475 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3477 case OPC1_16_SRO_ST_B
:
3478 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3480 case OPC1_16_SRO_ST_H
:
3481 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3483 case OPC1_16_SRO_ST_W
:
3484 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3487 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3491 static void decode_sr_system(DisasContext
*ctx
)
3494 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3497 case OPC2_16_SR_NOP
:
3499 case OPC2_16_SR_RET
:
3500 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3502 case OPC2_16_SR_RFE
:
3503 gen_helper_rfe(cpu_env
);
3504 ctx
->base
.is_jmp
= DISAS_EXIT
;
3506 case OPC2_16_SR_DEBUG
:
3507 /* raise EXCP_DEBUG */
3509 case OPC2_16_SR_FRET
:
3513 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3517 static void decode_sr_accu(DisasContext
*ctx
)
3522 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3523 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3526 case OPC2_16_SR_RSUB
:
3527 /* calc V bit -- overflow only if r1 = -0x80000000 */
3528 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], -0x80000000);
3529 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3531 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3533 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3535 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3536 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3538 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3540 case OPC2_16_SR_SAT_B
:
3541 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3543 case OPC2_16_SR_SAT_BU
:
3544 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3546 case OPC2_16_SR_SAT_H
:
3547 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3549 case OPC2_16_SR_SAT_HU
:
3550 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3553 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3557 static void decode_16Bit_opc(DisasContext
*ctx
)
3565 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3567 /* handle ADDSC.A opcode only being 6 bit long */
3568 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3569 op1
= OPC1_16_SRRS_ADDSC_A
;
3573 case OPC1_16_SRC_ADD
:
3574 case OPC1_16_SRC_ADD_A15
:
3575 case OPC1_16_SRC_ADD_15A
:
3576 case OPC1_16_SRC_ADD_A
:
3577 case OPC1_16_SRC_CADD
:
3578 case OPC1_16_SRC_CADDN
:
3579 case OPC1_16_SRC_CMOV
:
3580 case OPC1_16_SRC_CMOVN
:
3581 case OPC1_16_SRC_EQ
:
3582 case OPC1_16_SRC_LT
:
3583 case OPC1_16_SRC_MOV
:
3584 case OPC1_16_SRC_MOV_A
:
3585 case OPC1_16_SRC_MOV_E
:
3586 case OPC1_16_SRC_SH
:
3587 case OPC1_16_SRC_SHA
:
3588 decode_src_opc(ctx
, op1
);
3591 case OPC1_16_SRR_ADD
:
3592 case OPC1_16_SRR_ADD_A15
:
3593 case OPC1_16_SRR_ADD_15A
:
3594 case OPC1_16_SRR_ADD_A
:
3595 case OPC1_16_SRR_ADDS
:
3596 case OPC1_16_SRR_AND
:
3597 case OPC1_16_SRR_CMOV
:
3598 case OPC1_16_SRR_CMOVN
:
3599 case OPC1_16_SRR_EQ
:
3600 case OPC1_16_SRR_LT
:
3601 case OPC1_16_SRR_MOV
:
3602 case OPC1_16_SRR_MOV_A
:
3603 case OPC1_16_SRR_MOV_AA
:
3604 case OPC1_16_SRR_MOV_D
:
3605 case OPC1_16_SRR_MUL
:
3606 case OPC1_16_SRR_OR
:
3607 case OPC1_16_SRR_SUB
:
3608 case OPC1_16_SRR_SUB_A15B
:
3609 case OPC1_16_SRR_SUB_15AB
:
3610 case OPC1_16_SRR_SUBS
:
3611 case OPC1_16_SRR_XOR
:
3612 decode_srr_opc(ctx
, op1
);
3615 case OPC1_16_SSR_ST_A
:
3616 case OPC1_16_SSR_ST_A_POSTINC
:
3617 case OPC1_16_SSR_ST_B
:
3618 case OPC1_16_SSR_ST_B_POSTINC
:
3619 case OPC1_16_SSR_ST_H
:
3620 case OPC1_16_SSR_ST_H_POSTINC
:
3621 case OPC1_16_SSR_ST_W
:
3622 case OPC1_16_SSR_ST_W_POSTINC
:
3623 decode_ssr_opc(ctx
, op1
);
3626 case OPC1_16_SRRS_ADDSC_A
:
3627 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3628 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3629 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3630 temp
= tcg_temp_new();
3631 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3632 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3635 case OPC1_16_SLRO_LD_A
:
3636 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3637 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3638 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3640 case OPC1_16_SLRO_LD_BU
:
3641 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3642 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3643 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3645 case OPC1_16_SLRO_LD_H
:
3646 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3647 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3648 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3650 case OPC1_16_SLRO_LD_W
:
3651 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3652 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3653 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3656 case OPC1_16_SB_CALL
:
3658 case OPC1_16_SB_JNZ
:
3660 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3661 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3664 case OPC1_16_SBC_JEQ
:
3665 case OPC1_16_SBC_JNE
:
3666 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3667 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3668 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3670 case OPC1_16_SBC_JEQ2
:
3671 case OPC1_16_SBC_JNE2
:
3672 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3673 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3674 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3675 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3677 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3681 case OPC1_16_SBRN_JNZ_T
:
3682 case OPC1_16_SBRN_JZ_T
:
3683 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3684 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3685 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3688 case OPC1_16_SBR_JEQ2
:
3689 case OPC1_16_SBR_JNE2
:
3690 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3691 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3692 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3693 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3695 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3698 case OPC1_16_SBR_JEQ
:
3699 case OPC1_16_SBR_JGEZ
:
3700 case OPC1_16_SBR_JGTZ
:
3701 case OPC1_16_SBR_JLEZ
:
3702 case OPC1_16_SBR_JLTZ
:
3703 case OPC1_16_SBR_JNE
:
3704 case OPC1_16_SBR_JNZ
:
3705 case OPC1_16_SBR_JNZ_A
:
3706 case OPC1_16_SBR_JZ
:
3707 case OPC1_16_SBR_JZ_A
:
3708 case OPC1_16_SBR_LOOP
:
3709 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3710 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3711 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3714 case OPC1_16_SC_AND
:
3715 case OPC1_16_SC_BISR
:
3716 case OPC1_16_SC_LD_A
:
3717 case OPC1_16_SC_LD_W
:
3718 case OPC1_16_SC_MOV
:
3720 case OPC1_16_SC_ST_A
:
3721 case OPC1_16_SC_ST_W
:
3722 case OPC1_16_SC_SUB_A
:
3723 decode_sc_opc(ctx
, op1
);
3726 case OPC1_16_SLR_LD_A
:
3727 case OPC1_16_SLR_LD_A_POSTINC
:
3728 case OPC1_16_SLR_LD_BU
:
3729 case OPC1_16_SLR_LD_BU_POSTINC
:
3730 case OPC1_16_SLR_LD_H
:
3731 case OPC1_16_SLR_LD_H_POSTINC
:
3732 case OPC1_16_SLR_LD_W
:
3733 case OPC1_16_SLR_LD_W_POSTINC
:
3734 decode_slr_opc(ctx
, op1
);
3737 case OPC1_16_SRO_LD_A
:
3738 case OPC1_16_SRO_LD_BU
:
3739 case OPC1_16_SRO_LD_H
:
3740 case OPC1_16_SRO_LD_W
:
3741 case OPC1_16_SRO_ST_A
:
3742 case OPC1_16_SRO_ST_B
:
3743 case OPC1_16_SRO_ST_H
:
3744 case OPC1_16_SRO_ST_W
:
3745 decode_sro_opc(ctx
, op1
);
3748 case OPC1_16_SSRO_ST_A
:
3749 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3750 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3751 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3753 case OPC1_16_SSRO_ST_B
:
3754 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3755 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3756 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3758 case OPC1_16_SSRO_ST_H
:
3759 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3760 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3761 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3763 case OPC1_16_SSRO_ST_W
:
3764 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3765 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3766 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3769 case OPCM_16_SR_SYSTEM
:
3770 decode_sr_system(ctx
);
3772 case OPCM_16_SR_ACCU
:
3773 decode_sr_accu(ctx
);
3776 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3777 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3779 case OPC1_16_SR_NOT
:
3780 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3781 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3784 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3789 * 32 bit instructions
3793 static void decode_abs_ldw(DisasContext
*ctx
)
3800 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3801 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3802 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3804 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3807 case OPC2_32_ABS_LD_A
:
3808 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3810 case OPC2_32_ABS_LD_D
:
3812 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3814 case OPC2_32_ABS_LD_DA
:
3816 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3818 case OPC2_32_ABS_LD_W
:
3819 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3822 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3826 static void decode_abs_ldb(DisasContext
*ctx
)
3833 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3834 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3835 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3837 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3840 case OPC2_32_ABS_LD_B
:
3841 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3843 case OPC2_32_ABS_LD_BU
:
3844 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3846 case OPC2_32_ABS_LD_H
:
3847 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3849 case OPC2_32_ABS_LD_HU
:
3850 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3853 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3857 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3864 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3865 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3866 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3868 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3871 case OPC2_32_ABS_LDMST
:
3872 gen_ldmst(ctx
, r1
, temp
);
3874 case OPC2_32_ABS_SWAP_W
:
3875 gen_swap(ctx
, r1
, temp
);
3878 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3882 static void decode_abs_ldst_context(DisasContext
*ctx
)
3887 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3888 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3891 case OPC2_32_ABS_LDLCX
:
3892 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3894 case OPC2_32_ABS_LDUCX
:
3895 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3897 case OPC2_32_ABS_STLCX
:
3898 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3900 case OPC2_32_ABS_STUCX
:
3901 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3904 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3908 static void decode_abs_store(DisasContext
*ctx
)
3915 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3916 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3917 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3919 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3922 case OPC2_32_ABS_ST_A
:
3923 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3925 case OPC2_32_ABS_ST_D
:
3927 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3929 case OPC2_32_ABS_ST_DA
:
3931 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3933 case OPC2_32_ABS_ST_W
:
3934 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3937 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3941 static void decode_abs_storeb_h(DisasContext
*ctx
)
3948 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3949 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3950 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3952 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3955 case OPC2_32_ABS_ST_B
:
3956 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3958 case OPC2_32_ABS_ST_H
:
3959 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3962 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3968 static void decode_bit_andacc(DisasContext
*ctx
)
3974 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3975 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3976 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3977 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3978 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3979 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3983 case OPC2_32_BIT_AND_AND_T
:
3984 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3985 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3987 case OPC2_32_BIT_AND_ANDN_T
:
3988 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3989 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3991 case OPC2_32_BIT_AND_NOR_T
:
3992 if (TCG_TARGET_HAS_andc_i32
) {
3993 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3994 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3996 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3997 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
4000 case OPC2_32_BIT_AND_OR_T
:
4001 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4002 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4005 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4009 static void decode_bit_logical_t(DisasContext
*ctx
)
4014 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4015 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4016 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4017 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4018 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4019 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4022 case OPC2_32_BIT_AND_T
:
4023 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4024 pos1
, pos2
, &tcg_gen_and_tl
);
4026 case OPC2_32_BIT_ANDN_T
:
4027 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4028 pos1
, pos2
, &tcg_gen_andc_tl
);
4030 case OPC2_32_BIT_NOR_T
:
4031 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4032 pos1
, pos2
, &tcg_gen_nor_tl
);
4034 case OPC2_32_BIT_OR_T
:
4035 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4036 pos1
, pos2
, &tcg_gen_or_tl
);
4039 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4043 static void decode_bit_insert(DisasContext
*ctx
)
4049 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4050 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4051 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4052 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4053 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4054 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4056 temp
= tcg_temp_new();
4058 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4059 if (op2
== OPC2_32_BIT_INSN_T
) {
4060 tcg_gen_not_tl(temp
, temp
);
4062 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4065 static void decode_bit_logical_t2(DisasContext
*ctx
)
4072 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4073 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4074 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4075 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4076 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4077 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4080 case OPC2_32_BIT_NAND_T
:
4081 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4082 pos1
, pos2
, &tcg_gen_nand_tl
);
4084 case OPC2_32_BIT_ORN_T
:
4085 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4086 pos1
, pos2
, &tcg_gen_orc_tl
);
4088 case OPC2_32_BIT_XNOR_T
:
4089 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4090 pos1
, pos2
, &tcg_gen_eqv_tl
);
4092 case OPC2_32_BIT_XOR_T
:
4093 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4094 pos1
, pos2
, &tcg_gen_xor_tl
);
4097 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4101 static void decode_bit_orand(DisasContext
*ctx
)
4108 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4109 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4110 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4111 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4112 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4113 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4116 case OPC2_32_BIT_OR_AND_T
:
4117 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4118 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4120 case OPC2_32_BIT_OR_ANDN_T
:
4121 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4122 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4124 case OPC2_32_BIT_OR_NOR_T
:
4125 if (TCG_TARGET_HAS_orc_i32
) {
4126 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4127 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4129 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4130 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4133 case OPC2_32_BIT_OR_OR_T
:
4134 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4135 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4138 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4142 static void decode_bit_sh_logic1(DisasContext
*ctx
)
4149 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4150 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4151 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4152 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4153 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4154 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4156 temp
= tcg_temp_new();
4159 case OPC2_32_BIT_SH_AND_T
:
4160 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4161 pos1
, pos2
, &tcg_gen_and_tl
);
4163 case OPC2_32_BIT_SH_ANDN_T
:
4164 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4165 pos1
, pos2
, &tcg_gen_andc_tl
);
4167 case OPC2_32_BIT_SH_NOR_T
:
4168 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4169 pos1
, pos2
, &tcg_gen_nor_tl
);
4171 case OPC2_32_BIT_SH_OR_T
:
4172 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4173 pos1
, pos2
, &tcg_gen_or_tl
);
4176 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4178 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4179 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4182 static void decode_bit_sh_logic2(DisasContext
*ctx
)
4189 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4190 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4191 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4192 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4193 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4194 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4196 temp
= tcg_temp_new();
4199 case OPC2_32_BIT_SH_NAND_T
:
4200 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4201 pos1
, pos2
, &tcg_gen_nand_tl
);
4203 case OPC2_32_BIT_SH_ORN_T
:
4204 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4205 pos1
, pos2
, &tcg_gen_orc_tl
);
4207 case OPC2_32_BIT_SH_XNOR_T
:
4208 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4209 pos1
, pos2
, &tcg_gen_eqv_tl
);
4211 case OPC2_32_BIT_SH_XOR_T
:
4212 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4213 pos1
, pos2
, &tcg_gen_xor_tl
);
4216 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4218 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4219 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4225 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4232 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4233 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4234 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4235 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4238 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4239 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4240 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4241 /* instruction to access the cache */
4243 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4244 case OPC2_32_BO_CACHEA_W_POSTINC
:
4245 case OPC2_32_BO_CACHEA_I_POSTINC
:
4246 /* instruction to access the cache, but we still need to handle
4247 the addressing mode */
4248 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4250 case OPC2_32_BO_CACHEA_WI_PREINC
:
4251 case OPC2_32_BO_CACHEA_W_PREINC
:
4252 case OPC2_32_BO_CACHEA_I_PREINC
:
4253 /* instruction to access the cache, but we still need to handle
4254 the addressing mode */
4255 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4257 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4258 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4259 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4260 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4263 case OPC2_32_BO_CACHEI_W_POSTINC
:
4264 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4265 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4266 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4268 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4271 case OPC2_32_BO_CACHEI_W_PREINC
:
4272 case OPC2_32_BO_CACHEI_WI_PREINC
:
4273 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4274 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4276 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4279 case OPC2_32_BO_ST_A_SHORTOFF
:
4280 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4282 case OPC2_32_BO_ST_A_POSTINC
:
4283 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4285 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4287 case OPC2_32_BO_ST_A_PREINC
:
4288 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4290 case OPC2_32_BO_ST_B_SHORTOFF
:
4291 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4293 case OPC2_32_BO_ST_B_POSTINC
:
4294 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4296 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4298 case OPC2_32_BO_ST_B_PREINC
:
4299 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4301 case OPC2_32_BO_ST_D_SHORTOFF
:
4303 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4306 case OPC2_32_BO_ST_D_POSTINC
:
4308 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4309 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4311 case OPC2_32_BO_ST_D_PREINC
:
4313 temp
= tcg_temp_new();
4314 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4315 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4316 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4318 case OPC2_32_BO_ST_DA_SHORTOFF
:
4320 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4323 case OPC2_32_BO_ST_DA_POSTINC
:
4325 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4326 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4328 case OPC2_32_BO_ST_DA_PREINC
:
4330 temp
= tcg_temp_new();
4331 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4332 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4333 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4335 case OPC2_32_BO_ST_H_SHORTOFF
:
4336 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4338 case OPC2_32_BO_ST_H_POSTINC
:
4339 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4341 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4343 case OPC2_32_BO_ST_H_PREINC
:
4344 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4346 case OPC2_32_BO_ST_Q_SHORTOFF
:
4347 temp
= tcg_temp_new();
4348 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4349 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4351 case OPC2_32_BO_ST_Q_POSTINC
:
4352 temp
= tcg_temp_new();
4353 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4354 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4356 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4358 case OPC2_32_BO_ST_Q_PREINC
:
4359 temp
= tcg_temp_new();
4360 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4361 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4363 case OPC2_32_BO_ST_W_SHORTOFF
:
4364 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4366 case OPC2_32_BO_ST_W_POSTINC
:
4367 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4369 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4371 case OPC2_32_BO_ST_W_PREINC
:
4372 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4375 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4379 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4384 TCGv temp
, temp2
, t_off10
;
4386 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4387 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4388 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4389 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4391 temp
= tcg_temp_new();
4392 temp2
= tcg_temp_new();
4393 t_off10
= tcg_constant_i32(off10
);
4395 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4396 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4399 case OPC2_32_BO_CACHEA_WI_BR
:
4400 case OPC2_32_BO_CACHEA_W_BR
:
4401 case OPC2_32_BO_CACHEA_I_BR
:
4402 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4404 case OPC2_32_BO_CACHEA_WI_CIRC
:
4405 case OPC2_32_BO_CACHEA_W_CIRC
:
4406 case OPC2_32_BO_CACHEA_I_CIRC
:
4407 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4409 case OPC2_32_BO_ST_A_BR
:
4410 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4411 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4413 case OPC2_32_BO_ST_A_CIRC
:
4414 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4415 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4417 case OPC2_32_BO_ST_B_BR
:
4418 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4419 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4421 case OPC2_32_BO_ST_B_CIRC
:
4422 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4423 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4425 case OPC2_32_BO_ST_D_BR
:
4427 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4428 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4430 case OPC2_32_BO_ST_D_CIRC
:
4432 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4433 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4434 tcg_gen_addi_tl(temp
, temp
, 4);
4435 tcg_gen_rem_tl(temp
, temp
, temp2
);
4436 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4437 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4438 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4440 case OPC2_32_BO_ST_DA_BR
:
4442 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4443 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4445 case OPC2_32_BO_ST_DA_CIRC
:
4447 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4448 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4449 tcg_gen_addi_tl(temp
, temp
, 4);
4450 tcg_gen_rem_tl(temp
, temp
, temp2
);
4451 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4452 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4453 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4455 case OPC2_32_BO_ST_H_BR
:
4456 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4457 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4459 case OPC2_32_BO_ST_H_CIRC
:
4460 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4461 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4463 case OPC2_32_BO_ST_Q_BR
:
4464 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4465 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4466 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4468 case OPC2_32_BO_ST_Q_CIRC
:
4469 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4470 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4471 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4473 case OPC2_32_BO_ST_W_BR
:
4474 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4475 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4477 case OPC2_32_BO_ST_W_CIRC
:
4478 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4479 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4482 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4486 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4493 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4494 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4495 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4496 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4499 case OPC2_32_BO_LD_A_SHORTOFF
:
4500 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4502 case OPC2_32_BO_LD_A_POSTINC
:
4503 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4505 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4507 case OPC2_32_BO_LD_A_PREINC
:
4508 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4510 case OPC2_32_BO_LD_B_SHORTOFF
:
4511 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4513 case OPC2_32_BO_LD_B_POSTINC
:
4514 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4516 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4518 case OPC2_32_BO_LD_B_PREINC
:
4519 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4521 case OPC2_32_BO_LD_BU_SHORTOFF
:
4522 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4524 case OPC2_32_BO_LD_BU_POSTINC
:
4525 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4527 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4529 case OPC2_32_BO_LD_BU_PREINC
:
4530 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4532 case OPC2_32_BO_LD_D_SHORTOFF
:
4534 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4537 case OPC2_32_BO_LD_D_POSTINC
:
4539 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4540 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4542 case OPC2_32_BO_LD_D_PREINC
:
4544 temp
= tcg_temp_new();
4545 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4546 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4547 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4549 case OPC2_32_BO_LD_DA_SHORTOFF
:
4551 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4554 case OPC2_32_BO_LD_DA_POSTINC
:
4556 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4557 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4559 case OPC2_32_BO_LD_DA_PREINC
:
4561 temp
= tcg_temp_new();
4562 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4563 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4564 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4566 case OPC2_32_BO_LD_H_SHORTOFF
:
4567 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4569 case OPC2_32_BO_LD_H_POSTINC
:
4570 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4572 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4574 case OPC2_32_BO_LD_H_PREINC
:
4575 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4577 case OPC2_32_BO_LD_HU_SHORTOFF
:
4578 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4580 case OPC2_32_BO_LD_HU_POSTINC
:
4581 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4583 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4585 case OPC2_32_BO_LD_HU_PREINC
:
4586 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4588 case OPC2_32_BO_LD_Q_SHORTOFF
:
4589 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4590 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4592 case OPC2_32_BO_LD_Q_POSTINC
:
4593 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4595 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4596 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4598 case OPC2_32_BO_LD_Q_PREINC
:
4599 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4600 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4602 case OPC2_32_BO_LD_W_SHORTOFF
:
4603 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4605 case OPC2_32_BO_LD_W_POSTINC
:
4606 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4608 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4610 case OPC2_32_BO_LD_W_PREINC
:
4611 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4614 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4618 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4623 TCGv temp
, temp2
, t_off10
;
4625 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4626 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4627 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4628 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4630 temp
= tcg_temp_new();
4631 temp2
= tcg_temp_new();
4632 t_off10
= tcg_constant_i32(off10
);
4634 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4635 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4639 case OPC2_32_BO_LD_A_BR
:
4640 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4641 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4643 case OPC2_32_BO_LD_A_CIRC
:
4644 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4645 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4647 case OPC2_32_BO_LD_B_BR
:
4648 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4649 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4651 case OPC2_32_BO_LD_B_CIRC
:
4652 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4653 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4655 case OPC2_32_BO_LD_BU_BR
:
4656 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4657 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4659 case OPC2_32_BO_LD_BU_CIRC
:
4660 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4661 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4663 case OPC2_32_BO_LD_D_BR
:
4665 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4666 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4668 case OPC2_32_BO_LD_D_CIRC
:
4670 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4671 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4672 tcg_gen_addi_tl(temp
, temp
, 4);
4673 tcg_gen_rem_tl(temp
, temp
, temp2
);
4674 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4675 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4676 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4678 case OPC2_32_BO_LD_DA_BR
:
4680 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4681 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4683 case OPC2_32_BO_LD_DA_CIRC
:
4685 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4686 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4687 tcg_gen_addi_tl(temp
, temp
, 4);
4688 tcg_gen_rem_tl(temp
, temp
, temp2
);
4689 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4690 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4691 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4693 case OPC2_32_BO_LD_H_BR
:
4694 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4695 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4697 case OPC2_32_BO_LD_H_CIRC
:
4698 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4699 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4701 case OPC2_32_BO_LD_HU_BR
:
4702 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4703 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4705 case OPC2_32_BO_LD_HU_CIRC
:
4706 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4707 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4709 case OPC2_32_BO_LD_Q_BR
:
4710 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4711 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4712 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4714 case OPC2_32_BO_LD_Q_CIRC
:
4715 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4716 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4717 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4719 case OPC2_32_BO_LD_W_BR
:
4720 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4721 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4723 case OPC2_32_BO_LD_W_CIRC
:
4724 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4725 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4728 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4732 static void decode_bo_addrmode_stctx_post_pre_base(DisasContext
*ctx
)
4740 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4741 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4742 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4743 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4746 temp
= tcg_temp_new();
4749 case OPC2_32_BO_LDLCX_SHORTOFF
:
4750 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4751 gen_helper_ldlcx(cpu_env
, temp
);
4753 case OPC2_32_BO_LDMST_SHORTOFF
:
4754 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4755 gen_ldmst(ctx
, r1
, temp
);
4757 case OPC2_32_BO_LDMST_POSTINC
:
4758 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4759 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4761 case OPC2_32_BO_LDMST_PREINC
:
4762 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4763 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4765 case OPC2_32_BO_LDUCX_SHORTOFF
:
4766 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4767 gen_helper_lducx(cpu_env
, temp
);
4769 case OPC2_32_BO_LEA_SHORTOFF
:
4770 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4772 case OPC2_32_BO_STLCX_SHORTOFF
:
4773 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4774 gen_helper_stlcx(cpu_env
, temp
);
4776 case OPC2_32_BO_STUCX_SHORTOFF
:
4777 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4778 gen_helper_stucx(cpu_env
, temp
);
4780 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4781 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4782 gen_swap(ctx
, r1
, temp
);
4784 case OPC2_32_BO_SWAP_W_POSTINC
:
4785 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4786 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4788 case OPC2_32_BO_SWAP_W_PREINC
:
4789 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4790 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4792 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
4793 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4794 gen_cmpswap(ctx
, r1
, temp
);
4796 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
4797 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4798 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4800 case OPC2_32_BO_CMPSWAP_W_PREINC
:
4801 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4802 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
4804 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
4805 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4806 gen_swapmsk(ctx
, r1
, temp
);
4808 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
4809 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4810 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4812 case OPC2_32_BO_SWAPMSK_W_PREINC
:
4813 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4814 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
4817 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4821 static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext
*ctx
)
4826 TCGv temp
, temp2
, t_off10
;
4828 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4829 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4830 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4831 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4833 temp
= tcg_temp_new();
4834 temp2
= tcg_temp_new();
4835 t_off10
= tcg_constant_i32(off10
);
4837 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4838 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4841 case OPC2_32_BO_LDMST_BR
:
4842 gen_ldmst(ctx
, r1
, temp2
);
4843 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4845 case OPC2_32_BO_LDMST_CIRC
:
4846 gen_ldmst(ctx
, r1
, temp2
);
4847 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4849 case OPC2_32_BO_SWAP_W_BR
:
4850 gen_swap(ctx
, r1
, temp2
);
4851 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4853 case OPC2_32_BO_SWAP_W_CIRC
:
4854 gen_swap(ctx
, r1
, temp2
);
4855 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4857 case OPC2_32_BO_CMPSWAP_W_BR
:
4858 gen_cmpswap(ctx
, r1
, temp2
);
4859 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4861 case OPC2_32_BO_CMPSWAP_W_CIRC
:
4862 gen_cmpswap(ctx
, r1
, temp2
);
4863 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4865 case OPC2_32_BO_SWAPMSK_W_BR
:
4866 gen_swapmsk(ctx
, r1
, temp2
);
4867 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4869 case OPC2_32_BO_SWAPMSK_W_CIRC
:
4870 gen_swapmsk(ctx
, r1
, temp2
);
4871 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4874 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4878 static void decode_bol_opc(DisasContext
*ctx
, int32_t op1
)
4884 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4885 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4886 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4889 case OPC1_32_BOL_LD_A_LONGOFF
:
4890 temp
= tcg_temp_new();
4891 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4892 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4894 case OPC1_32_BOL_LD_W_LONGOFF
:
4895 temp
= tcg_temp_new();
4896 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4897 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4899 case OPC1_32_BOL_LEA_LONGOFF
:
4900 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4902 case OPC1_32_BOL_ST_A_LONGOFF
:
4903 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4904 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4906 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4909 case OPC1_32_BOL_ST_W_LONGOFF
:
4910 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4912 case OPC1_32_BOL_LD_B_LONGOFF
:
4913 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4914 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4916 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4919 case OPC1_32_BOL_LD_BU_LONGOFF
:
4920 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4921 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4923 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4926 case OPC1_32_BOL_LD_H_LONGOFF
:
4927 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4928 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4930 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4933 case OPC1_32_BOL_LD_HU_LONGOFF
:
4934 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4935 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4937 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4940 case OPC1_32_BOL_ST_B_LONGOFF
:
4941 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4942 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4944 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4947 case OPC1_32_BOL_ST_H_LONGOFF
:
4948 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
4949 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4951 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4955 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4960 static void decode_rc_logical_shift(DisasContext
*ctx
)
4967 r2
= MASK_OP_RC_D(ctx
->opcode
);
4968 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4969 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4970 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4972 temp
= tcg_temp_new();
4975 case OPC2_32_RC_AND
:
4976 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4978 case OPC2_32_RC_ANDN
:
4979 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4981 case OPC2_32_RC_NAND
:
4982 tcg_gen_movi_tl(temp
, const9
);
4983 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4985 case OPC2_32_RC_NOR
:
4986 tcg_gen_movi_tl(temp
, const9
);
4987 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4990 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4992 case OPC2_32_RC_ORN
:
4993 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4996 const9
= sextract32(const9
, 0, 6);
4997 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4999 case OPC2_32_RC_SH_H
:
5000 const9
= sextract32(const9
, 0, 5);
5001 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5003 case OPC2_32_RC_SHA
:
5004 const9
= sextract32(const9
, 0, 6);
5005 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5007 case OPC2_32_RC_SHA_H
:
5008 const9
= sextract32(const9
, 0, 5);
5009 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5011 case OPC2_32_RC_SHAS
:
5012 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5014 case OPC2_32_RC_XNOR
:
5015 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5016 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5018 case OPC2_32_RC_XOR
:
5019 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5021 case OPC2_32_RC_SHUFFLE
:
5022 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
5023 TCGv temp
= tcg_constant_i32(const9
);
5024 gen_helper_shuffle(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5026 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5030 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5034 static void decode_rc_accumulator(DisasContext
*ctx
)
5042 r2
= MASK_OP_RC_D(ctx
->opcode
);
5043 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5044 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5046 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5048 temp
= tcg_temp_new();
5051 case OPC2_32_RC_ABSDIF
:
5052 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5054 case OPC2_32_RC_ABSDIFS
:
5055 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5057 case OPC2_32_RC_ADD
:
5058 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5060 case OPC2_32_RC_ADDC
:
5061 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5063 case OPC2_32_RC_ADDS
:
5064 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5066 case OPC2_32_RC_ADDS_U
:
5067 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5069 case OPC2_32_RC_ADDX
:
5070 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5072 case OPC2_32_RC_AND_EQ
:
5073 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5074 const9
, &tcg_gen_and_tl
);
5076 case OPC2_32_RC_AND_GE
:
5077 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5078 const9
, &tcg_gen_and_tl
);
5080 case OPC2_32_RC_AND_GE_U
:
5081 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5082 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5083 const9
, &tcg_gen_and_tl
);
5085 case OPC2_32_RC_AND_LT
:
5086 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5087 const9
, &tcg_gen_and_tl
);
5089 case OPC2_32_RC_AND_LT_U
:
5090 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5091 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5092 const9
, &tcg_gen_and_tl
);
5094 case OPC2_32_RC_AND_NE
:
5095 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5096 const9
, &tcg_gen_and_tl
);
5099 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5101 case OPC2_32_RC_EQANY_B
:
5102 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5104 case OPC2_32_RC_EQANY_H
:
5105 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5108 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5110 case OPC2_32_RC_GE_U
:
5111 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5112 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5115 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5117 case OPC2_32_RC_LT_U
:
5118 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5119 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5121 case OPC2_32_RC_MAX
:
5122 tcg_gen_movi_tl(temp
, const9
);
5123 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5124 cpu_gpr_d
[r1
], temp
);
5126 case OPC2_32_RC_MAX_U
:
5127 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5128 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5129 cpu_gpr_d
[r1
], temp
);
5131 case OPC2_32_RC_MIN
:
5132 tcg_gen_movi_tl(temp
, const9
);
5133 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5134 cpu_gpr_d
[r1
], temp
);
5136 case OPC2_32_RC_MIN_U
:
5137 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5138 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5139 cpu_gpr_d
[r1
], temp
);
5142 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5144 case OPC2_32_RC_OR_EQ
:
5145 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5146 const9
, &tcg_gen_or_tl
);
5148 case OPC2_32_RC_OR_GE
:
5149 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5150 const9
, &tcg_gen_or_tl
);
5152 case OPC2_32_RC_OR_GE_U
:
5153 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5154 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5155 const9
, &tcg_gen_or_tl
);
5157 case OPC2_32_RC_OR_LT
:
5158 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5159 const9
, &tcg_gen_or_tl
);
5161 case OPC2_32_RC_OR_LT_U
:
5162 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5163 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5164 const9
, &tcg_gen_or_tl
);
5166 case OPC2_32_RC_OR_NE
:
5167 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5168 const9
, &tcg_gen_or_tl
);
5170 case OPC2_32_RC_RSUB
:
5171 tcg_gen_movi_tl(temp
, const9
);
5172 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5174 case OPC2_32_RC_RSUBS
:
5175 tcg_gen_movi_tl(temp
, const9
);
5176 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5178 case OPC2_32_RC_RSUBS_U
:
5179 tcg_gen_movi_tl(temp
, const9
);
5180 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5182 case OPC2_32_RC_SH_EQ
:
5183 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5185 case OPC2_32_RC_SH_GE
:
5186 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5188 case OPC2_32_RC_SH_GE_U
:
5189 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5190 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5192 case OPC2_32_RC_SH_LT
:
5193 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5195 case OPC2_32_RC_SH_LT_U
:
5196 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5197 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5199 case OPC2_32_RC_SH_NE
:
5200 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5202 case OPC2_32_RC_XOR_EQ
:
5203 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5204 const9
, &tcg_gen_xor_tl
);
5206 case OPC2_32_RC_XOR_GE
:
5207 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5208 const9
, &tcg_gen_xor_tl
);
5210 case OPC2_32_RC_XOR_GE_U
:
5211 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5212 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5213 const9
, &tcg_gen_xor_tl
);
5215 case OPC2_32_RC_XOR_LT
:
5216 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5217 const9
, &tcg_gen_xor_tl
);
5219 case OPC2_32_RC_XOR_LT_U
:
5220 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5221 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5222 const9
, &tcg_gen_xor_tl
);
5224 case OPC2_32_RC_XOR_NE
:
5225 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5226 const9
, &tcg_gen_xor_tl
);
5229 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5233 static void decode_rc_serviceroutine(DisasContext
*ctx
)
5238 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5239 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5242 case OPC2_32_RC_BISR
:
5243 if (ctx
->priv
== TRICORE_PRIV_SM
) {
5244 gen_helper_1arg(bisr
, const9
);
5246 generate_trap(ctx
, TRAPC_PROT
, TIN1_PRIV
);
5249 case OPC2_32_RC_SYSCALL
:
5250 generate_trap(ctx
, TRAPC_SYSCALL
, const9
& 0xff);
5253 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5257 static void decode_rc_mul(DisasContext
*ctx
)
5263 r2
= MASK_OP_RC_D(ctx
->opcode
);
5264 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5265 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5267 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5270 case OPC2_32_RC_MUL_32
:
5271 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5273 case OPC2_32_RC_MUL_64
:
5275 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5277 case OPC2_32_RC_MULS_32
:
5278 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5280 case OPC2_32_RC_MUL_U_64
:
5281 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5283 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5285 case OPC2_32_RC_MULS_U_32
:
5286 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5287 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5290 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5295 static void decode_rcpw_insert(DisasContext
*ctx
)
5299 int32_t pos
, width
, const4
;
5303 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5304 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5305 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5306 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5307 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5308 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5311 case OPC2_32_RCPW_IMASK
:
5313 /* if pos + width > 32 undefined result */
5314 if (pos
+ width
<= 32) {
5315 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5316 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5319 case OPC2_32_RCPW_INSERT
:
5320 /* if pos + width > 32 undefined result */
5321 if (pos
+ width
<= 32) {
5322 temp
= tcg_constant_i32(const4
);
5323 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5327 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5333 static void decode_rcrw_insert(DisasContext
*ctx
)
5337 int32_t width
, const4
;
5339 TCGv temp
, temp2
, temp3
;
5341 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5342 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5343 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5344 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5345 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5346 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5348 temp
= tcg_temp_new();
5349 temp2
= tcg_temp_new();
5352 case OPC2_32_RCRW_IMASK
:
5354 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
5355 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5356 tcg_gen_shl_tl(cpu_gpr_d
[r4
+ 1], temp2
, temp
);
5357 tcg_gen_movi_tl(temp2
, const4
);
5358 tcg_gen_shl_tl(cpu_gpr_d
[r4
], temp2
, temp
);
5360 case OPC2_32_RCRW_INSERT
:
5361 temp3
= tcg_temp_new();
5363 tcg_gen_movi_tl(temp
, width
);
5364 tcg_gen_movi_tl(temp2
, const4
);
5365 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5366 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5369 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5375 static void decode_rcr_cond_select(DisasContext
*ctx
)
5383 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5384 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5385 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5386 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5387 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5390 case OPC2_32_RCR_CADD
:
5391 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5394 case OPC2_32_RCR_CADDN
:
5395 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r4
],
5398 case OPC2_32_RCR_SEL
:
5399 temp
= tcg_constant_i32(0);
5400 temp2
= tcg_constant_i32(const9
);
5401 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5402 cpu_gpr_d
[r1
], temp2
);
5404 case OPC2_32_RCR_SELN
:
5405 temp
= tcg_constant_i32(0);
5406 temp2
= tcg_constant_i32(const9
);
5407 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5408 cpu_gpr_d
[r1
], temp2
);
5411 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5415 static void decode_rcr_madd(DisasContext
*ctx
)
5422 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5423 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5424 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5425 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5426 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5429 case OPC2_32_RCR_MADD_32
:
5430 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5432 case OPC2_32_RCR_MADD_64
:
5435 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5436 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5438 case OPC2_32_RCR_MADDS_32
:
5439 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5441 case OPC2_32_RCR_MADDS_64
:
5444 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5445 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5447 case OPC2_32_RCR_MADD_U_64
:
5450 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5451 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5452 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5454 case OPC2_32_RCR_MADDS_U_32
:
5455 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5456 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5458 case OPC2_32_RCR_MADDS_U_64
:
5461 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5462 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5463 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5466 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5470 static void decode_rcr_msub(DisasContext
*ctx
)
5477 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5478 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5479 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5480 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5481 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5484 case OPC2_32_RCR_MSUB_32
:
5485 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5487 case OPC2_32_RCR_MSUB_64
:
5490 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5491 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5493 case OPC2_32_RCR_MSUBS_32
:
5494 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5496 case OPC2_32_RCR_MSUBS_64
:
5499 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5500 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5502 case OPC2_32_RCR_MSUB_U_64
:
5505 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5506 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5507 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5509 case OPC2_32_RCR_MSUBS_U_32
:
5510 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5511 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5513 case OPC2_32_RCR_MSUBS_U_64
:
5516 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5517 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5518 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5521 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5527 static void decode_rlc_opc(DisasContext
*ctx
,
5533 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5534 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5535 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5538 case OPC1_32_RLC_ADDI
:
5539 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5541 case OPC1_32_RLC_ADDIH
:
5542 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5544 case OPC1_32_RLC_ADDIH_A
:
5545 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5547 case OPC1_32_RLC_MFCR
:
5548 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5549 gen_mfcr(ctx
, cpu_gpr_d
[r2
], const16
);
5551 case OPC1_32_RLC_MOV
:
5552 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5554 case OPC1_32_RLC_MOV_64
:
5555 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5557 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5558 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5560 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5563 case OPC1_32_RLC_MOV_U
:
5564 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5565 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5567 case OPC1_32_RLC_MOV_H
:
5568 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5570 case OPC1_32_RLC_MOVH_A
:
5571 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5573 case OPC1_32_RLC_MTCR
:
5574 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5575 gen_mtcr(ctx
, cpu_gpr_d
[r1
], const16
);
5578 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5583 static void decode_rr_accumulator(DisasContext
*ctx
)
5590 r3
= MASK_OP_RR_D(ctx
->opcode
);
5591 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5592 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5593 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5596 case OPC2_32_RR_ABS
:
5597 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5599 case OPC2_32_RR_ABS_B
:
5600 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5602 case OPC2_32_RR_ABS_H
:
5603 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5605 case OPC2_32_RR_ABSDIF
:
5606 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5608 case OPC2_32_RR_ABSDIF_B
:
5609 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5612 case OPC2_32_RR_ABSDIF_H
:
5613 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5616 case OPC2_32_RR_ABSDIFS
:
5617 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5620 case OPC2_32_RR_ABSDIFS_H
:
5621 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5624 case OPC2_32_RR_ABSS
:
5625 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5627 case OPC2_32_RR_ABSS_H
:
5628 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5630 case OPC2_32_RR_ADD
:
5631 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5633 case OPC2_32_RR_ADD_B
:
5634 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5636 case OPC2_32_RR_ADD_H
:
5637 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5639 case OPC2_32_RR_ADDC
:
5640 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5642 case OPC2_32_RR_ADDS
:
5643 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5645 case OPC2_32_RR_ADDS_H
:
5646 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5649 case OPC2_32_RR_ADDS_HU
:
5650 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5653 case OPC2_32_RR_ADDS_U
:
5654 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5657 case OPC2_32_RR_ADDX
:
5658 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5660 case OPC2_32_RR_AND_EQ
:
5661 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5662 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5664 case OPC2_32_RR_AND_GE
:
5665 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5666 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5668 case OPC2_32_RR_AND_GE_U
:
5669 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5670 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5672 case OPC2_32_RR_AND_LT
:
5673 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5674 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5676 case OPC2_32_RR_AND_LT_U
:
5677 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5678 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5680 case OPC2_32_RR_AND_NE
:
5681 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5682 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5685 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5688 case OPC2_32_RR_EQ_B
:
5689 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5691 case OPC2_32_RR_EQ_H
:
5692 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5694 case OPC2_32_RR_EQ_W
:
5695 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5697 case OPC2_32_RR_EQANY_B
:
5698 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5700 case OPC2_32_RR_EQANY_H
:
5701 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5704 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5707 case OPC2_32_RR_GE_U
:
5708 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5712 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5715 case OPC2_32_RR_LT_U
:
5716 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5719 case OPC2_32_RR_LT_B
:
5720 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5722 case OPC2_32_RR_LT_BU
:
5723 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5725 case OPC2_32_RR_LT_H
:
5726 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5728 case OPC2_32_RR_LT_HU
:
5729 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5731 case OPC2_32_RR_LT_W
:
5732 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5734 case OPC2_32_RR_LT_WU
:
5735 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5737 case OPC2_32_RR_MAX
:
5738 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5739 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5741 case OPC2_32_RR_MAX_U
:
5742 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5743 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5745 case OPC2_32_RR_MAX_B
:
5746 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5748 case OPC2_32_RR_MAX_BU
:
5749 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5751 case OPC2_32_RR_MAX_H
:
5752 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5754 case OPC2_32_RR_MAX_HU
:
5755 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5757 case OPC2_32_RR_MIN
:
5758 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5759 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5761 case OPC2_32_RR_MIN_U
:
5762 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5763 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5765 case OPC2_32_RR_MIN_B
:
5766 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5768 case OPC2_32_RR_MIN_BU
:
5769 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5771 case OPC2_32_RR_MIN_H
:
5772 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5774 case OPC2_32_RR_MIN_HU
:
5775 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5777 case OPC2_32_RR_MOV
:
5778 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5780 case OPC2_32_RR_MOV_64
:
5781 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5782 temp
= tcg_temp_new();
5785 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5786 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5787 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5789 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5792 case OPC2_32_RR_MOVS_64
:
5793 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5795 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5796 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5798 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5802 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5805 case OPC2_32_RR_OR_EQ
:
5806 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5807 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5809 case OPC2_32_RR_OR_GE
:
5810 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5811 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5813 case OPC2_32_RR_OR_GE_U
:
5814 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5815 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5817 case OPC2_32_RR_OR_LT
:
5818 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5819 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5821 case OPC2_32_RR_OR_LT_U
:
5822 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5823 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5825 case OPC2_32_RR_OR_NE
:
5826 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5827 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5829 case OPC2_32_RR_SAT_B
:
5830 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5832 case OPC2_32_RR_SAT_BU
:
5833 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5835 case OPC2_32_RR_SAT_H
:
5836 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5838 case OPC2_32_RR_SAT_HU
:
5839 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5841 case OPC2_32_RR_SH_EQ
:
5842 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5845 case OPC2_32_RR_SH_GE
:
5846 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5849 case OPC2_32_RR_SH_GE_U
:
5850 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5853 case OPC2_32_RR_SH_LT
:
5854 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5857 case OPC2_32_RR_SH_LT_U
:
5858 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5861 case OPC2_32_RR_SH_NE
:
5862 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5865 case OPC2_32_RR_SUB
:
5866 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5868 case OPC2_32_RR_SUB_B
:
5869 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5871 case OPC2_32_RR_SUB_H
:
5872 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5874 case OPC2_32_RR_SUBC
:
5875 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5877 case OPC2_32_RR_SUBS
:
5878 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5880 case OPC2_32_RR_SUBS_U
:
5881 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5883 case OPC2_32_RR_SUBS_H
:
5884 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5887 case OPC2_32_RR_SUBS_HU
:
5888 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5891 case OPC2_32_RR_SUBX
:
5892 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5894 case OPC2_32_RR_XOR_EQ
:
5895 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5896 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5898 case OPC2_32_RR_XOR_GE
:
5899 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5900 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5902 case OPC2_32_RR_XOR_GE_U
:
5903 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5904 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5906 case OPC2_32_RR_XOR_LT
:
5907 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5908 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5910 case OPC2_32_RR_XOR_LT_U
:
5911 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5912 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5914 case OPC2_32_RR_XOR_NE
:
5915 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5916 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5919 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5923 static void decode_rr_logical_shift(DisasContext
*ctx
)
5928 r3
= MASK_OP_RR_D(ctx
->opcode
);
5929 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5930 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5931 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5934 case OPC2_32_RR_AND
:
5935 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5937 case OPC2_32_RR_ANDN
:
5938 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5940 case OPC2_32_RR_CLO
:
5941 tcg_gen_not_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5942 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], TARGET_LONG_BITS
);
5944 case OPC2_32_RR_CLO_H
:
5945 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5947 case OPC2_32_RR_CLS
:
5948 tcg_gen_clrsb_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5950 case OPC2_32_RR_CLS_H
:
5951 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5953 case OPC2_32_RR_CLZ
:
5954 tcg_gen_clzi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], TARGET_LONG_BITS
);
5956 case OPC2_32_RR_CLZ_H
:
5957 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5959 case OPC2_32_RR_NAND
:
5960 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5962 case OPC2_32_RR_NOR
:
5963 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5966 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5968 case OPC2_32_RR_ORN
:
5969 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5972 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5974 case OPC2_32_RR_SH_H
:
5975 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5977 case OPC2_32_RR_SHA
:
5978 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5980 case OPC2_32_RR_SHA_H
:
5981 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5983 case OPC2_32_RR_SHAS
:
5984 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5986 case OPC2_32_RR_XNOR
:
5987 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5989 case OPC2_32_RR_XOR
:
5990 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5993 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5997 static void decode_rr_address(DisasContext
*ctx
)
6003 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6004 r3
= MASK_OP_RR_D(ctx
->opcode
);
6005 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6006 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6007 n
= MASK_OP_RR_N(ctx
->opcode
);
6010 case OPC2_32_RR_ADD_A
:
6011 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6013 case OPC2_32_RR_ADDSC_A
:
6014 temp
= tcg_temp_new();
6015 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
6016 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6018 case OPC2_32_RR_ADDSC_AT
:
6019 temp
= tcg_temp_new();
6020 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6021 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6022 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6024 case OPC2_32_RR_EQ_A
:
6025 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6028 case OPC2_32_RR_EQZ
:
6029 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6031 case OPC2_32_RR_GE_A
:
6032 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6035 case OPC2_32_RR_LT_A
:
6036 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6039 case OPC2_32_RR_MOV_A
:
6040 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6042 case OPC2_32_RR_MOV_AA
:
6043 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6045 case OPC2_32_RR_MOV_D
:
6046 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6048 case OPC2_32_RR_NE_A
:
6049 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6052 case OPC2_32_RR_NEZ_A
:
6053 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6055 case OPC2_32_RR_SUB_A
:
6056 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6059 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6063 static void decode_rr_idirect(DisasContext
*ctx
)
6068 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6069 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6073 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6075 case OPC2_32_RR_JLI
:
6076 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6077 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
6079 case OPC2_32_RR_CALLI
:
6080 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
6081 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6083 case OPC2_32_RR_FCALLI
:
6084 gen_fcall_save_ctx(ctx
);
6085 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6088 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6091 ctx
->base
.is_jmp
= DISAS_JUMP
;
6094 static void decode_rr_divide(DisasContext
*ctx
)
6099 TCGv temp
, temp2
, temp3
;
6101 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6102 r3
= MASK_OP_RR_D(ctx
->opcode
);
6103 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6104 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6107 case OPC2_32_RR_BMERGE
:
6108 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6110 case OPC2_32_RR_BSPLIT
:
6112 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6114 case OPC2_32_RR_DVINIT_B
:
6116 gen_dvinit_b(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6119 case OPC2_32_RR_DVINIT_BU
:
6120 temp
= tcg_temp_new();
6121 temp2
= tcg_temp_new();
6122 temp3
= tcg_temp_new();
6124 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6126 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6127 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6128 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6129 tcg_gen_abs_tl(temp
, temp3
);
6130 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6131 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6133 /* overflow = (D[b] == 0) */
6134 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6136 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6138 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6140 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6141 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6143 case OPC2_32_RR_DVINIT_H
:
6145 gen_dvinit_h(ctx
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6148 case OPC2_32_RR_DVINIT_HU
:
6149 temp
= tcg_temp_new();
6150 temp2
= tcg_temp_new();
6151 temp3
= tcg_temp_new();
6153 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6155 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6156 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
6157 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6158 tcg_gen_abs_tl(temp
, temp3
);
6159 tcg_gen_abs_tl(temp2
, cpu_gpr_d
[r2
]);
6160 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6162 /* overflow = (D[b] == 0) */
6163 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6165 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6167 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6169 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6170 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6172 case OPC2_32_RR_DVINIT
:
6173 temp
= tcg_temp_new();
6174 temp2
= tcg_temp_new();
6176 /* overflow = ((D[b] == 0) ||
6177 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6178 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6179 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6180 tcg_gen_and_tl(temp
, temp
, temp2
);
6181 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6182 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6183 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6185 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6187 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6189 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6190 /* sign extend to high reg */
6191 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6193 case OPC2_32_RR_DVINIT_U
:
6195 /* overflow = (D[b] == 0) */
6196 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6197 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6199 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6201 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6203 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6204 /* zero extend to high reg*/
6205 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6207 case OPC2_32_RR_PARITY
:
6208 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6210 case OPC2_32_RR_UNPACK
:
6212 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6214 case OPC2_32_RR_CRC32_B
:
6215 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6216 gen_helper_crc32b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6218 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6221 case OPC2_32_RR_CRC32
: /* CRC32B.W in 1.6.2 */
6222 if (has_feature(ctx
, TRICORE_FEATURE_161
)) {
6223 gen_helper_crc32_be(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6225 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6228 case OPC2_32_RR_CRC32L_W
:
6229 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6230 gen_helper_crc32_le(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6232 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6236 case OPC2_32_RR_POPCNT_W
:
6237 if (has_feature(ctx
, TRICORE_FEATURE_162
)) {
6238 tcg_gen_ctpop_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6240 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6243 case OPC2_32_RR_DIV
:
6244 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6246 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6249 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6252 case OPC2_32_RR_DIV_U
:
6253 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
6255 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6256 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6258 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6261 case OPC2_32_RR_MUL_F
:
6262 gen_helper_fmul(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6264 case OPC2_32_RR_DIV_F
:
6265 gen_helper_fdiv(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6267 case OPC2_32_RR_CMP_F
:
6268 gen_helper_fcmp(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6270 case OPC2_32_RR_FTOI
:
6271 gen_helper_ftoi(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6273 case OPC2_32_RR_ITOF
:
6274 gen_helper_itof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6276 case OPC2_32_RR_FTOUZ
:
6277 gen_helper_ftouz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6279 case OPC2_32_RR_UPDFL
:
6280 gen_helper_updfl(cpu_env
, cpu_gpr_d
[r1
]);
6282 case OPC2_32_RR_UTOF
:
6283 gen_helper_utof(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6285 case OPC2_32_RR_FTOIZ
:
6286 gen_helper_ftoiz(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6288 case OPC2_32_RR_QSEED_F
:
6289 gen_helper_qseed(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
]);
6292 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6297 static void decode_rr1_mul(DisasContext
*ctx
)
6305 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6306 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6307 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6308 n
= tcg_constant_i32(MASK_OP_RR1_N(ctx
->opcode
));
6309 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6312 case OPC2_32_RR1_MUL_H_32_LL
:
6313 temp64
= tcg_temp_new_i64();
6315 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6316 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6317 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6319 case OPC2_32_RR1_MUL_H_32_LU
:
6320 temp64
= tcg_temp_new_i64();
6322 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6323 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6324 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6326 case OPC2_32_RR1_MUL_H_32_UL
:
6327 temp64
= tcg_temp_new_i64();
6329 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6330 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6331 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6333 case OPC2_32_RR1_MUL_H_32_UU
:
6334 temp64
= tcg_temp_new_i64();
6336 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6337 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6338 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6340 case OPC2_32_RR1_MULM_H_64_LL
:
6341 temp64
= tcg_temp_new_i64();
6343 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6344 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6346 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6348 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6350 case OPC2_32_RR1_MULM_H_64_LU
:
6351 temp64
= tcg_temp_new_i64();
6353 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6354 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6356 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6358 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6360 case OPC2_32_RR1_MULM_H_64_UL
:
6361 temp64
= tcg_temp_new_i64();
6363 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6364 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6366 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6368 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6370 case OPC2_32_RR1_MULM_H_64_UU
:
6371 temp64
= tcg_temp_new_i64();
6373 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6374 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6376 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6378 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6380 case OPC2_32_RR1_MULR_H_16_LL
:
6381 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6382 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6384 case OPC2_32_RR1_MULR_H_16_LU
:
6385 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6386 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6388 case OPC2_32_RR1_MULR_H_16_UL
:
6389 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6390 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6392 case OPC2_32_RR1_MULR_H_16_UU
:
6393 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6394 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6397 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6401 static void decode_rr1_mulq(DisasContext
*ctx
)
6409 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6410 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6411 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6412 n
= MASK_OP_RR1_N(ctx
->opcode
);
6413 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6415 temp
= tcg_temp_new();
6416 temp2
= tcg_temp_new();
6419 case OPC2_32_RR1_MUL_Q_32
:
6420 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6422 case OPC2_32_RR1_MUL_Q_64
:
6424 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6427 case OPC2_32_RR1_MUL_Q_32_L
:
6428 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6429 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6431 case OPC2_32_RR1_MUL_Q_64_L
:
6433 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6434 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6436 case OPC2_32_RR1_MUL_Q_32_U
:
6437 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6438 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6440 case OPC2_32_RR1_MUL_Q_64_U
:
6442 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6443 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6445 case OPC2_32_RR1_MUL_Q_32_LL
:
6446 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6447 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6448 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6450 case OPC2_32_RR1_MUL_Q_32_UU
:
6451 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6452 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6453 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6455 case OPC2_32_RR1_MULR_Q_32_L
:
6456 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6457 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6458 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6460 case OPC2_32_RR1_MULR_Q_32_U
:
6461 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6462 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6463 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6466 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6471 static void decode_rr2_mul(DisasContext
*ctx
)
6476 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6477 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6478 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6479 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6481 case OPC2_32_RR2_MUL_32
:
6482 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6484 case OPC2_32_RR2_MUL_64
:
6486 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6489 case OPC2_32_RR2_MULS_32
:
6490 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6493 case OPC2_32_RR2_MUL_U_64
:
6495 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6498 case OPC2_32_RR2_MULS_U_32
:
6499 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6503 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6508 static void decode_rrpw_extract_insert(DisasContext
*ctx
)
6515 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6516 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6517 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6518 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6519 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6520 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6523 case OPC2_32_RRPW_EXTR
:
6525 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6529 if (pos
+ width
<= 32) {
6530 /* optimize special cases */
6531 if ((pos
== 0) && (width
== 8)) {
6532 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6533 } else if ((pos
== 0) && (width
== 16)) {
6534 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6536 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6537 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6541 case OPC2_32_RRPW_EXTR_U
:
6543 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6545 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6546 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6549 case OPC2_32_RRPW_IMASK
:
6552 if (pos
+ width
<= 32) {
6553 temp
= tcg_temp_new();
6554 tcg_gen_movi_tl(temp
, ((1u << width
) - 1) << pos
);
6555 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6556 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
6560 case OPC2_32_RRPW_INSERT
:
6561 if (pos
+ width
<= 32) {
6562 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6567 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6572 static void decode_rrr_cond_select(DisasContext
*ctx
)
6578 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6579 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6580 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6581 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6582 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6585 case OPC2_32_RRR_CADD
:
6586 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6587 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6589 case OPC2_32_RRR_CADDN
:
6590 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6593 case OPC2_32_RRR_CSUB
:
6594 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6597 case OPC2_32_RRR_CSUBN
:
6598 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6601 case OPC2_32_RRR_SEL
:
6602 temp
= tcg_constant_i32(0);
6603 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6604 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6606 case OPC2_32_RRR_SELN
:
6607 temp
= tcg_constant_i32(0);
6608 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6609 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6612 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6616 static void decode_rrr_divide(DisasContext
*ctx
)
6622 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6623 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6624 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6625 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6626 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6629 case OPC2_32_RRR_DVADJ
:
6632 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6633 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6635 case OPC2_32_RRR_DVSTEP
:
6638 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6639 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6641 case OPC2_32_RRR_DVSTEP_U
:
6644 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6645 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6647 case OPC2_32_RRR_IXMAX
:
6650 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6651 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6653 case OPC2_32_RRR_IXMAX_U
:
6656 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6657 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6659 case OPC2_32_RRR_IXMIN
:
6662 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6663 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6665 case OPC2_32_RRR_IXMIN_U
:
6668 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6669 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6671 case OPC2_32_RRR_PACK
:
6673 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6674 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6676 case OPC2_32_RRR_ADD_F
:
6677 gen_helper_fadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6679 case OPC2_32_RRR_SUB_F
:
6680 gen_helper_fsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r3
]);
6682 case OPC2_32_RRR_MADD_F
:
6683 gen_helper_fmadd(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6684 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6686 case OPC2_32_RRR_MSUB_F
:
6687 gen_helper_fmsub(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6688 cpu_gpr_d
[r2
], cpu_gpr_d
[r3
]);
6691 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6696 static void decode_rrr2_madd(DisasContext
*ctx
)
6699 uint32_t r1
, r2
, r3
, r4
;
6701 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6702 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6703 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6704 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6705 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6707 case OPC2_32_RRR2_MADD_32
:
6708 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6711 case OPC2_32_RRR2_MADD_64
:
6714 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6715 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6717 case OPC2_32_RRR2_MADDS_32
:
6718 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6719 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6721 case OPC2_32_RRR2_MADDS_64
:
6724 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6725 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6727 case OPC2_32_RRR2_MADD_U_64
:
6730 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6731 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6733 case OPC2_32_RRR2_MADDS_U_32
:
6734 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6735 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6737 case OPC2_32_RRR2_MADDS_U_64
:
6740 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6741 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6744 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6748 static void decode_rrr2_msub(DisasContext
*ctx
)
6751 uint32_t r1
, r2
, r3
, r4
;
6753 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6754 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6755 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6756 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6757 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6760 case OPC2_32_RRR2_MSUB_32
:
6761 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6764 case OPC2_32_RRR2_MSUB_64
:
6767 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6768 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6770 case OPC2_32_RRR2_MSUBS_32
:
6771 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6772 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6774 case OPC2_32_RRR2_MSUBS_64
:
6777 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6778 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6780 case OPC2_32_RRR2_MSUB_U_64
:
6783 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6784 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6786 case OPC2_32_RRR2_MSUBS_U_32
:
6787 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6788 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6790 case OPC2_32_RRR2_MSUBS_U_64
:
6793 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6794 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6797 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6802 static void decode_rrr1_madd(DisasContext
*ctx
)
6805 uint32_t r1
, r2
, r3
, r4
, n
;
6807 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6808 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6809 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6810 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6811 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6812 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6815 case OPC2_32_RRR1_MADD_H_LL
:
6818 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6819 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6821 case OPC2_32_RRR1_MADD_H_LU
:
6824 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6825 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6827 case OPC2_32_RRR1_MADD_H_UL
:
6830 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6831 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6833 case OPC2_32_RRR1_MADD_H_UU
:
6836 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6837 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6839 case OPC2_32_RRR1_MADDS_H_LL
:
6842 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6843 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6845 case OPC2_32_RRR1_MADDS_H_LU
:
6848 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6849 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6851 case OPC2_32_RRR1_MADDS_H_UL
:
6854 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6855 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6857 case OPC2_32_RRR1_MADDS_H_UU
:
6860 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6861 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6863 case OPC2_32_RRR1_MADDM_H_LL
:
6866 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6867 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6869 case OPC2_32_RRR1_MADDM_H_LU
:
6872 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6873 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6875 case OPC2_32_RRR1_MADDM_H_UL
:
6878 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6879 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6881 case OPC2_32_RRR1_MADDM_H_UU
:
6884 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6885 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6887 case OPC2_32_RRR1_MADDMS_H_LL
:
6890 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6891 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6893 case OPC2_32_RRR1_MADDMS_H_LU
:
6896 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6897 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6899 case OPC2_32_RRR1_MADDMS_H_UL
:
6902 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6903 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6905 case OPC2_32_RRR1_MADDMS_H_UU
:
6908 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6909 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6911 case OPC2_32_RRR1_MADDR_H_LL
:
6912 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6913 cpu_gpr_d
[r2
], n
, MODE_LL
);
6915 case OPC2_32_RRR1_MADDR_H_LU
:
6916 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6917 cpu_gpr_d
[r2
], n
, MODE_LU
);
6919 case OPC2_32_RRR1_MADDR_H_UL
:
6920 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6921 cpu_gpr_d
[r2
], n
, MODE_UL
);
6923 case OPC2_32_RRR1_MADDR_H_UU
:
6924 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6925 cpu_gpr_d
[r2
], n
, MODE_UU
);
6927 case OPC2_32_RRR1_MADDRS_H_LL
:
6928 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6929 cpu_gpr_d
[r2
], n
, MODE_LL
);
6931 case OPC2_32_RRR1_MADDRS_H_LU
:
6932 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6933 cpu_gpr_d
[r2
], n
, MODE_LU
);
6935 case OPC2_32_RRR1_MADDRS_H_UL
:
6936 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6937 cpu_gpr_d
[r2
], n
, MODE_UL
);
6939 case OPC2_32_RRR1_MADDRS_H_UU
:
6940 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6941 cpu_gpr_d
[r2
], n
, MODE_UU
);
6944 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
6948 static void decode_rrr1_maddq_h(DisasContext
*ctx
)
6951 uint32_t r1
, r2
, r3
, r4
, n
;
6954 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6955 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6956 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6957 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6958 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6959 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6961 temp
= tcg_temp_new();
6962 temp2
= tcg_temp_new();
6965 case OPC2_32_RRR1_MADD_Q_32
:
6966 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6967 cpu_gpr_d
[r2
], n
, 32);
6969 case OPC2_32_RRR1_MADD_Q_64
:
6972 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6973 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6976 case OPC2_32_RRR1_MADD_Q_32_L
:
6977 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6978 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6981 case OPC2_32_RRR1_MADD_Q_64_L
:
6984 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6985 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6986 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6989 case OPC2_32_RRR1_MADD_Q_32_U
:
6990 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6991 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6994 case OPC2_32_RRR1_MADD_Q_64_U
:
6997 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6998 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6999 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7002 case OPC2_32_RRR1_MADD_Q_32_LL
:
7003 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7004 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7005 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7007 case OPC2_32_RRR1_MADD_Q_64_LL
:
7010 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7011 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7012 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7013 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7015 case OPC2_32_RRR1_MADD_Q_32_UU
:
7016 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7017 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7018 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7020 case OPC2_32_RRR1_MADD_Q_64_UU
:
7023 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7024 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7025 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7026 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7028 case OPC2_32_RRR1_MADDS_Q_32
:
7029 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7030 cpu_gpr_d
[r2
], n
, 32);
7032 case OPC2_32_RRR1_MADDS_Q_64
:
7035 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7036 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7039 case OPC2_32_RRR1_MADDS_Q_32_L
:
7040 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7041 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7044 case OPC2_32_RRR1_MADDS_Q_64_L
:
7047 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7048 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7049 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7052 case OPC2_32_RRR1_MADDS_Q_32_U
:
7053 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7054 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7057 case OPC2_32_RRR1_MADDS_Q_64_U
:
7060 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7061 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7062 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7065 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7066 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7067 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7068 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7070 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7073 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7074 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7075 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7076 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7078 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7079 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7080 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7081 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7083 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7086 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7087 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7088 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7089 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7091 case OPC2_32_RRR1_MADDR_H_64_UL
:
7093 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7094 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7096 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7098 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7099 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7101 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7102 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7103 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7104 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7106 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7107 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7108 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7109 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7111 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7112 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7113 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7114 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7116 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7117 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7118 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7119 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7122 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7126 static void decode_rrr1_maddsu_h(DisasContext
*ctx
)
7129 uint32_t r1
, r2
, r3
, r4
, n
;
7131 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7132 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7133 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7134 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7135 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7136 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7139 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7142 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7143 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7145 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7148 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7149 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7151 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7154 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7155 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7157 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7160 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7161 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7163 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7166 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7167 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7170 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7173 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7174 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7177 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7180 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7181 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7184 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7187 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7188 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7191 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7194 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7195 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7198 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7201 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7202 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7205 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7208 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7209 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7212 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7215 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7216 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7219 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7222 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7223 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7226 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7229 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7230 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7233 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7236 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7237 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7240 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7243 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7244 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7247 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7248 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7249 cpu_gpr_d
[r2
], n
, MODE_LL
);
7251 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7252 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7253 cpu_gpr_d
[r2
], n
, MODE_LU
);
7255 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7256 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7257 cpu_gpr_d
[r2
], n
, MODE_UL
);
7259 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7260 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7261 cpu_gpr_d
[r2
], n
, MODE_UU
);
7263 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7264 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7265 cpu_gpr_d
[r2
], n
, MODE_LL
);
7267 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7268 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7269 cpu_gpr_d
[r2
], n
, MODE_LU
);
7271 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7272 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7273 cpu_gpr_d
[r2
], n
, MODE_UL
);
7275 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7276 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7277 cpu_gpr_d
[r2
], n
, MODE_UU
);
7280 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7284 static void decode_rrr1_msub(DisasContext
*ctx
)
7287 uint32_t r1
, r2
, r3
, r4
, n
;
7289 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7290 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7291 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7292 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7293 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7294 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7297 case OPC2_32_RRR1_MSUB_H_LL
:
7300 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7301 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7303 case OPC2_32_RRR1_MSUB_H_LU
:
7306 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7307 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7309 case OPC2_32_RRR1_MSUB_H_UL
:
7312 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7313 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7315 case OPC2_32_RRR1_MSUB_H_UU
:
7318 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7319 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7321 case OPC2_32_RRR1_MSUBS_H_LL
:
7324 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7325 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7327 case OPC2_32_RRR1_MSUBS_H_LU
:
7330 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7331 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7333 case OPC2_32_RRR1_MSUBS_H_UL
:
7336 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7337 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7339 case OPC2_32_RRR1_MSUBS_H_UU
:
7342 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7343 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7345 case OPC2_32_RRR1_MSUBM_H_LL
:
7348 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7349 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7351 case OPC2_32_RRR1_MSUBM_H_LU
:
7354 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7355 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7357 case OPC2_32_RRR1_MSUBM_H_UL
:
7360 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7361 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7363 case OPC2_32_RRR1_MSUBM_H_UU
:
7366 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7367 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7369 case OPC2_32_RRR1_MSUBMS_H_LL
:
7372 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7373 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7375 case OPC2_32_RRR1_MSUBMS_H_LU
:
7378 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7379 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7381 case OPC2_32_RRR1_MSUBMS_H_UL
:
7384 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7385 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7387 case OPC2_32_RRR1_MSUBMS_H_UU
:
7390 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7391 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7393 case OPC2_32_RRR1_MSUBR_H_LL
:
7394 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7395 cpu_gpr_d
[r2
], n
, MODE_LL
);
7397 case OPC2_32_RRR1_MSUBR_H_LU
:
7398 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7399 cpu_gpr_d
[r2
], n
, MODE_LU
);
7401 case OPC2_32_RRR1_MSUBR_H_UL
:
7402 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7403 cpu_gpr_d
[r2
], n
, MODE_UL
);
7405 case OPC2_32_RRR1_MSUBR_H_UU
:
7406 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7407 cpu_gpr_d
[r2
], n
, MODE_UU
);
7409 case OPC2_32_RRR1_MSUBRS_H_LL
:
7410 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7411 cpu_gpr_d
[r2
], n
, MODE_LL
);
7413 case OPC2_32_RRR1_MSUBRS_H_LU
:
7414 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7415 cpu_gpr_d
[r2
], n
, MODE_LU
);
7417 case OPC2_32_RRR1_MSUBRS_H_UL
:
7418 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7419 cpu_gpr_d
[r2
], n
, MODE_UL
);
7421 case OPC2_32_RRR1_MSUBRS_H_UU
:
7422 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7423 cpu_gpr_d
[r2
], n
, MODE_UU
);
7426 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7430 static void decode_rrr1_msubq_h(DisasContext
*ctx
)
7433 uint32_t r1
, r2
, r3
, r4
, n
;
7436 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7437 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7438 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7439 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7440 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7441 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7443 temp
= tcg_temp_new();
7444 temp2
= tcg_temp_new();
7447 case OPC2_32_RRR1_MSUB_Q_32
:
7448 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7449 cpu_gpr_d
[r2
], n
, 32);
7451 case OPC2_32_RRR1_MSUB_Q_64
:
7454 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7455 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7458 case OPC2_32_RRR1_MSUB_Q_32_L
:
7459 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7460 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7463 case OPC2_32_RRR1_MSUB_Q_64_L
:
7466 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7467 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7468 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7471 case OPC2_32_RRR1_MSUB_Q_32_U
:
7472 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7473 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7476 case OPC2_32_RRR1_MSUB_Q_64_U
:
7479 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7480 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7481 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7484 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7485 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7486 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7487 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7489 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7492 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7493 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7494 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7495 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7497 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7498 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7499 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7500 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7502 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7505 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7506 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7507 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7508 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7510 case OPC2_32_RRR1_MSUBS_Q_32
:
7511 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7512 cpu_gpr_d
[r2
], n
, 32);
7514 case OPC2_32_RRR1_MSUBS_Q_64
:
7517 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7518 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7521 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7522 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7523 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7526 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7529 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7530 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7531 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7534 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7535 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7536 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7539 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7542 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7543 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7544 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7547 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7548 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7549 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7550 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7552 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7555 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7556 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7557 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7558 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7560 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7561 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7562 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7563 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7565 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7568 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7569 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7570 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7571 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7573 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7575 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7576 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7578 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7580 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7581 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7583 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7584 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7585 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7586 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7588 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7589 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7590 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7591 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7593 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7594 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7595 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7596 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7598 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7599 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7600 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7601 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7604 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7608 static void decode_rrr1_msubad_h(DisasContext
*ctx
)
7611 uint32_t r1
, r2
, r3
, r4
, n
;
7613 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7614 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7615 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7616 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7617 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7618 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7621 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7624 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7625 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7627 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7630 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7631 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7633 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7636 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7637 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7639 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7642 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7643 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7645 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7648 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7649 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7652 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7655 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7656 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7659 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7662 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7663 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7666 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7669 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7670 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7673 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7676 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7677 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7680 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7683 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7684 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7687 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7690 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7691 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7694 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7697 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7698 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7701 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7704 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7705 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7708 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7711 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7712 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7715 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7718 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7719 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7722 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7725 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7726 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7729 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7730 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7731 cpu_gpr_d
[r2
], n
, MODE_LL
);
7733 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7734 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7735 cpu_gpr_d
[r2
], n
, MODE_LU
);
7737 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7738 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7739 cpu_gpr_d
[r2
], n
, MODE_UL
);
7741 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7742 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7743 cpu_gpr_d
[r2
], n
, MODE_UU
);
7745 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7746 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7747 cpu_gpr_d
[r2
], n
, MODE_LL
);
7749 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7750 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7751 cpu_gpr_d
[r2
], n
, MODE_LU
);
7753 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7754 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7755 cpu_gpr_d
[r2
], n
, MODE_UL
);
7757 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7758 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7759 cpu_gpr_d
[r2
], n
, MODE_UU
);
7762 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7767 static void decode_rrrr_extract_insert(DisasContext
*ctx
)
7771 TCGv tmp_width
, tmp_pos
;
7773 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7774 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7775 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7776 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7777 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7779 tmp_pos
= tcg_temp_new();
7780 tmp_width
= tcg_temp_new();
7783 case OPC2_32_RRRR_DEXTR
:
7784 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7786 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7788 TCGv msw
= tcg_temp_new();
7789 TCGv zero
= tcg_constant_tl(0);
7790 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7791 tcg_gen_subfi_tl(msw
, 32, tmp_pos
);
7792 tcg_gen_shr_tl(msw
, cpu_gpr_d
[r2
], msw
);
7794 * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
7795 * behaviour. So check that case here and set the low bits to zero
7796 * which effectivly returns cpu_gpr_d[r1]
7798 tcg_gen_movcond_tl(TCG_COND_EQ
, msw
, tmp_pos
, zero
, zero
, msw
);
7799 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, msw
);
7802 case OPC2_32_RRRR_EXTR
:
7803 case OPC2_32_RRRR_EXTR_U
:
7805 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7806 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7807 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7808 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7809 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7810 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7811 if (op2
== OPC2_32_RRRR_EXTR
) {
7812 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7814 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7817 case OPC2_32_RRRR_INSERT
:
7819 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7820 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7821 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7825 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
7830 static void decode_rrrw_extract_insert(DisasContext
*ctx
)
7838 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7839 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7840 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7841 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7842 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7843 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7845 temp
= tcg_temp_new();
7848 case OPC2_32_RRRW_EXTR
:
7849 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7850 tcg_gen_addi_tl(temp
, temp
, width
);
7851 tcg_gen_subfi_tl(temp
, 32, temp
);
7852 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7853 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7855 case OPC2_32_RRRW_EXTR_U
:
7857 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7859 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7860 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7861 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7864 case OPC2_32_RRRW_IMASK
:
7865 temp2
= tcg_temp_new();
7867 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7868 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7869 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7870 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7871 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7873 case OPC2_32_RRRW_INSERT
:
7874 temp2
= tcg_temp_new();
7876 tcg_gen_movi_tl(temp
, width
);
7877 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7878 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7881 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
/*
 * Decode the SYS-format system instructions (DEBUG, DISABLE/ENABLE,
 * RET/FRET/RFE/RFM, context save/restore, TRAPV/TRAPSV, ...).
 * Instructions that touch ICR.IE require supervisor or user-1 privilege;
 * the mask and bit offset of ICR.IE are ISA-version dependent and were
 * precomputed into ctx->icr_ie_mask / ctx->icr_ie_offset.
 */
static void decode_sys_interrupts(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
            /* clear ICR.IE */
            tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_DISABLE_D:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if (ctx->priv == TRICORE_PRIV_SM ||
                ctx->priv == TRICORE_PRIV_UM1) {
                /* save the previous ICR.IE bit into D[r1], then clear it */
                tcg_gen_extract_tl(cpu_gpr_d[r1], cpu_ICR,
                                   ctx->icr_ie_offset, 1);
                tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
            } else {
                generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
            }
        } else {
            /* DISABLE.D only exists from ISA v1.6 on */
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_DSYNC:
        break;
    case OPC2_32_SYS_ENABLE:
        if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
            /* set ICR.IE and leave the TB so pending interrupts are seen */
            tcg_gen_ori_tl(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
            ctx->base.is_jmp = DISAS_EXIT_UPDATE;
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(cpu_env);
        ctx->base.is_jmp = DISAS_EXIT;
        break;
    case OPC2_32_SYS_RFM:
        if (ctx->priv == TRICORE_PRIV_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            /* run the rfm helper only when DBGSR.DE (masked value == 1)
               is set; otherwise skip to l1 */
            tcg_gen_ld32u_tl(tmp, cpu_env,
                             offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            ctx->base.is_jmp = DISAS_EXIT;
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_RESTORE:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if (ctx->priv == TRICORE_PRIV_SM ||
                ctx->priv == TRICORE_PRIV_UM1) {
                /* restore ICR.IE from bit 0 of D[r1] */
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
                                   ctx->icr_ie_offset, 1);
                ctx->base.is_jmp = DISAS_EXIT_UPDATE;
            } else {
                generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
            }
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_TRAPSV:
        /* trap if the cached SV flag (kept in the sign bit) is set */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
        gen_set_label(l1);
        break;
    case OPC2_32_SYS_TRAPV:
        /* trap if the cached V flag (kept in the sign bit) is set */
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
        gen_set_label(l1);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
/*
 * Decode one 32-bit instruction: dispatch on the major opcode (op1) to
 * the per-format sub-decoder, or translate the few simple formats inline.
 */
static void decode_32Bit_opc(DisasContext *ctx)
{
    int op1;
    int32_t op2;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        /* ST.Q: store the upper halfword of D[r1] */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_constant_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
        break;
    case OPC1_32_ABS_LD_Q:
        /* LD.Q: load a halfword into the upper half of D[r1] */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_constant_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPCM_32_ABS_LEA_LHA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);

        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            /* LHA shares the major opcode with LEA from ISA v1.6.2 on */
            op2 = MASK_OP_ABS_OP2(ctx->opcode);
            if (op2 == OPC2_32_ABS_LHA) {
                tcg_gen_movi_tl(cpu_gpr_a[r1], address << 14);
                break;
            }
            /* otherwise translate regular LEA */
        }

        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        /* ST.T: read-modify-write bit 'bpos' of a byte in memory */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_constant_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
        break;
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* Bit-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(ctx);
        break;
/* BOL-format */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(ctx, op1);
        break;
/* BRC Format */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN Format */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR Format */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC Format */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(ctx);
        break;
/* RCPW Format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(ctx);
        break;
/* RCRR Format */
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_constant_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */

        /* width in D[r3+1], position in D[r3], both masked to 0..31 */
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3 + 1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
        break;
/* RCRW Format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(ctx);
        break;
/* RCR Format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(ctx);
        break;
/* RLC Format */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(ctx, op1);
        break;
/* RR Format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(ctx);
        break;
/* RR1 Format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(ctx);
        break;
/* RR2 format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(ctx);
        break;
/* RRPW format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        /* DEXTR: extract 32 bits from the concatenation {D[r1]:D[r2]} */
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);

        tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
                            32 - const16);
        break;
/* RRR Format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(ctx);
        break;
/* RRR2 Format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(ctx);
        break;
/* RRR1 format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(ctx);
        break;
/* RRRR format */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(ctx);
        break;
/* RRRW format */
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(ctx);
        break;
/* SYS format */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(ctx);
        break;
    case OPC1_32_SYS_RSTV:
        /* RSTV: clear the cached V/SV/AV/SAV PSW flags */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
/*
 * A TriCore instruction is 16 bits wide when bit 0 of its first
 * halfword is clear; 32-bit opcodes have bit 0 set.
 */
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return !(insn & 0x1u);
}
/*
 * TranslatorOps hook: initialise per-TB translation state from the CPU
 * environment and the TB flags before any instruction is translated.
 */
static void tricore_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cs->env_ptr;
    ctx->mem_idx = cpu_mmu_index(env, false);

    /* privilege level was baked into the TB flags at lookup time */
    uint32_t tb_flags = (uint32_t)ctx->base.tb->flags;
    ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV);

    ctx->features = env->features;
    /* the ICR.IE bit moved between ISA v1.3 and v1.6.1 */
    if (has_feature(ctx, TRICORE_FEATURE_161)) {
        ctx->icr_ie_mask = R_ICR_IE_161_MASK;
        ctx->icr_ie_offset = R_ICR_IE_161_SHIFT;
    } else {
        ctx->icr_ie_mask = R_ICR_IE_13_MASK;
        ctx->icr_ie_offset = R_ICR_IE_13_SHIFT;
    }
}
/* TranslatorOps hook run at the start of a TB; nothing to do for TriCore. */
static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
/*
 * TranslatorOps hook run before each instruction: record the guest PC
 * so the instruction can be found again on exception/restore.
 */
static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
{
    /*
     * Return true if the insn at ctx->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * Our caller ensures we are only called if dc->base.pc_next is less than
     * 4 bytes from the page boundary, so we cross the page if the first
     * 16 bits indicate that this is a 32 bit insn.
     */
    uint16_t insn = cpu_lduw_code(env, ctx->base.pc_next);

    return !tricore_insn_is_16bit(insn);
}
/*
 * TranslatorOps hook: fetch and translate a single instruction.
 * The low halfword's bit 0 selects between the 16-bit and 32-bit
 * decoders; pc_succ_insn is advanced accordingly and a TB is ended
 * early when the next insn could cross a page boundary.
 */
static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cpu->env_ptr;
    uint16_t insn_lo;
    bool is_16bit;

    insn_lo = cpu_lduw_code(env, ctx->base.pc_next);
    is_16bit = tricore_insn_is_16bit(insn_lo);
    if (is_16bit) {
        ctx->opcode = insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 2;
        decode_16Bit_opc(ctx);
    } else {
        /* 32-bit opcode: fetch the second halfword */
        uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2);
        ctx->opcode = insn_hi << 16 | insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_32Bit_opc(ctx);
    }
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        /* end the TB at the page boundary (or just before it, if the
           next insn would straddle the page) */
        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
            || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
/*
 * TranslatorOps hook: emit the TB epilogue according to how decoding
 * ended (see the DISAS_* definitions at the top of this file).
 */
static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_EXIT_UPDATE:
        /* PC was not written by the insn itself; store it first */
        gen_save_pc(ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_JUMP:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
/* TranslatorOps hook: write the guest disassembly of this TB to the log. */
static void tricore_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps tricore_tr_ops = {
    .init_disas_context = tricore_tr_init_disas_context,
    .tb_start           = tricore_tr_tb_start,
    .insn_start         = tricore_tr_insn_start,
    .translate_insn     = tricore_tr_translate_insn,
    .tb_stop            = tricore_tr_tb_stop,
    .disas_log          = tricore_tr_disas_log,
};
/*
 * Entry point for the common translator: run the generic translation
 * loop with the TriCore hook table.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &tricore_tr_ops, &ctx.base);
}
/* Reset the CPU state to its architectural power-on defaults. */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    psw_write(env, 0xb80);
}
/* Create TCG globals backing the core special function registers. */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(cpu_env,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
8487 void tricore_tcg_init(void)
8492 for (i
= 0 ; i
< 16 ; i
++) {
8493 cpu_gpr_a
[i
] = tcg_global_mem_new(cpu_env
,
8494 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8497 for (i
= 0 ; i
< 16 ; i
++) {
8498 cpu_gpr_d
[i
] = tcg_global_mem_new(cpu_env
,
8499 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8502 tricore_tcg_init_csfr();
8503 /* init PSW flag cache */
8504 cpu_PSW_C
= tcg_global_mem_new(cpu_env
,
8505 offsetof(CPUTriCoreState
, PSW_USB_C
),
8507 cpu_PSW_V
= tcg_global_mem_new(cpu_env
,
8508 offsetof(CPUTriCoreState
, PSW_USB_V
),
8510 cpu_PSW_SV
= tcg_global_mem_new(cpu_env
,
8511 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8513 cpu_PSW_AV
= tcg_global_mem_new(cpu_env
,
8514 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8516 cpu_PSW_SAV
= tcg_global_mem_new(cpu_env
,
8517 offsetof(CPUTriCoreState
, PSW_USB_SAV
),