4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit
[CC_OP_MAX
];
76 static uint64_t inline_branch_miss
[CC_OP_MAX
];
79 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
81 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
82 if (s
->tb
->flags
& FLAG_MASK_32
) {
83 return pc
| 0x80000000;
89 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
95 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
96 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
98 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
99 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
102 for (i
= 0; i
< 16; i
++) {
103 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
105 cpu_fprintf(f
, "\n");
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
114 cpu_fprintf(f
, "\n");
120 #ifndef CONFIG_USER_ONLY
121 for (i
= 0; i
< 16; i
++) {
122 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
124 cpu_fprintf(f
, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i
= 0; i
< CC_OP_MAX
; i
++) {
133 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
134 inline_branch_miss
[i
], inline_branch_hit
[i
]);
138 cpu_fprintf(f
, "\n");
141 static TCGv_i64 psw_addr
;
142 static TCGv_i64 psw_mask
;
144 static TCGv_i32 cc_op
;
145 static TCGv_i64 cc_src
;
146 static TCGv_i64 cc_dst
;
147 static TCGv_i64 cc_vr
;
149 static char cpu_reg_names
[32][4];
150 static TCGv_i64 regs
[16];
151 static TCGv_i64 fregs
[16];
153 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
155 void s390x_translate_init(void)
159 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
160 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
161 offsetof(CPUS390XState
, psw
.addr
),
163 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
164 offsetof(CPUS390XState
, psw
.mask
),
167 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
169 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
171 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
173 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
176 for (i
= 0; i
< 16; i
++) {
177 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
178 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
179 offsetof(CPUS390XState
, regs
[i
]),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
185 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, fregs
[i
].d
),
187 cpu_reg_names
[i
+ 16]);
190 /* register helpers */
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
/* NOTE(review): the body of this function was dropped by the extraction —
   only the signature survives.  Presumably it saves state needed before a
   helper call that may fault; confirm against the upstream source.  */
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
271 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
274 static int get_mem_index(DisasContext
*s
)
276 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
277 case PSW_ASC_PRIMARY
>> 32:
279 case PSW_ASC_SECONDARY
>> 32:
281 case PSW_ASC_HOME
>> 32:
289 static void gen_exception(int excp
)
291 TCGv_i32 tmp
= tcg_const_i32(excp
);
292 gen_helper_exception(cpu_env
, tmp
);
293 tcg_temp_free_i32(tmp
);
296 static void gen_program_exception(DisasContext
*s
, int code
)
300 /* Remember what pgm exeption this was. */
301 tmp
= tcg_const_i32(code
);
302 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
303 tcg_temp_free_i32(tmp
);
305 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
306 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
307 tcg_temp_free_i32(tmp
);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM
);
320 static inline void gen_illegal_opcode(DisasContext
*s
)
322 gen_program_exception(s
, PGM_SPECIFICATION
);
325 static inline void check_privileged(DisasContext
*s
)
327 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
328 gen_program_exception(s
, PGM_PRIVILEGED
);
332 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
343 tmp
= tcg_const_i64(d2
);
344 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
349 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
353 tmp
= tcg_const_i64(d2
);
354 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
359 tmp
= tcg_const_i64(d2
);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
364 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
370 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
372 s
->cc_op
= CC_OP_CONST0
+ val
;
375 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
377 tcg_gen_discard_i64(cc_src
);
378 tcg_gen_mov_i64(cc_dst
, dst
);
379 tcg_gen_discard_i64(cc_vr
);
383 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
386 tcg_gen_mov_i64(cc_src
, src
);
387 tcg_gen_mov_i64(cc_dst
, dst
);
388 tcg_gen_discard_i64(cc_vr
);
392 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
393 TCGv_i64 dst
, TCGv_i64 vr
)
395 tcg_gen_mov_i64(cc_src
, src
);
396 tcg_gen_mov_i64(cc_dst
, dst
);
397 tcg_gen_mov_i64(cc_vr
, vr
);
401 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
403 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
406 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
408 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
411 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
413 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
416 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
418 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
421 /* CC value is in env->cc_op */
422 static void set_cc_static(DisasContext
*s
)
424 tcg_gen_discard_i64(cc_src
);
425 tcg_gen_discard_i64(cc_dst
);
426 tcg_gen_discard_i64(cc_vr
);
427 s
->cc_op
= CC_OP_STATIC
;
430 /* calculates cc into cc_op */
431 static void gen_op_calc_cc(DisasContext
*s
)
433 TCGv_i32 local_cc_op
;
436 TCGV_UNUSED_I32(local_cc_op
);
437 TCGV_UNUSED_I64(dummy
);
440 dummy
= tcg_const_i64(0);
454 local_cc_op
= tcg_const_i32(s
->cc_op
);
470 /* s->cc_op is the cc value */
471 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
474 /* env->cc_op already is the cc value */
489 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
494 case CC_OP_LTUGTU_32
:
495 case CC_OP_LTUGTU_64
:
502 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
517 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
520 /* unknown operation - assume 3 arguments and cc_op in env */
521 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
527 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
528 tcg_temp_free_i32(local_cc_op
);
530 if (!TCGV_IS_UNUSED_I64(dummy
)) {
531 tcg_temp_free_i64(dummy
);
534 /* We now have cc in cc_op as constant */
538 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
540 /* NOTE: we handle the case where the TB spans two pages here */
541 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
542 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
543 && !s
->singlestep_enabled
544 && !(s
->tb
->cflags
& CF_LAST_IO
));
547 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
549 #ifdef DEBUG_INLINE_BRANCHES
550 inline_branch_miss
[cc_op
]++;
554 static void account_inline_branch(DisasContext
*s
, int cc_op
)
556 #ifdef DEBUG_INLINE_BRANCHES
557 inline_branch_hit
[cc_op
]++;
561 /* Table of mask values to comparison codes, given a comparison as input.
562 For a true comparison CC=3 will never be set, but we treat this
563 conservatively for possible use when CC=3 indicates overflow. */
564 static const TCGCond ltgt_cond
[16] = {
565 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
566 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
567 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
568 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
569 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
570 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
571 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
572 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
575 /* Table of mask values to comparison codes, given a logic op as input.
576 For such, only CC=0 and CC=1 should be possible. */
577 static const TCGCond nz_cond
[16] = {
579 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
581 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
583 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
584 /* EQ | NE | x | x */
585 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
588 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
589 details required to generate a TCG comparison. */
590 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
593 enum cc_op old_cc_op
= s
->cc_op
;
595 if (mask
== 15 || mask
== 0) {
596 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
599 c
->g1
= c
->g2
= true;
604 /* Find the TCG condition for the mask + cc op. */
610 cond
= ltgt_cond
[mask
];
611 if (cond
== TCG_COND_NEVER
) {
614 account_inline_branch(s
, old_cc_op
);
617 case CC_OP_LTUGTU_32
:
618 case CC_OP_LTUGTU_64
:
619 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
620 if (cond
== TCG_COND_NEVER
) {
623 account_inline_branch(s
, old_cc_op
);
627 cond
= nz_cond
[mask
];
628 if (cond
== TCG_COND_NEVER
) {
631 account_inline_branch(s
, old_cc_op
);
646 account_inline_branch(s
, old_cc_op
);
661 account_inline_branch(s
, old_cc_op
);
665 switch (mask
& 0xa) {
666 case 8: /* src == 0 -> no one bit found */
669 case 2: /* src != 0 -> one bit found */
675 account_inline_branch(s
, old_cc_op
);
680 /* Calculate cc value. */
685 /* Jump based on CC. We'll load up the real cond below;
686 the assignment here merely avoids a compiler warning. */
687 account_noninline_branch(s
, old_cc_op
);
688 old_cc_op
= CC_OP_STATIC
;
689 cond
= TCG_COND_NEVER
;
693 /* Load up the arguments of the comparison. */
695 c
->g1
= c
->g2
= false;
699 c
->u
.s32
.a
= tcg_temp_new_i32();
700 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
701 c
->u
.s32
.b
= tcg_const_i32(0);
704 case CC_OP_LTUGTU_32
:
706 c
->u
.s32
.a
= tcg_temp_new_i32();
707 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
708 c
->u
.s32
.b
= tcg_temp_new_i32();
709 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
716 c
->u
.s64
.b
= tcg_const_i64(0);
720 case CC_OP_LTUGTU_64
:
723 c
->g1
= c
->g2
= true;
729 c
->u
.s64
.a
= tcg_temp_new_i64();
730 c
->u
.s64
.b
= tcg_const_i64(0);
731 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
739 case 0x8 | 0x4 | 0x2: /* cc != 3 */
741 c
->u
.s32
.b
= tcg_const_i32(3);
743 case 0x8 | 0x4 | 0x1: /* cc != 2 */
745 c
->u
.s32
.b
= tcg_const_i32(2);
747 case 0x8 | 0x2 | 0x1: /* cc != 1 */
749 c
->u
.s32
.b
= tcg_const_i32(1);
751 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
754 c
->u
.s32
.a
= tcg_temp_new_i32();
755 c
->u
.s32
.b
= tcg_const_i32(0);
756 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
758 case 0x8 | 0x4: /* cc < 2 */
760 c
->u
.s32
.b
= tcg_const_i32(2);
762 case 0x8: /* cc == 0 */
764 c
->u
.s32
.b
= tcg_const_i32(0);
766 case 0x4 | 0x2 | 0x1: /* cc != 0 */
768 c
->u
.s32
.b
= tcg_const_i32(0);
770 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
773 c
->u
.s32
.a
= tcg_temp_new_i32();
774 c
->u
.s32
.b
= tcg_const_i32(0);
775 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
777 case 0x4: /* cc == 1 */
779 c
->u
.s32
.b
= tcg_const_i32(1);
781 case 0x2 | 0x1: /* cc > 1 */
783 c
->u
.s32
.b
= tcg_const_i32(1);
785 case 0x2: /* cc == 2 */
787 c
->u
.s32
.b
= tcg_const_i32(2);
789 case 0x1: /* cc == 3 */
791 c
->u
.s32
.b
= tcg_const_i32(3);
794 /* CC is masked by something else: (8 >> cc) & mask. */
797 c
->u
.s32
.a
= tcg_const_i32(8);
798 c
->u
.s32
.b
= tcg_const_i32(0);
799 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
800 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
811 static void free_compare(DisasCompare
*c
)
815 tcg_temp_free_i64(c
->u
.s64
.a
);
817 tcg_temp_free_i32(c
->u
.s32
.a
);
822 tcg_temp_free_i64(c
->u
.s64
.b
);
824 tcg_temp_free_i32(c
->u
.s32
.b
);
829 /* ====================================================================== */
830 /* Define the insn format enumeration. */
831 #define F0(N) FMT_##N,
832 #define F1(N, X1) F0(N)
833 #define F2(N, X1, X2) F0(N)
834 #define F3(N, X1, X2, X3) F0(N)
835 #define F4(N, X1, X2, X3, X4) F0(N)
836 #define F5(N, X1, X2, X3, X4, X5) F0(N)
839 #include "insn-format.def"
849 /* Define a structure to hold the decoded fields. We'll store each inside
850 an array indexed by an enum. In order to conserve memory, we'll arrange
851 for fields that do not exist at the same time to overlap, thus the "C"
852 for compact. For checking purposes there is an "O" for original index
853 as well that will be applied to availability bitmaps. */
855 enum DisasFieldIndexO
{
878 enum DisasFieldIndexC
{
912 unsigned presentC
:16;
913 unsigned int presentO
;
917 /* This is the way fields are to be accessed out of DisasFields. */
918 #define have_field(S, F) have_field1((S), FLD_O_##F)
919 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
921 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
923 return (f
->presentO
>> c
) & 1;
926 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
927 enum DisasFieldIndexC c
)
929 assert(have_field1(f
, o
));
933 /* Describe the layout of each field in each format. */
934 typedef struct DisasField
{
938 unsigned int indexC
:6;
939 enum DisasFieldIndexO indexO
:8;
942 typedef struct DisasFormatInfo
{
943 DisasField op
[NUM_C_FIELD
];
946 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
947 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
948 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
949 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
950 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
951 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
952 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
953 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
954 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
955 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
956 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
957 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
958 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
959 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
961 #define F0(N) { { } },
962 #define F1(N, X1) { { X1 } },
963 #define F2(N, X1, X2) { { X1, X2 } },
964 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
965 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
966 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
968 static const DisasFormatInfo format_info
[] = {
969 #include "insn-format.def"
987 /* Generally, we'll extract operands into this structures, operate upon
988 them, and store them back. See the "in1", "in2", "prep", "wout" sets
989 of routines below for more details. */
991 bool g_out
, g_out2
, g_in1
, g_in2
;
992 TCGv_i64 out
, out2
, in1
, in2
;
996 /* Return values from translate_one, indicating the state of the TB. */
998 /* Continue the TB. */
1000 /* We have emitted one or more goto_tb. No fixup required. */
1002 /* We are not using a goto_tb (for whatever reason), but have updated
1003 the PC (for whatever reason), so there's no need to do it again on
1006 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1007 updated the PC for the next instruction to be executed. */
1009 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1010 No following code will be executed. */
1014 typedef enum DisasFacility
{
1015 FAC_Z
, /* zarch (default) */
1016 FAC_CASS
, /* compare and swap and store */
1017 FAC_CASS2
, /* compare and swap and store 2*/
1018 FAC_DFP
, /* decimal floating point */
1019 FAC_DFPR
, /* decimal floating point rounding */
1020 FAC_DO
, /* distinct operands */
1021 FAC_EE
, /* execute extensions */
1022 FAC_EI
, /* extended immediate */
1023 FAC_FPE
, /* floating point extension */
1024 FAC_FPSSH
, /* floating point support sign handling */
1025 FAC_FPRGR
, /* FPR-GR transfer */
1026 FAC_GIE
, /* general instructions extension */
1027 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1028 FAC_HW
, /* high-word */
1029 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1030 FAC_LOC
, /* load/store on condition */
1031 FAC_LD
, /* long displacement */
1032 FAC_PC
, /* population count */
1033 FAC_SCF
, /* store clock fast */
1034 FAC_SFLE
, /* store facility list extended */
1040 DisasFacility fac
:6;
1044 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1045 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1046 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1047 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1048 void (*help_cout
)(DisasContext
*, DisasOps
*);
1049 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1054 /* ====================================================================== */
1055 /* Miscellaneous helpers, used by several operations. */
1057 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1058 DisasOps
*o
, int mask
)
1060 int b2
= get_field(f
, b2
);
1061 int d2
= get_field(f
, d2
);
1064 o
->in2
= tcg_const_i64(d2
& mask
);
1066 o
->in2
= get_address(s
, 0, b2
, d2
);
1067 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1071 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1073 if (dest
== s
->next_pc
) {
1076 if (use_goto_tb(s
, dest
)) {
1079 tcg_gen_movi_i64(psw_addr
, dest
);
1080 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1081 return EXIT_GOTO_TB
;
1083 tcg_gen_movi_i64(psw_addr
, dest
);
1084 return EXIT_PC_UPDATED
;
1088 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1089 bool is_imm
, int imm
, TCGv_i64 cdest
)
1092 uint64_t dest
= s
->pc
+ 2 * imm
;
1095 /* Take care of the special cases first. */
1096 if (c
->cond
== TCG_COND_NEVER
) {
1101 if (dest
== s
->next_pc
) {
1102 /* Branch to next. */
1106 if (c
->cond
== TCG_COND_ALWAYS
) {
1107 ret
= help_goto_direct(s
, dest
);
1111 if (TCGV_IS_UNUSED_I64(cdest
)) {
1112 /* E.g. bcr %r0 -> no branch. */
1116 if (c
->cond
== TCG_COND_ALWAYS
) {
1117 tcg_gen_mov_i64(psw_addr
, cdest
);
1118 ret
= EXIT_PC_UPDATED
;
1123 if (use_goto_tb(s
, s
->next_pc
)) {
1124 if (is_imm
&& use_goto_tb(s
, dest
)) {
1125 /* Both exits can use goto_tb. */
1128 lab
= gen_new_label();
1130 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1132 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1135 /* Branch not taken. */
1137 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1138 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1143 tcg_gen_movi_i64(psw_addr
, dest
);
1144 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1148 /* Fallthru can use goto_tb, but taken branch cannot. */
1149 /* Store taken branch destination before the brcond. This
1150 avoids having to allocate a new local temp to hold it.
1151 We'll overwrite this in the not taken case anyway. */
1153 tcg_gen_mov_i64(psw_addr
, cdest
);
1156 lab
= gen_new_label();
1158 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1160 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1163 /* Branch not taken. */
1166 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1167 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1171 tcg_gen_movi_i64(psw_addr
, dest
);
1173 ret
= EXIT_PC_UPDATED
;
1176 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1177 Most commonly we're single-stepping or some other condition that
1178 disables all use of goto_tb. Just update the PC and exit. */
1180 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1182 cdest
= tcg_const_i64(dest
);
1186 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1189 TCGv_i32 t0
= tcg_temp_new_i32();
1190 TCGv_i64 t1
= tcg_temp_new_i64();
1191 TCGv_i64 z
= tcg_const_i64(0);
1192 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1193 tcg_gen_extu_i32_i64(t1
, t0
);
1194 tcg_temp_free_i32(t0
);
1195 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1196 tcg_temp_free_i64(t1
);
1197 tcg_temp_free_i64(z
);
1201 tcg_temp_free_i64(cdest
);
1203 tcg_temp_free_i64(next
);
1205 ret
= EXIT_PC_UPDATED
;
1213 /* ====================================================================== */
1214 /* The operations. These perform the bulk of the work for any insn,
1215 usually after the operands have been loaded and output initialized. */
1217 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1219 gen_helper_abs_i64(o
->out
, o
->in2
);
1223 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1225 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1229 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1231 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1235 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1237 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1238 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1242 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1244 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1248 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1252 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1254 /* XXX possible optimization point */
1256 cc
= tcg_temp_new_i64();
1257 tcg_gen_extu_i32_i64(cc
, cc_op
);
1258 tcg_gen_shri_i64(cc
, cc
, 1);
1260 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1261 tcg_temp_free_i64(cc
);
1265 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1267 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1271 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1273 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1277 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1279 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1280 return_low128(o
->out2
);
1284 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1286 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1290 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1292 int shift
= s
->insn
->data
& 0xff;
1293 int size
= s
->insn
->data
>> 8;
1294 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1297 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1298 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1299 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1301 /* Produce the CC from only the bits manipulated. */
1302 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1303 set_cc_nz_u64(s
, cc_dst
);
1307 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1309 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1310 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1311 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1312 return EXIT_PC_UPDATED
;
1318 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1320 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1321 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1324 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1326 int m1
= get_field(s
->fields
, m1
);
1327 bool is_imm
= have_field(s
->fields
, i2
);
1328 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1331 disas_jcc(s
, &c
, m1
);
1332 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1335 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1337 int r1
= get_field(s
->fields
, r1
);
1338 bool is_imm
= have_field(s
->fields
, i2
);
1339 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1343 c
.cond
= TCG_COND_NE
;
1348 t
= tcg_temp_new_i64();
1349 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1350 store_reg32_i64(r1
, t
);
1351 c
.u
.s32
.a
= tcg_temp_new_i32();
1352 c
.u
.s32
.b
= tcg_const_i32(0);
1353 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1354 tcg_temp_free_i64(t
);
1356 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1359 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1361 int r1
= get_field(s
->fields
, r1
);
1362 bool is_imm
= have_field(s
->fields
, i2
);
1363 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1366 c
.cond
= TCG_COND_NE
;
1371 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1372 c
.u
.s64
.a
= regs
[r1
];
1373 c
.u
.s64
.b
= tcg_const_i64(0);
1375 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1378 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1380 int r1
= get_field(s
->fields
, r1
);
1381 int r3
= get_field(s
->fields
, r3
);
1382 bool is_imm
= have_field(s
->fields
, i2
);
1383 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1387 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1392 t
= tcg_temp_new_i64();
1393 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1394 c
.u
.s32
.a
= tcg_temp_new_i32();
1395 c
.u
.s32
.b
= tcg_temp_new_i32();
1396 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1397 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1398 store_reg32_i64(r1
, t
);
1399 tcg_temp_free_i64(t
);
1401 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1404 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1406 int r1
= get_field(s
->fields
, r1
);
1407 int r3
= get_field(s
->fields
, r3
);
1408 bool is_imm
= have_field(s
->fields
, i2
);
1409 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1412 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1415 if (r1
== (r3
| 1)) {
1416 c
.u
.s64
.b
= load_reg(r3
| 1);
1419 c
.u
.s64
.b
= regs
[r3
| 1];
1423 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1424 c
.u
.s64
.a
= regs
[r1
];
1427 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1430 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1432 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1437 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1439 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1444 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1446 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1451 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1453 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1454 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1455 tcg_temp_free_i32(m3
);
1456 gen_set_cc_nz_f32(s
, o
->in2
);
1460 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1462 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1463 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1464 tcg_temp_free_i32(m3
);
1465 gen_set_cc_nz_f64(s
, o
->in2
);
1469 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1471 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1472 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1473 tcg_temp_free_i32(m3
);
1474 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1478 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1480 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1481 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1482 tcg_temp_free_i32(m3
);
1483 gen_set_cc_nz_f32(s
, o
->in2
);
1487 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1489 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1490 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1491 tcg_temp_free_i32(m3
);
1492 gen_set_cc_nz_f64(s
, o
->in2
);
1496 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1498 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1499 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1500 tcg_temp_free_i32(m3
);
1501 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1505 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1507 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1508 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1509 tcg_temp_free_i32(m3
);
1513 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1515 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1516 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1517 tcg_temp_free_i32(m3
);
1521 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1523 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1524 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1525 tcg_temp_free_i32(m3
);
1526 return_low128(o
->out2
);
1530 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1532 int r2
= get_field(s
->fields
, r2
);
1533 TCGv_i64 len
= tcg_temp_new_i64();
1535 potential_page_fault(s
);
1536 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1538 return_low128(o
->out
);
1540 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1541 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1542 tcg_temp_free_i64(len
);
1547 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1549 int l
= get_field(s
->fields
, l1
);
1554 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1555 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1558 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1559 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1562 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1563 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1566 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1567 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1570 potential_page_fault(s
);
1571 vl
= tcg_const_i32(l
);
1572 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1573 tcg_temp_free_i32(vl
);
1577 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1581 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1583 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1584 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1585 potential_page_fault(s
);
1586 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1587 tcg_temp_free_i32(r1
);
1588 tcg_temp_free_i32(r3
);
1593 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1595 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1596 TCGv_i32 t1
= tcg_temp_new_i32();
1597 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1598 potential_page_fault(s
);
1599 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1601 tcg_temp_free_i32(t1
);
1602 tcg_temp_free_i32(m3
);
1606 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1608 potential_page_fault(s
);
1609 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1611 return_low128(o
->in2
);
1615 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1617 int r3
= get_field(s
->fields
, r3
);
1618 potential_page_fault(s
);
1619 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1624 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1626 int r3
= get_field(s
->fields
, r3
);
1627 potential_page_fault(s
);
1628 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1633 #ifndef CONFIG_USER_ONLY
1634 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1636 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1637 check_privileged(s
);
1638 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1639 tcg_temp_free_i32(r1
);
1645 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1647 int r3
= get_field(s
->fields
, r3
);
1648 TCGv_i64 in3
= tcg_temp_new_i64();
1649 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1650 potential_page_fault(s
);
1651 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1652 tcg_temp_free_i64(in3
);
1657 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1659 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1660 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1661 potential_page_fault(s
);
1662 /* XXX rewrite in tcg */
1663 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1668 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1670 TCGv_i64 t1
= tcg_temp_new_i64();
1671 TCGv_i32 t2
= tcg_temp_new_i32();
1672 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1673 gen_helper_cvd(t1
, t2
);
1674 tcg_temp_free_i32(t2
);
1675 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1676 tcg_temp_free_i64(t1
);
1680 #ifndef CONFIG_USER_ONLY
1681 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1685 check_privileged(s
);
1686 potential_page_fault(s
);
1688 /* We pretend the format is RX_a so that D2 is the field we want. */
1689 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1690 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1691 tcg_temp_free_i32(tmp
);
1696 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1698 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1699 return_low128(o
->out
);
1703 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1705 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1706 return_low128(o
->out
);
1710 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1712 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1713 return_low128(o
->out
);
1717 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
1719 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
1720 return_low128(o
->out
);
1724 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
1726 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1730 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
1732 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1736 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
1738 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1739 return_low128(o
->out2
);
1743 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
1745 int r2
= get_field(s
->fields
, r2
);
1746 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1750 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
1752 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1756 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
1758 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1759 tb->flags, (ab)use the tb->cs_base field as the address of
1760 the template in memory, and grab 8 bits of tb->flags/cflags for
1761 the contents of the register. We would then recognize all this
1762 in gen_intermediate_code_internal, generating code for exactly
1763 one instruction. This new TB then gets executed normally.
1765 On the other hand, this seems to be mostly used for modifying
1766 MVC inside of memcpy, which needs a helper call anyway. So
1767 perhaps this doesn't bear thinking about any further. */
1774 tmp
= tcg_const_i64(s
->next_pc
);
1775 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
1776 tcg_temp_free_i64(tmp
);
1782 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
1784 /* We'll use the original input for cc computation, since we get to
1785 compare that against 0, which ought to be better than comparing
1786 the real output against 64. It also lets cc_dst be a convenient
1787 temporary during our computation. */
1788 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
1790 /* R1 = IN ? CLZ(IN) : 64. */
1791 gen_helper_clz(o
->out
, o
->in2
);
1793 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1794 value by 64, which is undefined. But since the shift is 64 iff the
1795 input is zero, we still get the correct result after and'ing. */
1796 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
1797 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
1798 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
1802 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
1804 int m3
= get_field(s
->fields
, m3
);
1805 int pos
, len
, base
= s
->insn
->data
;
1806 TCGv_i64 tmp
= tcg_temp_new_i64();
1811 /* Effectively a 32-bit load. */
1812 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
1819 /* Effectively a 16-bit load. */
1820 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
1828 /* Effectively an 8-bit load. */
1829 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1834 pos
= base
+ ctz32(m3
) * 8;
1835 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
1836 ccm
= ((1ull << len
) - 1) << pos
;
1840 /* This is going to be a sequence of loads and inserts. */
1841 pos
= base
+ 32 - 8;
1845 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1846 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
1847 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
1850 m3
= (m3
<< 1) & 0xf;
1856 tcg_gen_movi_i64(tmp
, ccm
);
1857 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
1858 tcg_temp_free_i64(tmp
);
1862 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
1864 int shift
= s
->insn
->data
& 0xff;
1865 int size
= s
->insn
->data
>> 8;
1866 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
1870 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
1875 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
1877 t1
= tcg_temp_new_i64();
1878 tcg_gen_shli_i64(t1
, psw_mask
, 20);
1879 tcg_gen_shri_i64(t1
, t1
, 36);
1880 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1882 tcg_gen_extu_i32_i64(t1
, cc_op
);
1883 tcg_gen_shli_i64(t1
, t1
, 28);
1884 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1885 tcg_temp_free_i64(t1
);
1889 #ifndef CONFIG_USER_ONLY
1890 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
1892 check_privileged(s
);
1893 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
1897 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
1899 check_privileged(s
);
1900 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
1905 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
1907 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
1911 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
1913 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
1917 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
1919 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1923 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
1925 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1929 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
1931 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
1932 return_low128(o
->out2
);
1936 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
1938 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
1939 return_low128(o
->out2
);
1943 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
1945 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
1949 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
1951 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
1955 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
1957 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
1961 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
1963 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
1967 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
1969 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
1973 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
1975 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
1979 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
1981 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
1985 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
1987 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
1991 #ifndef CONFIG_USER_ONLY
1992 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
1994 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1995 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1996 check_privileged(s
);
1997 potential_page_fault(s
);
1998 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
1999 tcg_temp_free_i32(r1
);
2000 tcg_temp_free_i32(r3
);
2004 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2006 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2007 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2008 check_privileged(s
);
2009 potential_page_fault(s
);
2010 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2011 tcg_temp_free_i32(r1
);
2012 tcg_temp_free_i32(r3
);
2015 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2017 check_privileged(s
);
2018 potential_page_fault(s
);
2019 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2024 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2028 check_privileged(s
);
2030 t1
= tcg_temp_new_i64();
2031 t2
= tcg_temp_new_i64();
2032 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2033 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2034 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2035 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2036 tcg_gen_shli_i64(t1
, t1
, 32);
2037 gen_helper_load_psw(cpu_env
, t1
, t2
);
2038 tcg_temp_free_i64(t1
);
2039 tcg_temp_free_i64(t2
);
2040 return EXIT_NORETURN
;
2043 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2047 check_privileged(s
);
2049 t1
= tcg_temp_new_i64();
2050 t2
= tcg_temp_new_i64();
2051 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2052 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2053 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2054 gen_helper_load_psw(cpu_env
, t1
, t2
);
2055 tcg_temp_free_i64(t1
);
2056 tcg_temp_free_i64(t2
);
2057 return EXIT_NORETURN
;
2061 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2063 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2064 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2065 potential_page_fault(s
);
2066 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2067 tcg_temp_free_i32(r1
);
2068 tcg_temp_free_i32(r3
);
2072 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2074 int r1
= get_field(s
->fields
, r1
);
2075 int r3
= get_field(s
->fields
, r3
);
2076 TCGv_i64 t
= tcg_temp_new_i64();
2077 TCGv_i64 t4
= tcg_const_i64(4);
2080 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2081 store_reg32_i64(r1
, t
);
2085 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2089 tcg_temp_free_i64(t
);
2090 tcg_temp_free_i64(t4
);
2094 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2096 int r1
= get_field(s
->fields
, r1
);
2097 int r3
= get_field(s
->fields
, r3
);
2098 TCGv_i64 t
= tcg_temp_new_i64();
2099 TCGv_i64 t4
= tcg_const_i64(4);
2102 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2103 store_reg32h_i64(r1
, t
);
2107 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2111 tcg_temp_free_i64(t
);
2112 tcg_temp_free_i64(t4
);
2116 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2118 int r1
= get_field(s
->fields
, r1
);
2119 int r3
= get_field(s
->fields
, r3
);
2120 TCGv_i64 t8
= tcg_const_i64(8);
2123 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2127 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2131 tcg_temp_free_i64(t8
);
2135 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2138 o
->g_out
= o
->g_in2
;
2139 TCGV_UNUSED_I64(o
->in2
);
2144 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2148 o
->g_out
= o
->g_in1
;
2149 o
->g_out2
= o
->g_in2
;
2150 TCGV_UNUSED_I64(o
->in1
);
2151 TCGV_UNUSED_I64(o
->in2
);
2152 o
->g_in1
= o
->g_in2
= false;
2156 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2158 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2159 potential_page_fault(s
);
2160 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2161 tcg_temp_free_i32(l
);
2165 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2167 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2168 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2169 potential_page_fault(s
);
2170 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2171 tcg_temp_free_i32(r1
);
2172 tcg_temp_free_i32(r2
);
2177 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2179 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2180 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2181 potential_page_fault(s
);
2182 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2183 tcg_temp_free_i32(r1
);
2184 tcg_temp_free_i32(r3
);
2189 #ifndef CONFIG_USER_ONLY
2190 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2192 int r1
= get_field(s
->fields
, l1
);
2193 check_privileged(s
);
2194 potential_page_fault(s
);
2195 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2200 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2202 int r1
= get_field(s
->fields
, l1
);
2203 check_privileged(s
);
2204 potential_page_fault(s
);
2205 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2211 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2213 potential_page_fault(s
);
2214 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2219 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2221 potential_page_fault(s
);
2222 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2224 return_low128(o
->in2
);
2228 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2230 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2234 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2236 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2237 return_low128(o
->out2
);
2241 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2243 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2247 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2249 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2253 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2255 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2259 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2261 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2262 return_low128(o
->out2
);
2266 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2268 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2269 return_low128(o
->out2
);
2273 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2275 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2276 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2277 tcg_temp_free_i64(r3
);
2281 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2283 int r3
= get_field(s
->fields
, r3
);
2284 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2288 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2290 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2291 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2292 tcg_temp_free_i64(r3
);
2296 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2298 int r3
= get_field(s
->fields
, r3
);
2299 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2303 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2305 gen_helper_nabs_i64(o
->out
, o
->in2
);
2309 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2311 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2315 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2317 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2321 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2323 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2324 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2328 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2330 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2331 potential_page_fault(s
);
2332 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2333 tcg_temp_free_i32(l
);
2338 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2340 tcg_gen_neg_i64(o
->out
, o
->in2
);
2344 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2346 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2350 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2352 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2356 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2358 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2359 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2363 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2365 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2366 potential_page_fault(s
);
2367 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2368 tcg_temp_free_i32(l
);
2373 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2375 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2379 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2381 int shift
= s
->insn
->data
& 0xff;
2382 int size
= s
->insn
->data
>> 8;
2383 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2386 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2387 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2389 /* Produce the CC from only the bits manipulated. */
2390 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2391 set_cc_nz_u64(s
, cc_dst
);
2395 #ifndef CONFIG_USER_ONLY
2396 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2398 check_privileged(s
);
2399 gen_helper_ptlb(cpu_env
);
2404 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2406 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2410 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2412 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2416 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2418 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2422 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2424 TCGv_i32 t1
= tcg_temp_new_i32();
2425 TCGv_i32 t2
= tcg_temp_new_i32();
2426 TCGv_i32 to
= tcg_temp_new_i32();
2427 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2428 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2429 tcg_gen_rotl_i32(to
, t1
, t2
);
2430 tcg_gen_extu_i32_i64(o
->out
, to
);
2431 tcg_temp_free_i32(t1
);
2432 tcg_temp_free_i32(t2
);
2433 tcg_temp_free_i32(to
);
2437 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2439 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2443 #ifndef CONFIG_USER_ONLY
2444 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2446 check_privileged(s
);
2447 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2452 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2454 check_privileged(s
);
2455 gen_helper_sacf(cpu_env
, o
->in2
);
2456 /* Addressing mode has changed, so end the block. */
2457 return EXIT_PC_STALE
;
2461 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2463 int r1
= get_field(s
->fields
, r1
);
2464 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2468 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2470 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2474 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2476 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2480 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2482 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2483 return_low128(o
->out2
);
2487 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2489 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2493 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2495 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2499 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2501 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2502 return_low128(o
->out2
);
2506 #ifndef CONFIG_USER_ONLY
2507 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2509 check_privileged(s
);
2510 potential_page_fault(s
);
2511 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2516 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2518 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2519 check_privileged(s
);
2520 potential_page_fault(s
);
2521 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2522 tcg_temp_free_i32(r1
);
2527 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2529 uint64_t sign
= 1ull << s
->insn
->data
;
2530 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2531 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2532 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2533 /* The arithmetic left shift is curious in that it does not affect
2534 the sign bit. Copy that over from the source unchanged. */
2535 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2536 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2537 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2541 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2543 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2547 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2549 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2553 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2555 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2559 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2561 gen_helper_sfpc(cpu_env
, o
->in2
);
2565 #ifndef CONFIG_USER_ONLY
2566 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2568 check_privileged(s
);
2569 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2570 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2574 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
2576 check_privileged(s
);
2577 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
2581 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2583 check_privileged(s
);
2584 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2588 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
2590 check_privileged(s
);
2591 /* ??? Surely cpu address != cpu number. In any case the previous
2592 version of this stored more than the required half-word, so it
2593 is unlikely this has ever been tested. */
2594 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2598 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2600 gen_helper_stck(o
->out
, cpu_env
);
2601 /* ??? We don't implement clock states. */
2602 gen_op_movi_cc(s
, 0);
2606 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
2608 TCGv_i64 c1
= tcg_temp_new_i64();
2609 TCGv_i64 c2
= tcg_temp_new_i64();
2610 gen_helper_stck(c1
, cpu_env
);
2611 /* Shift the 64-bit value into its place as a zero-extended
2612 104-bit value. Note that "bit positions 64-103 are always
2613 non-zero so that they compare differently to STCK"; we set
2614 the least significant bit to 1. */
2615 tcg_gen_shli_i64(c2
, c1
, 56);
2616 tcg_gen_shri_i64(c1
, c1
, 8);
2617 tcg_gen_ori_i64(c2
, c2
, 0x10000);
2618 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
2619 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2620 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
2621 tcg_temp_free_i64(c1
);
2622 tcg_temp_free_i64(c2
);
2623 /* ??? We don't implement clock states. */
2624 gen_op_movi_cc(s
, 0);
2628 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2630 check_privileged(s
);
2631 gen_helper_sckc(cpu_env
, o
->in2
);
2635 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2637 check_privileged(s
);
2638 gen_helper_stckc(o
->out
, cpu_env
);
2642 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2644 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2645 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2646 check_privileged(s
);
2647 potential_page_fault(s
);
2648 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2649 tcg_temp_free_i32(r1
);
2650 tcg_temp_free_i32(r3
);
2654 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2656 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2657 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2658 check_privileged(s
);
2659 potential_page_fault(s
);
2660 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2661 tcg_temp_free_i32(r1
);
2662 tcg_temp_free_i32(r3
);
2666 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2668 check_privileged(s
);
2669 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2673 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2675 check_privileged(s
);
2676 gen_helper_spt(cpu_env
, o
->in2
);
2680 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
2683 /* We really ought to have more complete indication of facilities
2684 that we implement. Address this when STFLE is implemented. */
2685 check_privileged(s
);
2686 f
= tcg_const_i64(0xc0000000);
2687 a
= tcg_const_i64(200);
2688 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
2689 tcg_temp_free_i64(f
);
2690 tcg_temp_free_i64(a
);
2694 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2696 check_privileged(s
);
2697 gen_helper_stpt(o
->out
, cpu_env
);
2701 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
2703 check_privileged(s
);
2704 potential_page_fault(s
);
2705 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
2710 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
2712 check_privileged(s
);
2713 gen_helper_spx(cpu_env
, o
->in2
);
2717 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
2719 check_privileged(s
);
2720 /* Not operational. */
2721 gen_op_movi_cc(s
, 3);
2725 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
2727 check_privileged(s
);
2728 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
2729 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
2733 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2735 uint64_t i2
= get_field(s
->fields
, i2
);
2738 check_privileged(s
);
2740 /* It is important to do what the instruction name says: STORE THEN.
2741 If we let the output hook perform the store then if we fault and
2742 restart, we'll have the wrong SYSTEM MASK in place. */
2743 t
= tcg_temp_new_i64();
2744 tcg_gen_shri_i64(t
, psw_mask
, 56);
2745 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2746 tcg_temp_free_i64(t
);
2748 if (s
->fields
->op
== 0xac) {
2749 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2750 (i2
<< 56) | 0x00ffffffffffffffull
);
2752 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2757 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
2759 check_privileged(s
);
2760 potential_page_fault(s
);
2761 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
2766 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2768 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2772 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2774 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2778 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2780 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2784 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2786 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2790 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2792 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2793 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2794 potential_page_fault(s
);
2795 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
2796 tcg_temp_free_i32(r1
);
2797 tcg_temp_free_i32(r3
);
2801 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
2803 int m3
= get_field(s
->fields
, m3
);
2804 int pos
, base
= s
->insn
->data
;
2805 TCGv_i64 tmp
= tcg_temp_new_i64();
2807 pos
= base
+ ctz32(m3
) * 8;
2810 /* Effectively a 32-bit store. */
2811 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2812 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
2818 /* Effectively a 16-bit store. */
2819 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2820 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
2827 /* Effectively an 8-bit store. */
2828 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2829 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2833 /* This is going to be a sequence of shifts and stores. */
2834 pos
= base
+ 32 - 8;
2837 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2838 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2839 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2841 m3
= (m3
<< 1) & 0xf;
2846 tcg_temp_free_i64(tmp
);
2850 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
2852 int r1
= get_field(s
->fields
, r1
);
2853 int r3
= get_field(s
->fields
, r3
);
2854 int size
= s
->insn
->data
;
2855 TCGv_i64 tsize
= tcg_const_i64(size
);
2859 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
2861 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
2866 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
2870 tcg_temp_free_i64(tsize
);
2874 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
2876 int r1
= get_field(s
->fields
, r1
);
2877 int r3
= get_field(s
->fields
, r3
);
2878 TCGv_i64 t
= tcg_temp_new_i64();
2879 TCGv_i64 t4
= tcg_const_i64(4);
2880 TCGv_i64 t32
= tcg_const_i64(32);
2883 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
2884 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
2888 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2892 tcg_temp_free_i64(t
);
2893 tcg_temp_free_i64(t4
);
2894 tcg_temp_free_i64(t32
);
2898 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
2900 potential_page_fault(s
);
2901 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2903 return_low128(o
->in2
);
2907 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
2909 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
2913 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
2918 tcg_gen_not_i64(o
->in2
, o
->in2
);
2919 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2921 /* XXX possible optimization point */
2923 cc
= tcg_temp_new_i64();
2924 tcg_gen_extu_i32_i64(cc
, cc_op
);
2925 tcg_gen_shri_i64(cc
, cc
, 1);
2926 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2927 tcg_temp_free_i64(cc
);
2931 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
2938 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
2939 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
2940 tcg_temp_free_i32(t
);
2942 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
2943 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
2944 tcg_temp_free_i32(t
);
2946 gen_exception(EXCP_SVC
);
2947 return EXIT_NORETURN
;
2950 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
2952 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
2957 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
2959 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
2964 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
2966 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
2971 #ifndef CONFIG_USER_ONLY
2972 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
2974 potential_page_fault(s
);
2975 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
2981 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
2983 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2984 potential_page_fault(s
);
2985 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
2986 tcg_temp_free_i32(l
);
2991 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
2993 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2994 potential_page_fault(s
);
2995 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
2996 tcg_temp_free_i32(l
);
3000 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3002 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3003 potential_page_fault(s
);
3004 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3005 tcg_temp_free_i32(l
);
3010 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3012 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3016 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3018 int shift
= s
->insn
->data
& 0xff;
3019 int size
= s
->insn
->data
>> 8;
3020 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3023 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3024 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3026 /* Produce the CC from only the bits manipulated. */
3027 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3028 set_cc_nz_u64(s
, cc_dst
);
3032 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3034 o
->out
= tcg_const_i64(0);
3038 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3040 o
->out
= tcg_const_i64(0);
3046 /* ====================================================================== */
3047 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3048 the original inputs), update the various cc data structures in order to
3049 be able to compute the new condition code. */
3051 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3053 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3056 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3058 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3061 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3063 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3066 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3068 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3071 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3073 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3076 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3078 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3081 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3083 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3086 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3088 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3091 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3093 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3096 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3098 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3101 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3103 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3106 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3108 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3111 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3113 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3116 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3118 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3121 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3123 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3126 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3128 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3131 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3133 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3136 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3138 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3141 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3143 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3146 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3148 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3149 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3152 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3154 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3157 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3159 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3162 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3164 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3167 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3169 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3172 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3174 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3177 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3179 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3182 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3184 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3187 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3189 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3192 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3194 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3197 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3199 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3202 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3204 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3207 /* ====================================================================== */
3208 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3209 with the TCG register to which we will write. Used in combination with
3210 the "wout" generators, in some cases we need a new temporary, and in
3211 some cases we can write to a TCG global. */
3213 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3215 o
->out
= tcg_temp_new_i64();
3218 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3220 o
->out
= tcg_temp_new_i64();
3221 o
->out2
= tcg_temp_new_i64();
3224 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3226 o
->out
= regs
[get_field(f
, r1
)];
3230 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3232 /* ??? Specification exception: r1 must be even. */
3233 int r1
= get_field(f
, r1
);
3235 o
->out2
= regs
[(r1
+ 1) & 15];
3236 o
->g_out
= o
->g_out2
= true;
3239 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3241 o
->out
= fregs
[get_field(f
, r1
)];
3245 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3247 /* ??? Specification exception: r1 must be < 14. */
3248 int r1
= get_field(f
, r1
);
3250 o
->out2
= fregs
[(r1
+ 2) & 15];
3251 o
->g_out
= o
->g_out2
= true;
3254 /* ====================================================================== */
3255 /* The "Write OUTput" generators. These generally perform some non-trivial
3256 copy of data to TCG globals, or to main memory. The trivial cases are
3257 generally handled by having a "prep" generator install the TCG global
3258 as the destination of the operation. */
3260 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3262 store_reg(get_field(f
, r1
), o
->out
);
3265 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3267 int r1
= get_field(f
, r1
);
3268 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3271 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3273 int r1
= get_field(f
, r1
);
3274 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3277 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3279 store_reg32_i64(get_field(f
, r1
), o
->out
);
3282 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3284 /* ??? Specification exception: r1 must be even. */
3285 int r1
= get_field(f
, r1
);
3286 store_reg32_i64(r1
, o
->out
);
3287 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3290 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3292 /* ??? Specification exception: r1 must be even. */
3293 int r1
= get_field(f
, r1
);
3294 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3295 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3296 store_reg32_i64(r1
, o
->out
);
3299 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3301 store_freg32_i64(get_field(f
, r1
), o
->out
);
3304 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3306 store_freg(get_field(f
, r1
), o
->out
);
3309 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3311 /* ??? Specification exception: r1 must be < 14. */
3312 int f1
= get_field(s
->fields
, r1
);
3313 store_freg(f1
, o
->out
);
3314 store_freg((f1
+ 2) & 15, o
->out2
);
3317 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3319 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3320 store_reg32_i64(get_field(f
, r1
), o
->out
);
3324 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3326 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3327 store_freg32_i64(get_field(f
, r1
), o
->out
);
3331 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3333 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3336 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3338 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3341 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3343 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3346 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3348 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3351 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3353 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3356 /* ====================================================================== */
3357 /* The "INput 1" generators. These load the first operand to an insn. */
3359 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3361 o
->in1
= load_reg(get_field(f
, r1
));
3364 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3366 o
->in1
= regs
[get_field(f
, r1
)];
3370 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3372 o
->in1
= tcg_temp_new_i64();
3373 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3376 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3378 o
->in1
= tcg_temp_new_i64();
3379 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3382 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3384 o
->in1
= tcg_temp_new_i64();
3385 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3388 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3390 /* ??? Specification exception: r1 must be even. */
3391 int r1
= get_field(f
, r1
);
3392 o
->in1
= load_reg((r1
+ 1) & 15);
3395 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3397 /* ??? Specification exception: r1 must be even. */
3398 int r1
= get_field(f
, r1
);
3399 o
->in1
= tcg_temp_new_i64();
3400 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3403 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3405 /* ??? Specification exception: r1 must be even. */
3406 int r1
= get_field(f
, r1
);
3407 o
->in1
= tcg_temp_new_i64();
3408 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3411 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3413 /* ??? Specification exception: r1 must be even. */
3414 int r1
= get_field(f
, r1
);
3415 o
->in1
= tcg_temp_new_i64();
3416 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3419 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3421 o
->in1
= load_reg(get_field(f
, r2
));
3424 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3426 o
->in1
= load_reg(get_field(f
, r3
));
3429 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3431 o
->in1
= regs
[get_field(f
, r3
)];
3435 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3437 o
->in1
= tcg_temp_new_i64();
3438 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3441 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3443 o
->in1
= tcg_temp_new_i64();
3444 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3447 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3449 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3452 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3454 o
->in1
= fregs
[get_field(f
, r1
)];
3458 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3460 /* ??? Specification exception: r1 must be < 14. */
3461 int r1
= get_field(f
, r1
);
3463 o
->out2
= fregs
[(r1
+ 2) & 15];
3464 o
->g_out
= o
->g_out2
= true;
3467 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3469 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3472 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3474 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3475 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3478 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3481 o
->in1
= tcg_temp_new_i64();
3482 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3485 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3488 o
->in1
= tcg_temp_new_i64();
3489 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3492 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3495 o
->in1
= tcg_temp_new_i64();
3496 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3499 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3502 o
->in1
= tcg_temp_new_i64();
3503 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3506 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3509 o
->in1
= tcg_temp_new_i64();
3510 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3513 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3516 o
->in1
= tcg_temp_new_i64();
3517 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3520 /* ====================================================================== */
3521 /* The "INput 2" generators. These load the second operand to an insn. */
3523 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3525 o
->in2
= regs
[get_field(f
, r1
)];
3529 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3531 o
->in2
= tcg_temp_new_i64();
3532 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3535 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3537 o
->in2
= tcg_temp_new_i64();
3538 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3541 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3543 o
->in2
= load_reg(get_field(f
, r2
));
3546 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3548 o
->in2
= regs
[get_field(f
, r2
)];
3552 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3554 int r2
= get_field(f
, r2
);
3556 o
->in2
= load_reg(r2
);
3560 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3562 o
->in2
= tcg_temp_new_i64();
3563 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3566 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3568 o
->in2
= tcg_temp_new_i64();
3569 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3572 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3574 o
->in2
= tcg_temp_new_i64();
3575 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3578 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3580 o
->in2
= tcg_temp_new_i64();
3581 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3584 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3586 o
->in2
= load_reg(get_field(f
, r3
));
3589 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3591 o
->in2
= tcg_temp_new_i64();
3592 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3595 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3597 o
->in2
= tcg_temp_new_i64();
3598 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3601 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3603 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3606 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3608 o
->in2
= fregs
[get_field(f
, r2
)];
3612 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3614 /* ??? Specification exception: r1 must be < 14. */
3615 int r2
= get_field(f
, r2
);
3617 o
->in2
= fregs
[(r2
+ 2) & 15];
3618 o
->g_in1
= o
->g_in2
= true;
3621 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3623 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3626 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3628 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3629 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3632 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3634 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3637 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3639 help_l2_shift(s
, f
, o
, 31);
3642 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3644 help_l2_shift(s
, f
, o
, 63);
3647 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3650 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3653 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3656 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3659 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3662 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3665 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3668 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3671 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3674 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3677 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3680 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3683 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3686 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3689 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3692 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3695 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3698 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3701 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3704 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3707 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3709 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3712 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3714 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3717 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3719 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3722 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3724 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3727 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3729 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3730 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3733 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3735 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3736 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3739 /* ====================================================================== */
3741 /* Find opc within the table of insns. This is formulated as a switch
3742 statement so that (1) we get compile-time notice of cut-paste errors
3743 for duplicated opcodes, and (2) the compiler generates the binary
3744 search tree, rather than us having to post-process the table. */
3746 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3747 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3749 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3751 enum DisasInsnEnum
{
3752 #include "insn-data.def"
3756 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3761 .help_in1 = in1_##I1, \
3762 .help_in2 = in2_##I2, \
3763 .help_prep = prep_##P, \
3764 .help_wout = wout_##W, \
3765 .help_cout = cout_##CC, \
3766 .help_op = op_##OP, \
3770 /* Allow 0 to be used for NULL in the table below. */
3778 static const DisasInsn insn_info
[] = {
3779 #include "insn-data.def"
3783 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3784 case OPC: return &insn_info[insn_ ## NM];
3786 static const DisasInsn
*lookup_opc(uint16_t opc
)
3789 #include "insn-data.def"
3798 /* Extract a field from the insn. The INSN should be left-aligned in
3799 the uint64_t so that we can more easily utilize the big-bit-endian
3800 definitions we extract from the Principals of Operation. */
3802 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
3810 /* Zero extract the field from the insn. */
3811 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
3813 /* Sign-extend, or un-swap the field as necessary. */
3815 case 0: /* unsigned */
3817 case 1: /* signed */
3818 assert(f
->size
<= 32);
3819 m
= 1u << (f
->size
- 1);
3822 case 2: /* dl+dh split, signed 20 bit. */
3823 r
= ((int8_t)r
<< 12) | (r
>> 8);
3829 /* Validate that the "compressed" encoding we selected above is valid.
3830 I.e. we havn't make two different original fields overlap. */
3831 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
3832 o
->presentC
|= 1 << f
->indexC
;
3833 o
->presentO
|= 1 << f
->indexO
;
3835 o
->c
[f
->indexC
] = r
;
3838 /* Lookup the insn at the current PC, extracting the operands into O and
3839 returning the info struct for the insn. Returns NULL for invalid insn. */
3841 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
3844 uint64_t insn
, pc
= s
->pc
;
3846 const DisasInsn
*info
;
3848 insn
= ld_code2(env
, pc
);
3849 op
= (insn
>> 8) & 0xff;
3850 ilen
= get_ilen(op
);
3851 s
->next_pc
= s
->pc
+ ilen
;
3858 insn
= ld_code4(env
, pc
) << 32;
3861 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
3867 /* We can't actually determine the insn format until we've looked up
3868 the full insn opcode. Which we can't do without locating the
3869 secondary opcode. Assume by default that OP2 is at bit 40; for
3870 those smaller insns that don't actually have a secondary opcode
3871 this will correctly result in OP2 = 0. */
3877 case 0xb2: /* S, RRF, RRE */
3878 case 0xb3: /* RRE, RRD, RRF */
3879 case 0xb9: /* RRE, RRF */
3880 case 0xe5: /* SSE, SIL */
3881 op2
= (insn
<< 8) >> 56;
3885 case 0xc0: /* RIL */
3886 case 0xc2: /* RIL */
3887 case 0xc4: /* RIL */
3888 case 0xc6: /* RIL */
3889 case 0xc8: /* SSF */
3890 case 0xcc: /* RIL */
3891 op2
= (insn
<< 12) >> 60;
3893 case 0xd0 ... 0xdf: /* SS */
3899 case 0xee ... 0xf3: /* SS */
3900 case 0xf8 ... 0xfd: /* SS */
3904 op2
= (insn
<< 40) >> 56;
3908 memset(f
, 0, sizeof(*f
));
3912 /* Lookup the instruction. */
3913 info
= lookup_opc(op
<< 8 | op2
);
3915 /* If we found it, extract the operands. */
3917 DisasFormat fmt
= info
->fmt
;
3920 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
3921 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
3927 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
3929 const DisasInsn
*insn
;
3930 ExitStatus ret
= NO_EXIT
;
3934 /* Search for the insn in the table. */
3935 insn
= extract_insn(env
, s
, &f
);
3937 /* Not found means unimplemented/illegal opcode. */
3939 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
3941 gen_illegal_opcode(s
);
3942 return EXIT_NORETURN
;
3945 /* Set up the strutures we use to communicate with the helpers. */
3948 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
3949 TCGV_UNUSED_I64(o
.out
);
3950 TCGV_UNUSED_I64(o
.out2
);
3951 TCGV_UNUSED_I64(o
.in1
);
3952 TCGV_UNUSED_I64(o
.in2
);
3953 TCGV_UNUSED_I64(o
.addr1
);
3955 /* Implement the instruction. */
3956 if (insn
->help_in1
) {
3957 insn
->help_in1(s
, &f
, &o
);
3959 if (insn
->help_in2
) {
3960 insn
->help_in2(s
, &f
, &o
);
3962 if (insn
->help_prep
) {
3963 insn
->help_prep(s
, &f
, &o
);
3965 if (insn
->help_op
) {
3966 ret
= insn
->help_op(s
, &o
);
3968 if (insn
->help_wout
) {
3969 insn
->help_wout(s
, &f
, &o
);
3971 if (insn
->help_cout
) {
3972 insn
->help_cout(s
, &o
);
3975 /* Free any temporaries created by the helpers. */
3976 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
3977 tcg_temp_free_i64(o
.out
);
3979 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
3980 tcg_temp_free_i64(o
.out2
);
3982 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
3983 tcg_temp_free_i64(o
.in1
);
3985 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
3986 tcg_temp_free_i64(o
.in2
);
3988 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
3989 tcg_temp_free_i64(o
.addr1
);
3992 /* Advance to the next instruction. */
3997 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
3998 TranslationBlock
*tb
,
4002 target_ulong pc_start
;
4003 uint64_t next_page_start
;
4004 uint16_t *gen_opc_end
;
4006 int num_insns
, max_insns
;
4014 if (!(tb
->flags
& FLAG_MASK_64
)) {
4015 pc_start
&= 0x7fffffff;
4020 dc
.cc_op
= CC_OP_DYNAMIC
;
4021 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4023 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4025 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4028 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4029 if (max_insns
== 0) {
4030 max_insns
= CF_COUNT_MASK
;
4037 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4041 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4044 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4045 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4046 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4047 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4049 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4053 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4054 tcg_gen_debug_insn_start(dc
.pc
);
4058 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4059 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4060 if (bp
->pc
== dc
.pc
) {
4061 status
= EXIT_PC_STALE
;
4067 if (status
== NO_EXIT
) {
4068 status
= translate_one(env
, &dc
);
4071 /* If we reach a page boundary, are single stepping,
4072 or exhaust instruction count, stop generation. */
4073 if (status
== NO_EXIT
4074 && (dc
.pc
>= next_page_start
4075 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4076 || num_insns
>= max_insns
4078 || env
->singlestep_enabled
)) {
4079 status
= EXIT_PC_STALE
;
4081 } while (status
== NO_EXIT
);
4083 if (tb
->cflags
& CF_LAST_IO
) {
4092 update_psw_addr(&dc
);
4094 case EXIT_PC_UPDATED
:
4095 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4096 cc op type is in env */
4098 /* Exit the TB, either by raising a debug exception or by return. */
4100 gen_exception(EXCP_DEBUG
);
4109 gen_icount_end(tb
, num_insns
);
4110 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4112 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4115 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4118 tb
->size
= dc
.pc
- pc_start
;
4119 tb
->icount
= num_insns
;
4122 #if defined(S390X_DEBUG_DISAS)
4123 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4124 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4125 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4131 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4133 gen_intermediate_code_internal(env
, tb
, 0);
4136 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4138 gen_intermediate_code_internal(env
, tb
, 1);
4141 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4144 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4145 cc_op
= gen_opc_cc_op
[pc_pos
];
4146 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {