4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit
[CC_OP_MAX
];
76 static uint64_t inline_branch_miss
[CC_OP_MAX
];
79 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
81 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
82 if (s
->tb
->flags
& FLAG_MASK_32
) {
83 return pc
| 0x80000000;
89 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
95 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
96 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
98 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
99 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
102 for (i
= 0; i
< 16; i
++) {
103 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
105 cpu_fprintf(f
, "\n");
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
114 cpu_fprintf(f
, "\n");
120 #ifndef CONFIG_USER_ONLY
121 for (i
= 0; i
< 16; i
++) {
122 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
124 cpu_fprintf(f
, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i
= 0; i
< CC_OP_MAX
; i
++) {
133 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
134 inline_branch_miss
[i
], inline_branch_hit
[i
]);
138 cpu_fprintf(f
, "\n");
141 static TCGv_i64 psw_addr
;
142 static TCGv_i64 psw_mask
;
144 static TCGv_i32 cc_op
;
145 static TCGv_i64 cc_src
;
146 static TCGv_i64 cc_dst
;
147 static TCGv_i64 cc_vr
;
149 static char cpu_reg_names
[32][4];
150 static TCGv_i64 regs
[16];
151 static TCGv_i64 fregs
[16];
153 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
155 void s390x_translate_init(void)
159 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
160 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
161 offsetof(CPUS390XState
, psw
.addr
),
163 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
164 offsetof(CPUS390XState
, psw
.mask
),
167 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
169 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
171 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
173 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
176 for (i
= 0; i
< 16; i
++) {
177 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
178 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
179 offsetof(CPUS390XState
, regs
[i
]),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
185 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, fregs
[i
].d
),
187 cpu_reg_names
[i
+ 16]);
190 /* register helpers */
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
271 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
274 static int get_mem_index(DisasContext
*s
)
276 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
277 case PSW_ASC_PRIMARY
>> 32:
279 case PSW_ASC_SECONDARY
>> 32:
281 case PSW_ASC_HOME
>> 32:
289 static void gen_exception(int excp
)
291 TCGv_i32 tmp
= tcg_const_i32(excp
);
292 gen_helper_exception(cpu_env
, tmp
);
293 tcg_temp_free_i32(tmp
);
296 static void gen_program_exception(DisasContext
*s
, int code
)
300 /* Remember what pgm exeption this was. */
301 tmp
= tcg_const_i32(code
);
302 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
303 tcg_temp_free_i32(tmp
);
305 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
306 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
307 tcg_temp_free_i32(tmp
);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM
);
320 static inline void gen_illegal_opcode(DisasContext
*s
)
322 gen_program_exception(s
, PGM_SPECIFICATION
);
325 static inline void check_privileged(DisasContext
*s
)
327 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
328 gen_program_exception(s
, PGM_PRIVILEGED
);
332 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
343 tmp
= tcg_const_i64(d2
);
344 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
349 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
353 tmp
= tcg_const_i64(d2
);
354 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
359 tmp
= tcg_const_i64(d2
);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
364 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
370 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
372 s
->cc_op
= CC_OP_CONST0
+ val
;
375 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
377 tcg_gen_discard_i64(cc_src
);
378 tcg_gen_mov_i64(cc_dst
, dst
);
379 tcg_gen_discard_i64(cc_vr
);
383 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
386 tcg_gen_mov_i64(cc_src
, src
);
387 tcg_gen_mov_i64(cc_dst
, dst
);
388 tcg_gen_discard_i64(cc_vr
);
392 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
393 TCGv_i64 dst
, TCGv_i64 vr
)
395 tcg_gen_mov_i64(cc_src
, src
);
396 tcg_gen_mov_i64(cc_dst
, dst
);
397 tcg_gen_mov_i64(cc_vr
, vr
);
401 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
403 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
406 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
408 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
411 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
413 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
416 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
418 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
421 /* CC value is in env->cc_op */
422 static void set_cc_static(DisasContext
*s
)
424 tcg_gen_discard_i64(cc_src
);
425 tcg_gen_discard_i64(cc_dst
);
426 tcg_gen_discard_i64(cc_vr
);
427 s
->cc_op
= CC_OP_STATIC
;
430 /* calculates cc into cc_op */
431 static void gen_op_calc_cc(DisasContext
*s
)
433 TCGv_i32 local_cc_op
;
436 TCGV_UNUSED_I32(local_cc_op
);
437 TCGV_UNUSED_I64(dummy
);
440 dummy
= tcg_const_i64(0);
454 local_cc_op
= tcg_const_i32(s
->cc_op
);
470 /* s->cc_op is the cc value */
471 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
474 /* env->cc_op already is the cc value */
489 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
494 case CC_OP_LTUGTU_32
:
495 case CC_OP_LTUGTU_64
:
502 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
517 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
520 /* unknown operation - assume 3 arguments and cc_op in env */
521 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
527 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
528 tcg_temp_free_i32(local_cc_op
);
530 if (!TCGV_IS_UNUSED_I64(dummy
)) {
531 tcg_temp_free_i64(dummy
);
534 /* We now have cc in cc_op as constant */
538 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
540 /* NOTE: we handle the case where the TB spans two pages here */
541 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
542 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
543 && !s
->singlestep_enabled
544 && !(s
->tb
->cflags
& CF_LAST_IO
));
547 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
549 #ifdef DEBUG_INLINE_BRANCHES
550 inline_branch_miss
[cc_op
]++;
554 static void account_inline_branch(DisasContext
*s
, int cc_op
)
556 #ifdef DEBUG_INLINE_BRANCHES
557 inline_branch_hit
[cc_op
]++;
561 /* Table of mask values to comparison codes, given a comparison as input.
562 For a true comparison CC=3 will never be set, but we treat this
563 conservatively for possible use when CC=3 indicates overflow. */
564 static const TCGCond ltgt_cond
[16] = {
565 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
566 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
567 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
568 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
569 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
570 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
571 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
572 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
575 /* Table of mask values to comparison codes, given a logic op as input.
576 For such, only CC=0 and CC=1 should be possible. */
577 static const TCGCond nz_cond
[16] = {
579 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
581 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
583 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
584 /* EQ | NE | x | x */
585 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
588 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
589 details required to generate a TCG comparison. */
590 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
593 enum cc_op old_cc_op
= s
->cc_op
;
595 if (mask
== 15 || mask
== 0) {
596 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
599 c
->g1
= c
->g2
= true;
604 /* Find the TCG condition for the mask + cc op. */
610 cond
= ltgt_cond
[mask
];
611 if (cond
== TCG_COND_NEVER
) {
614 account_inline_branch(s
, old_cc_op
);
617 case CC_OP_LTUGTU_32
:
618 case CC_OP_LTUGTU_64
:
619 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
620 if (cond
== TCG_COND_NEVER
) {
623 account_inline_branch(s
, old_cc_op
);
627 cond
= nz_cond
[mask
];
628 if (cond
== TCG_COND_NEVER
) {
631 account_inline_branch(s
, old_cc_op
);
646 account_inline_branch(s
, old_cc_op
);
661 account_inline_branch(s
, old_cc_op
);
665 switch (mask
& 0xa) {
666 case 8: /* src == 0 -> no one bit found */
669 case 2: /* src != 0 -> one bit found */
675 account_inline_branch(s
, old_cc_op
);
680 /* Calculate cc value. */
685 /* Jump based on CC. We'll load up the real cond below;
686 the assignment here merely avoids a compiler warning. */
687 account_noninline_branch(s
, old_cc_op
);
688 old_cc_op
= CC_OP_STATIC
;
689 cond
= TCG_COND_NEVER
;
693 /* Load up the arguments of the comparison. */
695 c
->g1
= c
->g2
= false;
699 c
->u
.s32
.a
= tcg_temp_new_i32();
700 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
701 c
->u
.s32
.b
= tcg_const_i32(0);
704 case CC_OP_LTUGTU_32
:
706 c
->u
.s32
.a
= tcg_temp_new_i32();
707 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
708 c
->u
.s32
.b
= tcg_temp_new_i32();
709 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
716 c
->u
.s64
.b
= tcg_const_i64(0);
720 case CC_OP_LTUGTU_64
:
723 c
->g1
= c
->g2
= true;
729 c
->u
.s64
.a
= tcg_temp_new_i64();
730 c
->u
.s64
.b
= tcg_const_i64(0);
731 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
739 case 0x8 | 0x4 | 0x2: /* cc != 3 */
741 c
->u
.s32
.b
= tcg_const_i32(3);
743 case 0x8 | 0x4 | 0x1: /* cc != 2 */
745 c
->u
.s32
.b
= tcg_const_i32(2);
747 case 0x8 | 0x2 | 0x1: /* cc != 1 */
749 c
->u
.s32
.b
= tcg_const_i32(1);
751 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
754 c
->u
.s32
.a
= tcg_temp_new_i32();
755 c
->u
.s32
.b
= tcg_const_i32(0);
756 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
758 case 0x8 | 0x4: /* cc < 2 */
760 c
->u
.s32
.b
= tcg_const_i32(2);
762 case 0x8: /* cc == 0 */
764 c
->u
.s32
.b
= tcg_const_i32(0);
766 case 0x4 | 0x2 | 0x1: /* cc != 0 */
768 c
->u
.s32
.b
= tcg_const_i32(0);
770 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
773 c
->u
.s32
.a
= tcg_temp_new_i32();
774 c
->u
.s32
.b
= tcg_const_i32(0);
775 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
777 case 0x4: /* cc == 1 */
779 c
->u
.s32
.b
= tcg_const_i32(1);
781 case 0x2 | 0x1: /* cc > 1 */
783 c
->u
.s32
.b
= tcg_const_i32(1);
785 case 0x2: /* cc == 2 */
787 c
->u
.s32
.b
= tcg_const_i32(2);
789 case 0x1: /* cc == 3 */
791 c
->u
.s32
.b
= tcg_const_i32(3);
794 /* CC is masked by something else: (8 >> cc) & mask. */
797 c
->u
.s32
.a
= tcg_const_i32(8);
798 c
->u
.s32
.b
= tcg_const_i32(0);
799 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
800 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
811 static void free_compare(DisasCompare
*c
)
815 tcg_temp_free_i64(c
->u
.s64
.a
);
817 tcg_temp_free_i32(c
->u
.s32
.a
);
822 tcg_temp_free_i64(c
->u
.s64
.b
);
824 tcg_temp_free_i32(c
->u
.s32
.b
);
829 /* ====================================================================== */
830 /* Define the insn format enumeration. */
831 #define F0(N) FMT_##N,
832 #define F1(N, X1) F0(N)
833 #define F2(N, X1, X2) F0(N)
834 #define F3(N, X1, X2, X3) F0(N)
835 #define F4(N, X1, X2, X3, X4) F0(N)
836 #define F5(N, X1, X2, X3, X4, X5) F0(N)
839 #include "insn-format.def"
849 /* Define a structure to hold the decoded fields. We'll store each inside
850 an array indexed by an enum. In order to conserve memory, we'll arrange
851 for fields that do not exist at the same time to overlap, thus the "C"
852 for compact. For checking purposes there is an "O" for original index
853 as well that will be applied to availability bitmaps. */
855 enum DisasFieldIndexO
{
878 enum DisasFieldIndexC
{
912 unsigned presentC
:16;
913 unsigned int presentO
;
/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
921 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
923 return (f
->presentO
>> c
) & 1;
926 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
927 enum DisasFieldIndexC c
)
929 assert(have_field1(f
, o
));
933 /* Describe the layout of each field in each format. */
934 typedef struct DisasField
{
938 unsigned int indexC
:6;
939 enum DisasFieldIndexO indexO
:8;
942 typedef struct DisasFormatInfo
{
943 DisasField op
[NUM_C_FIELD
];
946 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
947 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
948 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
949 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
950 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
951 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
952 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
953 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
954 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
955 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
956 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
957 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
958 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
959 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
961 #define F0(N) { { } },
962 #define F1(N, X1) { { X1 } },
963 #define F2(N, X1, X2) { { X1, X2 } },
964 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
965 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
966 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
968 static const DisasFormatInfo format_info
[] = {
969 #include "insn-format.def"
987 /* Generally, we'll extract operands into this structures, operate upon
988 them, and store them back. See the "in1", "in2", "prep", "wout" sets
989 of routines below for more details. */
991 bool g_out
, g_out2
, g_in1
, g_in2
;
992 TCGv_i64 out
, out2
, in1
, in2
;
996 /* Return values from translate_one, indicating the state of the TB. */
998 /* Continue the TB. */
1000 /* We have emitted one or more goto_tb. No fixup required. */
1002 /* We are not using a goto_tb (for whatever reason), but have updated
1003 the PC (for whatever reason), so there's no need to do it again on
1006 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1007 updated the PC for the next instruction to be executed. */
1009 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1010 No following code will be executed. */
/* Architectural facilities an instruction may require.  The closing
   "} DisasFacility;" was dropped by the extraction and is
   reconstructed from the typedef header. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
1040 DisasFacility fac
:6;
1044 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1045 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1046 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1047 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1048 void (*help_cout
)(DisasContext
*, DisasOps
*);
1049 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1054 /* ====================================================================== */
1055 /* Miscelaneous helpers, used by several operations. */
1057 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1058 DisasOps
*o
, int mask
)
1060 int b2
= get_field(f
, b2
);
1061 int d2
= get_field(f
, d2
);
1064 o
->in2
= tcg_const_i64(d2
& mask
);
1066 o
->in2
= get_address(s
, 0, b2
, d2
);
1067 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1071 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1073 if (dest
== s
->next_pc
) {
1076 if (use_goto_tb(s
, dest
)) {
1079 tcg_gen_movi_i64(psw_addr
, dest
);
1080 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1081 return EXIT_GOTO_TB
;
1083 tcg_gen_movi_i64(psw_addr
, dest
);
1084 return EXIT_PC_UPDATED
;
1088 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1089 bool is_imm
, int imm
, TCGv_i64 cdest
)
1092 uint64_t dest
= s
->pc
+ 2 * imm
;
1095 /* Take care of the special cases first. */
1096 if (c
->cond
== TCG_COND_NEVER
) {
1101 if (dest
== s
->next_pc
) {
1102 /* Branch to next. */
1106 if (c
->cond
== TCG_COND_ALWAYS
) {
1107 ret
= help_goto_direct(s
, dest
);
1111 if (TCGV_IS_UNUSED_I64(cdest
)) {
1112 /* E.g. bcr %r0 -> no branch. */
1116 if (c
->cond
== TCG_COND_ALWAYS
) {
1117 tcg_gen_mov_i64(psw_addr
, cdest
);
1118 ret
= EXIT_PC_UPDATED
;
1123 if (use_goto_tb(s
, s
->next_pc
)) {
1124 if (is_imm
&& use_goto_tb(s
, dest
)) {
1125 /* Both exits can use goto_tb. */
1128 lab
= gen_new_label();
1130 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1132 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1135 /* Branch not taken. */
1137 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1138 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1143 tcg_gen_movi_i64(psw_addr
, dest
);
1144 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1148 /* Fallthru can use goto_tb, but taken branch cannot. */
1149 /* Store taken branch destination before the brcond. This
1150 avoids having to allocate a new local temp to hold it.
1151 We'll overwrite this in the not taken case anyway. */
1153 tcg_gen_mov_i64(psw_addr
, cdest
);
1156 lab
= gen_new_label();
1158 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1160 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1163 /* Branch not taken. */
1166 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1167 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1171 tcg_gen_movi_i64(psw_addr
, dest
);
1173 ret
= EXIT_PC_UPDATED
;
1176 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1177 Most commonly we're single-stepping or some other condition that
1178 disables all use of goto_tb. Just update the PC and exit. */
1180 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1182 cdest
= tcg_const_i64(dest
);
1186 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1189 TCGv_i32 t0
= tcg_temp_new_i32();
1190 TCGv_i64 t1
= tcg_temp_new_i64();
1191 TCGv_i64 z
= tcg_const_i64(0);
1192 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1193 tcg_gen_extu_i32_i64(t1
, t0
);
1194 tcg_temp_free_i32(t0
);
1195 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1196 tcg_temp_free_i64(t1
);
1197 tcg_temp_free_i64(z
);
1201 tcg_temp_free_i64(cdest
);
1203 tcg_temp_free_i64(next
);
1205 ret
= EXIT_PC_UPDATED
;
1213 /* ====================================================================== */
1214 /* The operations. These perform the bulk of the work for any insn,
1215 usually after the operands have been loaded and output initialized. */
1217 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1219 gen_helper_abs_i64(o
->out
, o
->in2
);
1223 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1225 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1229 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1231 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1235 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1237 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1238 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1242 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1244 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1248 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1252 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1254 /* XXX possible optimization point */
1256 cc
= tcg_temp_new_i64();
1257 tcg_gen_extu_i32_i64(cc
, cc_op
);
1258 tcg_gen_shri_i64(cc
, cc
, 1);
1260 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1261 tcg_temp_free_i64(cc
);
1265 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1267 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1271 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1273 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1277 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1279 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1280 return_low128(o
->out2
);
1284 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1286 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1290 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1292 int shift
= s
->insn
->data
& 0xff;
1293 int size
= s
->insn
->data
>> 8;
1294 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1297 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1298 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1299 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1301 /* Produce the CC from only the bits manipulated. */
1302 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1303 set_cc_nz_u64(s
, cc_dst
);
1307 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1309 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1310 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1311 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1312 return EXIT_PC_UPDATED
;
1318 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1320 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1321 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1324 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1326 int m1
= get_field(s
->fields
, m1
);
1327 bool is_imm
= have_field(s
->fields
, i2
);
1328 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1331 disas_jcc(s
, &c
, m1
);
1332 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1335 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1337 int r1
= get_field(s
->fields
, r1
);
1338 bool is_imm
= have_field(s
->fields
, i2
);
1339 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1343 c
.cond
= TCG_COND_NE
;
1348 t
= tcg_temp_new_i64();
1349 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1350 store_reg32_i64(r1
, t
);
1351 c
.u
.s32
.a
= tcg_temp_new_i32();
1352 c
.u
.s32
.b
= tcg_const_i32(0);
1353 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1354 tcg_temp_free_i64(t
);
1356 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1359 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1361 int r1
= get_field(s
->fields
, r1
);
1362 bool is_imm
= have_field(s
->fields
, i2
);
1363 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1366 c
.cond
= TCG_COND_NE
;
1371 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1372 c
.u
.s64
.a
= regs
[r1
];
1373 c
.u
.s64
.b
= tcg_const_i64(0);
1375 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1378 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1380 int r1
= get_field(s
->fields
, r1
);
1381 int r3
= get_field(s
->fields
, r3
);
1382 bool is_imm
= have_field(s
->fields
, i2
);
1383 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1387 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1392 t
= tcg_temp_new_i64();
1393 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1394 c
.u
.s32
.a
= tcg_temp_new_i32();
1395 c
.u
.s32
.b
= tcg_temp_new_i32();
1396 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1397 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1398 store_reg32_i64(r1
, t
);
1399 tcg_temp_free_i64(t
);
1401 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1404 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1406 int r1
= get_field(s
->fields
, r1
);
1407 int r3
= get_field(s
->fields
, r3
);
1408 bool is_imm
= have_field(s
->fields
, i2
);
1409 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1412 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1415 if (r1
== (r3
| 1)) {
1416 c
.u
.s64
.b
= load_reg(r3
| 1);
1419 c
.u
.s64
.b
= regs
[r3
| 1];
1423 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1424 c
.u
.s64
.a
= regs
[r1
];
1427 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1430 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1432 int imm
, m3
= get_field(s
->fields
, m3
);
1436 /* Bit 3 of the m3 field is reserved and should be zero.
1437 Choose to ignore it wrt the ltgt_cond table above. */
1438 c
.cond
= ltgt_cond
[m3
& 14];
1439 if (s
->insn
->data
) {
1440 c
.cond
= tcg_unsigned_cond(c
.cond
);
1442 c
.is_64
= c
.g1
= c
.g2
= true;
1446 is_imm
= have_field(s
->fields
, i4
);
1448 imm
= get_field(s
->fields
, i4
);
1451 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1452 get_field(s
->fields
, d4
));
1455 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1458 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1460 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1465 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1467 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1472 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1474 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1479 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1481 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1482 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1483 tcg_temp_free_i32(m3
);
1484 gen_set_cc_nz_f32(s
, o
->in2
);
1488 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1490 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1491 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1492 tcg_temp_free_i32(m3
);
1493 gen_set_cc_nz_f64(s
, o
->in2
);
1497 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1499 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1500 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1501 tcg_temp_free_i32(m3
);
1502 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1506 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1508 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1509 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1510 tcg_temp_free_i32(m3
);
1511 gen_set_cc_nz_f32(s
, o
->in2
);
1515 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1517 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1518 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1519 tcg_temp_free_i32(m3
);
1520 gen_set_cc_nz_f64(s
, o
->in2
);
1524 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1526 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1527 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1528 tcg_temp_free_i32(m3
);
1529 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1533 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1535 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1536 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1537 tcg_temp_free_i32(m3
);
1541 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1543 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1544 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1545 tcg_temp_free_i32(m3
);
1549 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1551 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1552 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1553 tcg_temp_free_i32(m3
);
1554 return_low128(o
->out2
);
1558 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1560 int r2
= get_field(s
->fields
, r2
);
1561 TCGv_i64 len
= tcg_temp_new_i64();
1563 potential_page_fault(s
);
1564 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1566 return_low128(o
->out
);
1568 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1569 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1570 tcg_temp_free_i64(len
);
1575 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1577 int l
= get_field(s
->fields
, l1
);
1582 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1583 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1586 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1587 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1590 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1591 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1594 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1595 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1598 potential_page_fault(s
);
1599 vl
= tcg_const_i32(l
);
1600 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1601 tcg_temp_free_i32(vl
);
1605 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1609 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1611 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1612 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1613 potential_page_fault(s
);
1614 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1615 tcg_temp_free_i32(r1
);
1616 tcg_temp_free_i32(r3
);
1621 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1623 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1624 TCGv_i32 t1
= tcg_temp_new_i32();
1625 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1626 potential_page_fault(s
);
1627 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1629 tcg_temp_free_i32(t1
);
1630 tcg_temp_free_i32(m3
);
1634 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1636 potential_page_fault(s
);
1637 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1639 return_low128(o
->in2
);
1643 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1645 int r3
= get_field(s
->fields
, r3
);
1646 potential_page_fault(s
);
1647 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1652 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1654 int r3
= get_field(s
->fields
, r3
);
1655 potential_page_fault(s
);
1656 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1661 #ifndef CONFIG_USER_ONLY
1662 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1664 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1665 check_privileged(s
);
1666 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1667 tcg_temp_free_i32(r1
);
1673 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1675 int r3
= get_field(s
->fields
, r3
);
1676 TCGv_i64 in3
= tcg_temp_new_i64();
1677 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1678 potential_page_fault(s
);
1679 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1680 tcg_temp_free_i64(in3
);
1685 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1687 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1688 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1689 potential_page_fault(s
);
1690 /* XXX rewrite in tcg */
1691 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1696 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1698 TCGv_i64 t1
= tcg_temp_new_i64();
1699 TCGv_i32 t2
= tcg_temp_new_i32();
1700 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1701 gen_helper_cvd(t1
, t2
);
1702 tcg_temp_free_i32(t2
);
1703 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1704 tcg_temp_free_i64(t1
);
1708 #ifndef CONFIG_USER_ONLY
1709 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1713 check_privileged(s
);
1714 potential_page_fault(s
);
1716 /* We pretend the format is RX_a so that D2 is the field we want. */
1717 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1718 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1719 tcg_temp_free_i32(tmp
);
1724 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1726 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1727 return_low128(o
->out
);
1731 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1733 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1734 return_low128(o
->out
);
1738 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1740 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1741 return_low128(o
->out
);
1745 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
1747 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
1748 return_low128(o
->out
);
1752 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
1754 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1758 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
1760 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1764 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
1766 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1767 return_low128(o
->out2
);
1771 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
1773 int r2
= get_field(s
->fields
, r2
);
1774 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1778 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
1780 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1784 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
1786 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1787 tb->flags, (ab)use the tb->cs_base field as the address of
1788 the template in memory, and grab 8 bits of tb->flags/cflags for
1789 the contents of the register. We would then recognize all this
1790 in gen_intermediate_code_internal, generating code for exactly
1791 one instruction. This new TB then gets executed normally.
1793 On the other hand, this seems to be mostly used for modifying
1794 MVC inside of memcpy, which needs a helper call anyway. So
1795 perhaps this doesn't bear thinking about any further. */
1802 tmp
= tcg_const_i64(s
->next_pc
);
1803 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
1804 tcg_temp_free_i64(tmp
);
1810 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
1812 /* We'll use the original input for cc computation, since we get to
1813 compare that against 0, which ought to be better than comparing
1814 the real output against 64. It also lets cc_dst be a convenient
1815 temporary during our computation. */
1816 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
1818 /* R1 = IN ? CLZ(IN) : 64. */
1819 gen_helper_clz(o
->out
, o
->in2
);
1821 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1822 value by 64, which is undefined. But since the shift is 64 iff the
1823 input is zero, we still get the correct result after and'ing. */
1824 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
1825 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
1826 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
1830 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
1832 int m3
= get_field(s
->fields
, m3
);
1833 int pos
, len
, base
= s
->insn
->data
;
1834 TCGv_i64 tmp
= tcg_temp_new_i64();
1839 /* Effectively a 32-bit load. */
1840 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
1847 /* Effectively a 16-bit load. */
1848 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
1856 /* Effectively an 8-bit load. */
1857 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1862 pos
= base
+ ctz32(m3
) * 8;
1863 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
1864 ccm
= ((1ull << len
) - 1) << pos
;
1868 /* This is going to be a sequence of loads and inserts. */
1869 pos
= base
+ 32 - 8;
1873 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1874 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
1875 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
1878 m3
= (m3
<< 1) & 0xf;
1884 tcg_gen_movi_i64(tmp
, ccm
);
1885 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
1886 tcg_temp_free_i64(tmp
);
1890 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
1892 int shift
= s
->insn
->data
& 0xff;
1893 int size
= s
->insn
->data
>> 8;
1894 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
1898 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
1903 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
1905 t1
= tcg_temp_new_i64();
1906 tcg_gen_shli_i64(t1
, psw_mask
, 20);
1907 tcg_gen_shri_i64(t1
, t1
, 36);
1908 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1910 tcg_gen_extu_i32_i64(t1
, cc_op
);
1911 tcg_gen_shli_i64(t1
, t1
, 28);
1912 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1913 tcg_temp_free_i64(t1
);
1917 #ifndef CONFIG_USER_ONLY
1918 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
1920 check_privileged(s
);
1921 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
1925 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
1927 check_privileged(s
);
1928 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
1933 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
1935 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
1939 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
1941 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
1945 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
1947 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1951 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
1953 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1957 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
1959 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
1960 return_low128(o
->out2
);
1964 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
1966 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
1967 return_low128(o
->out2
);
1971 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
1973 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
1977 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
1979 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
1983 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
1985 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
1989 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
1991 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
1995 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
1997 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2001 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2003 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2007 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2009 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2013 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2015 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2019 #ifndef CONFIG_USER_ONLY
2020 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2022 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2023 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2024 check_privileged(s
);
2025 potential_page_fault(s
);
2026 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2027 tcg_temp_free_i32(r1
);
2028 tcg_temp_free_i32(r3
);
2032 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2034 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2035 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2036 check_privileged(s
);
2037 potential_page_fault(s
);
2038 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2039 tcg_temp_free_i32(r1
);
2040 tcg_temp_free_i32(r3
);
2043 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2045 check_privileged(s
);
2046 potential_page_fault(s
);
2047 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2052 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2056 check_privileged(s
);
2058 t1
= tcg_temp_new_i64();
2059 t2
= tcg_temp_new_i64();
2060 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2061 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2062 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2063 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2064 tcg_gen_shli_i64(t1
, t1
, 32);
2065 gen_helper_load_psw(cpu_env
, t1
, t2
);
2066 tcg_temp_free_i64(t1
);
2067 tcg_temp_free_i64(t2
);
2068 return EXIT_NORETURN
;
2071 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2075 check_privileged(s
);
2077 t1
= tcg_temp_new_i64();
2078 t2
= tcg_temp_new_i64();
2079 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2080 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2081 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2082 gen_helper_load_psw(cpu_env
, t1
, t2
);
2083 tcg_temp_free_i64(t1
);
2084 tcg_temp_free_i64(t2
);
2085 return EXIT_NORETURN
;
2089 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2091 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2092 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2093 potential_page_fault(s
);
2094 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2095 tcg_temp_free_i32(r1
);
2096 tcg_temp_free_i32(r3
);
2100 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2102 int r1
= get_field(s
->fields
, r1
);
2103 int r3
= get_field(s
->fields
, r3
);
2104 TCGv_i64 t
= tcg_temp_new_i64();
2105 TCGv_i64 t4
= tcg_const_i64(4);
2108 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2109 store_reg32_i64(r1
, t
);
2113 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2117 tcg_temp_free_i64(t
);
2118 tcg_temp_free_i64(t4
);
2122 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2124 int r1
= get_field(s
->fields
, r1
);
2125 int r3
= get_field(s
->fields
, r3
);
2126 TCGv_i64 t
= tcg_temp_new_i64();
2127 TCGv_i64 t4
= tcg_const_i64(4);
2130 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2131 store_reg32h_i64(r1
, t
);
2135 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2139 tcg_temp_free_i64(t
);
2140 tcg_temp_free_i64(t4
);
2144 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2146 int r1
= get_field(s
->fields
, r1
);
2147 int r3
= get_field(s
->fields
, r3
);
2148 TCGv_i64 t8
= tcg_const_i64(8);
2151 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2155 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2159 tcg_temp_free_i64(t8
);
2163 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2166 o
->g_out
= o
->g_in2
;
2167 TCGV_UNUSED_I64(o
->in2
);
2172 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2176 o
->g_out
= o
->g_in1
;
2177 o
->g_out2
= o
->g_in2
;
2178 TCGV_UNUSED_I64(o
->in1
);
2179 TCGV_UNUSED_I64(o
->in2
);
2180 o
->g_in1
= o
->g_in2
= false;
2184 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2186 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2187 potential_page_fault(s
);
2188 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2189 tcg_temp_free_i32(l
);
2193 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2195 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2196 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2197 potential_page_fault(s
);
2198 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2199 tcg_temp_free_i32(r1
);
2200 tcg_temp_free_i32(r2
);
2205 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2207 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2208 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2209 potential_page_fault(s
);
2210 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2211 tcg_temp_free_i32(r1
);
2212 tcg_temp_free_i32(r3
);
2217 #ifndef CONFIG_USER_ONLY
2218 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2220 int r1
= get_field(s
->fields
, l1
);
2221 check_privileged(s
);
2222 potential_page_fault(s
);
2223 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2228 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2230 int r1
= get_field(s
->fields
, l1
);
2231 check_privileged(s
);
2232 potential_page_fault(s
);
2233 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2239 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2241 potential_page_fault(s
);
2242 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2247 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2249 potential_page_fault(s
);
2250 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2252 return_low128(o
->in2
);
2256 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2258 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2262 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2264 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2265 return_low128(o
->out2
);
2269 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2271 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2275 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2277 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2281 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2283 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2287 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2289 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2290 return_low128(o
->out2
);
2294 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2296 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2297 return_low128(o
->out2
);
2301 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2303 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2304 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2305 tcg_temp_free_i64(r3
);
2309 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2311 int r3
= get_field(s
->fields
, r3
);
2312 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2316 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2318 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2319 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2320 tcg_temp_free_i64(r3
);
2324 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2326 int r3
= get_field(s
->fields
, r3
);
2327 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2331 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2333 gen_helper_nabs_i64(o
->out
, o
->in2
);
2337 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2339 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2343 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2345 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2349 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2351 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2352 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2356 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2358 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2359 potential_page_fault(s
);
2360 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2361 tcg_temp_free_i32(l
);
2366 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2368 tcg_gen_neg_i64(o
->out
, o
->in2
);
2372 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2374 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2378 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2380 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2384 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2386 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2387 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2391 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2393 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2394 potential_page_fault(s
);
2395 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2396 tcg_temp_free_i32(l
);
2401 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2403 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2407 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2409 int shift
= s
->insn
->data
& 0xff;
2410 int size
= s
->insn
->data
>> 8;
2411 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2414 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2415 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2417 /* Produce the CC from only the bits manipulated. */
2418 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2419 set_cc_nz_u64(s
, cc_dst
);
2423 #ifndef CONFIG_USER_ONLY
2424 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2426 check_privileged(s
);
2427 gen_helper_ptlb(cpu_env
);
2432 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2434 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2438 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2440 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2444 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2446 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2450 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2452 TCGv_i32 t1
= tcg_temp_new_i32();
2453 TCGv_i32 t2
= tcg_temp_new_i32();
2454 TCGv_i32 to
= tcg_temp_new_i32();
2455 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2456 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2457 tcg_gen_rotl_i32(to
, t1
, t2
);
2458 tcg_gen_extu_i32_i64(o
->out
, to
);
2459 tcg_temp_free_i32(t1
);
2460 tcg_temp_free_i32(t2
);
2461 tcg_temp_free_i32(to
);
2465 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2467 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2471 #ifndef CONFIG_USER_ONLY
2472 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2474 check_privileged(s
);
2475 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2480 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2482 check_privileged(s
);
2483 gen_helper_sacf(cpu_env
, o
->in2
);
2484 /* Addressing mode has changed, so end the block. */
2485 return EXIT_PC_STALE
;
2489 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2491 int r1
= get_field(s
->fields
, r1
);
2492 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2496 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2498 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2502 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2504 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2508 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2510 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2511 return_low128(o
->out2
);
2515 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2517 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2521 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2523 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2527 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2529 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2530 return_low128(o
->out2
);
2534 #ifndef CONFIG_USER_ONLY
2535 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2537 check_privileged(s
);
2538 potential_page_fault(s
);
2539 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2544 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2546 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2547 check_privileged(s
);
2548 potential_page_fault(s
);
2549 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2550 tcg_temp_free_i32(r1
);
2555 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2557 uint64_t sign
= 1ull << s
->insn
->data
;
2558 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2559 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2560 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2561 /* The arithmetic left shift is curious in that it does not affect
2562 the sign bit. Copy that over from the source unchanged. */
2563 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2564 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2565 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2569 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2571 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2575 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2577 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2581 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2583 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2587 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2589 gen_helper_sfpc(cpu_env
, o
->in2
);
2593 #ifndef CONFIG_USER_ONLY
2594 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2596 check_privileged(s
);
2597 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2598 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2602 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
2604 check_privileged(s
);
2605 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
2609 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2611 check_privileged(s
);
2612 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2616 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
2618 check_privileged(s
);
2619 /* ??? Surely cpu address != cpu number. In any case the previous
2620 version of this stored more than the required half-word, so it
2621 is unlikely this has ever been tested. */
2622 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2626 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2628 gen_helper_stck(o
->out
, cpu_env
);
2629 /* ??? We don't implement clock states. */
2630 gen_op_movi_cc(s
, 0);
2634 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
2636 TCGv_i64 c1
= tcg_temp_new_i64();
2637 TCGv_i64 c2
= tcg_temp_new_i64();
2638 gen_helper_stck(c1
, cpu_env
);
2639 /* Shift the 64-bit value into its place as a zero-extended
2640 104-bit value. Note that "bit positions 64-103 are always
2641 non-zero so that they compare differently to STCK"; we set
2642 the least significant bit to 1. */
2643 tcg_gen_shli_i64(c2
, c1
, 56);
2644 tcg_gen_shri_i64(c1
, c1
, 8);
2645 tcg_gen_ori_i64(c2
, c2
, 0x10000);
2646 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
2647 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2648 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
2649 tcg_temp_free_i64(c1
);
2650 tcg_temp_free_i64(c2
);
2651 /* ??? We don't implement clock states. */
2652 gen_op_movi_cc(s
, 0);
2656 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2658 check_privileged(s
);
2659 gen_helper_sckc(cpu_env
, o
->in2
);
2663 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2665 check_privileged(s
);
2666 gen_helper_stckc(o
->out
, cpu_env
);
2670 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2672 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2673 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2674 check_privileged(s
);
2675 potential_page_fault(s
);
2676 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2677 tcg_temp_free_i32(r1
);
2678 tcg_temp_free_i32(r3
);
2682 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2684 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2685 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2686 check_privileged(s
);
2687 potential_page_fault(s
);
2688 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2689 tcg_temp_free_i32(r1
);
2690 tcg_temp_free_i32(r3
);
2694 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2696 check_privileged(s
);
2697 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2701 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2703 check_privileged(s
);
2704 gen_helper_spt(cpu_env
, o
->in2
);
2708 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
2711 /* We really ought to have more complete indication of facilities
2712 that we implement. Address this when STFLE is implemented. */
2713 check_privileged(s
);
2714 f
= tcg_const_i64(0xc0000000);
2715 a
= tcg_const_i64(200);
2716 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
2717 tcg_temp_free_i64(f
);
2718 tcg_temp_free_i64(a
);
2722 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2724 check_privileged(s
);
2725 gen_helper_stpt(o
->out
, cpu_env
);
2729 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
2731 check_privileged(s
);
2732 potential_page_fault(s
);
2733 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
2738 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
2740 check_privileged(s
);
2741 gen_helper_spx(cpu_env
, o
->in2
);
2745 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
2747 check_privileged(s
);
2748 /* Not operational. */
2749 gen_op_movi_cc(s
, 3);
2753 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
2755 check_privileged(s
);
2756 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
2757 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
2761 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2763 uint64_t i2
= get_field(s
->fields
, i2
);
2766 check_privileged(s
);
2768 /* It is important to do what the instruction name says: STORE THEN.
2769 If we let the output hook perform the store then if we fault and
2770 restart, we'll have the wrong SYSTEM MASK in place. */
2771 t
= tcg_temp_new_i64();
2772 tcg_gen_shri_i64(t
, psw_mask
, 56);
2773 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2774 tcg_temp_free_i64(t
);
2776 if (s
->fields
->op
== 0xac) {
2777 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2778 (i2
<< 56) | 0x00ffffffffffffffull
);
2780 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2785 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
2787 check_privileged(s
);
2788 potential_page_fault(s
);
2789 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
2794 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2796 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2800 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2802 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2806 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2808 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2812 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2814 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2818 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2820 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2821 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2822 potential_page_fault(s
);
2823 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
2824 tcg_temp_free_i32(r1
);
2825 tcg_temp_free_i32(r3
);
2829 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
2831 int m3
= get_field(s
->fields
, m3
);
2832 int pos
, base
= s
->insn
->data
;
2833 TCGv_i64 tmp
= tcg_temp_new_i64();
2835 pos
= base
+ ctz32(m3
) * 8;
2838 /* Effectively a 32-bit store. */
2839 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2840 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
2846 /* Effectively a 16-bit store. */
2847 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2848 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
2855 /* Effectively an 8-bit store. */
2856 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2857 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2861 /* This is going to be a sequence of shifts and stores. */
2862 pos
= base
+ 32 - 8;
2865 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2866 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2867 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2869 m3
= (m3
<< 1) & 0xf;
2874 tcg_temp_free_i64(tmp
);
2878 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
2880 int r1
= get_field(s
->fields
, r1
);
2881 int r3
= get_field(s
->fields
, r3
);
2882 int size
= s
->insn
->data
;
2883 TCGv_i64 tsize
= tcg_const_i64(size
);
2887 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
2889 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
2894 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
2898 tcg_temp_free_i64(tsize
);
2902 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
2904 int r1
= get_field(s
->fields
, r1
);
2905 int r3
= get_field(s
->fields
, r3
);
2906 TCGv_i64 t
= tcg_temp_new_i64();
2907 TCGv_i64 t4
= tcg_const_i64(4);
2908 TCGv_i64 t32
= tcg_const_i64(32);
2911 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
2912 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
2916 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2920 tcg_temp_free_i64(t
);
2921 tcg_temp_free_i64(t4
);
2922 tcg_temp_free_i64(t32
);
2926 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
2928 potential_page_fault(s
);
2929 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2931 return_low128(o
->in2
);
2935 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
2937 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
2941 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
2946 tcg_gen_not_i64(o
->in2
, o
->in2
);
2947 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2949 /* XXX possible optimization point */
2951 cc
= tcg_temp_new_i64();
2952 tcg_gen_extu_i32_i64(cc
, cc_op
);
2953 tcg_gen_shri_i64(cc
, cc
, 1);
2954 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2955 tcg_temp_free_i64(cc
);
2959 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
2966 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
2967 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
2968 tcg_temp_free_i32(t
);
2970 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
2971 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
2972 tcg_temp_free_i32(t
);
2974 gen_exception(EXCP_SVC
);
2975 return EXIT_NORETURN
;
2978 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
2980 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
2985 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
2987 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
2992 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
2994 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
2999 #ifndef CONFIG_USER_ONLY
3000 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3002 potential_page_fault(s
);
3003 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3009 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3011 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3012 potential_page_fault(s
);
3013 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3014 tcg_temp_free_i32(l
);
3019 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3021 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3022 potential_page_fault(s
);
3023 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3024 tcg_temp_free_i32(l
);
3028 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3030 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3031 potential_page_fault(s
);
3032 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3033 tcg_temp_free_i32(l
);
3038 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3040 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3044 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3046 int shift
= s
->insn
->data
& 0xff;
3047 int size
= s
->insn
->data
>> 8;
3048 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3051 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3052 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3054 /* Produce the CC from only the bits manipulated. */
3055 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3056 set_cc_nz_u64(s
, cc_dst
);
3060 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3062 o
->out
= tcg_const_i64(0);
3066 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3068 o
->out
= tcg_const_i64(0);
3074 /* ====================================================================== */
3075 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3076 the original inputs), update the various cc data structures in order to
3077 be able to compute the new condition code. */
/* CC from a 32-bit absolute-value result.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}
3084 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3086 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
/* CC from signed 32-bit addition: record both inputs and the output
   so the condition code can be computed lazily later.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}
/* CC from signed 64-bit addition (inputs + output recorded for lazy CC).  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}
/* CC from unsigned 32-bit addition (inputs + output recorded for lazy CC).  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}
3104 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3106 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3109 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3111 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3114 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3116 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3119 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3121 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
/* CC from a signed 64-bit comparison of in1 vs in2.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}
3129 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3131 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
/* CC from an unsigned 64-bit comparison of in1 vs in2.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}
3139 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3141 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3144 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3146 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
/* CC from a 128-bit float result held in the out/out2 pair.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}
3154 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3156 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3159 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3161 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3164 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3166 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3169 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3171 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
/* Nonzero test on a 32-bit result: zero-extend the low 32 bits of
   out into cc_dst first, then apply the generic NZ condition.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
/* Nonzero test on the full 64-bit result.  */
static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}
3185 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3187 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3190 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3192 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3195 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3197 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
/* CC from signed 64-bit subtraction (inputs + output recorded for lazy CC).  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}
3205 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3207 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3210 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3212 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3215 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3217 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3220 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3222 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
/* CC for a 32-bit test-under-mask: value in in1, mask in in2.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}
3230 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3232 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3235 /* ====================================================================== */
3236 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3237 with the TCG register to which we will write. Used in combination with
3238 the "wout" generators, in some cases we need a new temporary, and in
3239 some cases we can write to a TCG global. */
/* Allocate a fresh temporary to receive the operation's output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
/* Allocate a fresh temporary pair for a 128-bit (paired) output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
3252 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3254 o
->out
= regs
[get_field(f
, r1
)];
3258 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3260 /* ??? Specification exception: r1 must be even. */
3261 int r1
= get_field(f
, r1
);
3263 o
->out2
= regs
[(r1
+ 1) & 15];
3264 o
->g_out
= o
->g_out2
= true;
3267 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3269 o
->out
= fregs
[get_field(f
, r1
)];
3273 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3275 /* ??? Specification exception: r1 must be < 14. */
3276 int r1
= get_field(f
, r1
);
3278 o
->out2
= fregs
[(r1
+ 2) & 15];
3279 o
->g_out
= o
->g_out2
= true;
3282 /* ====================================================================== */
3283 /* The "Write OUTput" generators. These generally perform some non-trivial
3284 copy of data to TCG globals, or to main memory. The trivial cases are
3285 generally handled by having a "prep" generator install the TCG global
3286 as the destination of the operation. */
/* Write the full 64-bit result to general register R1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
/* Deposit the low 8 bits of the result into R1, leaving the
   remaining bits of the register untouched.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
/* Deposit the low 16 bits of the result into R1, leaving the
   remaining bits of the register untouched.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
/* Write the result to R1 as a 32-bit register value.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
/* Write a 32-bit result pair: out to R1, out2 to R1+1 (wrapping at 15).  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}
/* Split a 64-bit doubleword result across the even/odd register pair:
   low half to R1+1, high half to R1.  Note the shift clobbers o->out.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
/* Write the result to the 32-bit (short-float) half of FP register F1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
/* Write the 64-bit result to FP register F1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
/* Write a 128-bit result to the FP register pair F1 and F1+2.  */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be < 14. */
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg((f1 + 2) & 15, o->out2);
}
/* Conditionally write the 32-bit result to R1; the store is skipped
   when R1 == R2 (presumably because it would be a no-op for the
   insns that use this writer — confirm against callers).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
/* Float analogue of wout_cond_r1r2_32: write the short-float result
   to F1 only when R1 != R2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
/* Store the low 8 bits of the result to memory at addr1.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
/* Store the low 16 bits of the result to memory at addr1.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
/* Store the low 32 bits of the result to memory at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
/* Store the full 64-bit result to memory at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
/* Store the low 32 bits of the result to the address held in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
3384 /* ====================================================================== */
3385 /* The "INput 1" generators. These load the first operand to an insn. */
/* in1 = a copy of general register R1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
3392 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3394 o
->in1
= regs
[get_field(f
, r1
)];
/* in1 = sign-extended low 32 bits of R1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
/* in1 = zero-extended low 32 bits of R1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
/* in1 = high 32 bits of R1, shifted down into the low half.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
/* in1 = a copy of the odd register of the R1 pair, i.e. R1+1 (mod 16).  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}
3423 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3425 /* ??? Specification exception: r1 must be even. */
3426 int r1
= get_field(f
, r1
);
3427 o
->in1
= tcg_temp_new_i64();
3428 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3431 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3433 /* ??? Specification exception: r1 must be even. */
3434 int r1
= get_field(f
, r1
);
3435 o
->in1
= tcg_temp_new_i64();
3436 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
/* in1 = 64-bit doubleword assembled from the register pair:
   low half from R1+1, high half from R1.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
/* in1 = a copy of general register R2.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
/* in1 = a copy of general register R3.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
3457 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3459 o
->in1
= regs
[get_field(f
, r3
)];
3463 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3465 o
->in1
= tcg_temp_new_i64();
3466 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3469 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3471 o
->in1
= tcg_temp_new_i64();
3472 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
/* in1 = the 32-bit (short-float) half of FP register F1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
3480 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3482 o
->in1
= fregs
[get_field(f
, r1
)];
3486 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3488 /* ??? Specification exception: r1 must be < 14. */
3489 int r1
= get_field(f
, r1
);
3491 o
->out2
= fregs
[(r1
+ 2) & 15];
3492 o
->g_out
= o
->g_out2
= true;
/* addr1 = effective address from B1 + D1 (no index register).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
/* addr1 = effective address from X2 + B2 + D2; X2 is optional and
   treated as 0 when the format has no index field.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
3506 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3509 o
->in1
= tcg_temp_new_i64();
3510 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3513 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3516 o
->in1
= tcg_temp_new_i64();
3517 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3520 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 o
->in1
= tcg_temp_new_i64();
3524 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3527 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3530 o
->in1
= tcg_temp_new_i64();
3531 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3534 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3537 o
->in1
= tcg_temp_new_i64();
3538 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3541 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3544 o
->in1
= tcg_temp_new_i64();
3545 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3548 /* ====================================================================== */
3549 /* The "INput 2" generators. These load the second operand to an insn. */
3551 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3553 o
->in2
= regs
[get_field(f
, r1
)];
3557 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3559 o
->in2
= tcg_temp_new_i64();
3560 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3563 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3565 o
->in2
= tcg_temp_new_i64();
3566 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
/* in2 = a copy of general register R2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
3574 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3576 o
->in2
= regs
[get_field(f
, r2
)];
3580 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3582 int r2
= get_field(f
, r2
);
3584 o
->in2
= load_reg(r2
);
3588 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3590 o
->in2
= tcg_temp_new_i64();
3591 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3594 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3596 o
->in2
= tcg_temp_new_i64();
3597 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3600 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3602 o
->in2
= tcg_temp_new_i64();
3603 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3606 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3608 o
->in2
= tcg_temp_new_i64();
3609 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3612 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3614 o
->in2
= load_reg(get_field(f
, r3
));
/* in2 = sign-extended low 32 bits of R2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
/* in2 = zero-extended low 32 bits of R2.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
3629 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3631 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3634 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3636 o
->in2
= fregs
[get_field(f
, r2
)];
3640 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3642 /* ??? Specification exception: r1 must be < 14. */
3643 int r2
= get_field(f
, r2
);
3645 o
->in2
= fregs
[(r2
+ 2) & 15];
3646 o
->g_in1
= o
->g_in2
= true;
3649 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3651 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
/* in2 = effective address from X2 + B2 + D2; X2 is optional and
   treated as 0 when the format has no index field.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
/* in2 = PC-relative address: current PC plus the signed I2 offset
   scaled by 2 (I2 counts halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
/* in2 = shift-count operand for 32-bit shifts; 31 is the bound
   passed to help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
/* in2 = shift-count operand for 64-bit shifts; 63 is the bound
   passed to help_l2_shift.  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
3675 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3678 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3681 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3684 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3687 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3690 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3693 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3696 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3699 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3702 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3705 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3708 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3711 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3714 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3717 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3720 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3723 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3726 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3729 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3732 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
/* in2 = the immediate I2, as decoded (signedness comes from the
   field extraction).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
/* in2 = the immediate I2 truncated to an unsigned 8-bit value.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
/* in2 = the immediate I2 truncated to an unsigned 16-bit value.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
/* in2 = the immediate I2 truncated to an unsigned 32-bit value.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
3755 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3757 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3758 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3761 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3763 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3764 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3767 /* ====================================================================== */
3769 /* Find opc within the table of insns. This is formulated as a switch
3770 statement so that (1) we get compile-time notice of cut-paste errors
3771 for duplicated opcodes, and (2) the compiler generates the binary
3772 search tree, rather than us having to post-process the table. */
3774 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3775 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3777 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3779 enum DisasInsnEnum
{
3780 #include "insn-data.def"
3784 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3789 .help_in1 = in1_##I1, \
3790 .help_in2 = in2_##I2, \
3791 .help_prep = prep_##P, \
3792 .help_wout = wout_##W, \
3793 .help_cout = cout_##CC, \
3794 .help_op = op_##OP, \
3798 /* Allow 0 to be used for NULL in the table below. */
3806 static const DisasInsn insn_info
[] = {
3807 #include "insn-data.def"
3811 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3812 case OPC: return &insn_info[insn_ ## NM];
3814 static const DisasInsn
*lookup_opc(uint16_t opc
)
3817 #include "insn-data.def"
3826 /* Extract a field from the insn. The INSN should be left-aligned in
3827 the uint64_t so that we can more easily utilize the big-bit-endian
3828 definitions we extract from the Principals of Operation. */
3830 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
3838 /* Zero extract the field from the insn. */
3839 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
3841 /* Sign-extend, or un-swap the field as necessary. */
3843 case 0: /* unsigned */
3845 case 1: /* signed */
3846 assert(f
->size
<= 32);
3847 m
= 1u << (f
->size
- 1);
3850 case 2: /* dl+dh split, signed 20 bit. */
3851 r
= ((int8_t)r
<< 12) | (r
>> 8);
3857 /* Validate that the "compressed" encoding we selected above is valid.
3858 I.e. we havn't make two different original fields overlap. */
3859 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
3860 o
->presentC
|= 1 << f
->indexC
;
3861 o
->presentO
|= 1 << f
->indexO
;
3863 o
->c
[f
->indexC
] = r
;
3866 /* Lookup the insn at the current PC, extracting the operands into O and
3867 returning the info struct for the insn. Returns NULL for invalid insn. */
3869 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
3872 uint64_t insn
, pc
= s
->pc
;
3874 const DisasInsn
*info
;
3876 insn
= ld_code2(env
, pc
);
3877 op
= (insn
>> 8) & 0xff;
3878 ilen
= get_ilen(op
);
3879 s
->next_pc
= s
->pc
+ ilen
;
3886 insn
= ld_code4(env
, pc
) << 32;
3889 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
3895 /* We can't actually determine the insn format until we've looked up
3896 the full insn opcode. Which we can't do without locating the
3897 secondary opcode. Assume by default that OP2 is at bit 40; for
3898 those smaller insns that don't actually have a secondary opcode
3899 this will correctly result in OP2 = 0. */
3905 case 0xb2: /* S, RRF, RRE */
3906 case 0xb3: /* RRE, RRD, RRF */
3907 case 0xb9: /* RRE, RRF */
3908 case 0xe5: /* SSE, SIL */
3909 op2
= (insn
<< 8) >> 56;
3913 case 0xc0: /* RIL */
3914 case 0xc2: /* RIL */
3915 case 0xc4: /* RIL */
3916 case 0xc6: /* RIL */
3917 case 0xc8: /* SSF */
3918 case 0xcc: /* RIL */
3919 op2
= (insn
<< 12) >> 60;
3921 case 0xd0 ... 0xdf: /* SS */
3927 case 0xee ... 0xf3: /* SS */
3928 case 0xf8 ... 0xfd: /* SS */
3932 op2
= (insn
<< 40) >> 56;
3936 memset(f
, 0, sizeof(*f
));
3940 /* Lookup the instruction. */
3941 info
= lookup_opc(op
<< 8 | op2
);
3943 /* If we found it, extract the operands. */
3945 DisasFormat fmt
= info
->fmt
;
3948 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
3949 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
3955 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
3957 const DisasInsn
*insn
;
3958 ExitStatus ret
= NO_EXIT
;
3962 /* Search for the insn in the table. */
3963 insn
= extract_insn(env
, s
, &f
);
3965 /* Not found means unimplemented/illegal opcode. */
3967 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
3969 gen_illegal_opcode(s
);
3970 return EXIT_NORETURN
;
3973 /* Set up the strutures we use to communicate with the helpers. */
3976 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
3977 TCGV_UNUSED_I64(o
.out
);
3978 TCGV_UNUSED_I64(o
.out2
);
3979 TCGV_UNUSED_I64(o
.in1
);
3980 TCGV_UNUSED_I64(o
.in2
);
3981 TCGV_UNUSED_I64(o
.addr1
);
3983 /* Implement the instruction. */
3984 if (insn
->help_in1
) {
3985 insn
->help_in1(s
, &f
, &o
);
3987 if (insn
->help_in2
) {
3988 insn
->help_in2(s
, &f
, &o
);
3990 if (insn
->help_prep
) {
3991 insn
->help_prep(s
, &f
, &o
);
3993 if (insn
->help_op
) {
3994 ret
= insn
->help_op(s
, &o
);
3996 if (insn
->help_wout
) {
3997 insn
->help_wout(s
, &f
, &o
);
3999 if (insn
->help_cout
) {
4000 insn
->help_cout(s
, &o
);
4003 /* Free any temporaries created by the helpers. */
4004 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4005 tcg_temp_free_i64(o
.out
);
4007 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4008 tcg_temp_free_i64(o
.out2
);
4010 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4011 tcg_temp_free_i64(o
.in1
);
4013 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4014 tcg_temp_free_i64(o
.in2
);
4016 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4017 tcg_temp_free_i64(o
.addr1
);
4020 /* Advance to the next instruction. */
4025 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4026 TranslationBlock
*tb
,
4030 target_ulong pc_start
;
4031 uint64_t next_page_start
;
4032 uint16_t *gen_opc_end
;
4034 int num_insns
, max_insns
;
4042 if (!(tb
->flags
& FLAG_MASK_64
)) {
4043 pc_start
&= 0x7fffffff;
4048 dc
.cc_op
= CC_OP_DYNAMIC
;
4049 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4051 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4053 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4056 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4057 if (max_insns
== 0) {
4058 max_insns
= CF_COUNT_MASK
;
4065 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4069 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4072 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4073 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4074 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4075 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4077 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4081 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4082 tcg_gen_debug_insn_start(dc
.pc
);
4086 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4087 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4088 if (bp
->pc
== dc
.pc
) {
4089 status
= EXIT_PC_STALE
;
4095 if (status
== NO_EXIT
) {
4096 status
= translate_one(env
, &dc
);
4099 /* If we reach a page boundary, are single stepping,
4100 or exhaust instruction count, stop generation. */
4101 if (status
== NO_EXIT
4102 && (dc
.pc
>= next_page_start
4103 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4104 || num_insns
>= max_insns
4106 || env
->singlestep_enabled
)) {
4107 status
= EXIT_PC_STALE
;
4109 } while (status
== NO_EXIT
);
4111 if (tb
->cflags
& CF_LAST_IO
) {
4120 update_psw_addr(&dc
);
4122 case EXIT_PC_UPDATED
:
4123 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4124 cc op type is in env */
4126 /* Exit the TB, either by raising a debug exception or by return. */
4128 gen_exception(EXCP_DEBUG
);
4137 gen_icount_end(tb
, num_insns
);
4138 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4140 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4143 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4146 tb
->size
= dc
.pc
- pc_start
;
4147 tb
->icount
= num_insns
;
4150 #if defined(S390X_DEBUG_DISAS)
4151 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4152 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4153 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4159 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4161 gen_intermediate_code_internal(env
, tb
, 0);
4164 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4166 gen_intermediate_code_internal(env
, tb
, 1);
4169 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4172 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4173 cc_op
= gen_opc_cc_op
[pc_pos
];
4174 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {