4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/translator.h"
47 #include "qemu/atomic128.h"
50 /* Information that (most) every instruction needs to manipulate. */
51 typedef struct DisasContext DisasContext
;
52 typedef struct DisasInsn DisasInsn
;
53 typedef struct DisasFields DisasFields
;
56 DisasContextBase base
;
57 const DisasInsn
*insn
;
61 * During translate_one(), pc_tmp is used to determine the instruction
62 * to be executed after base.pc_next - e.g. next sequential instruction
71 /* Information carried about a condition to be evaluated. */
78 struct { TCGv_i64 a
, b
; } s64
;
79 struct { TCGv_i32 a
, b
; } s32
;
83 #ifdef DEBUG_INLINE_BRANCHES
84 static uint64_t inline_branch_hit
[CC_OP_MAX
];
85 static uint64_t inline_branch_miss
[CC_OP_MAX
];
88 static void pc_to_link_info(TCGv_i64 out
, DisasContext
*s
, uint64_t pc
)
92 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
93 if (s
->base
.tb
->flags
& FLAG_MASK_64
) {
94 tcg_gen_movi_i64(out
, pc
);
99 assert(!(s
->base
.tb
->flags
& FLAG_MASK_64
));
100 tmp
= tcg_const_i64(pc
);
101 tcg_gen_deposit_i64(out
, out
, tmp
, 0, 32);
102 tcg_temp_free_i64(tmp
);
105 static TCGv_i64 psw_addr
;
106 static TCGv_i64 psw_mask
;
107 static TCGv_i64 gbea
;
109 static TCGv_i32 cc_op
;
110 static TCGv_i64 cc_src
;
111 static TCGv_i64 cc_dst
;
112 static TCGv_i64 cc_vr
;
114 static char cpu_reg_names
[16][4];
115 static TCGv_i64 regs
[16];
117 void s390x_translate_init(void)
121 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
122 offsetof(CPUS390XState
, psw
.addr
),
124 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
125 offsetof(CPUS390XState
, psw
.mask
),
127 gbea
= tcg_global_mem_new_i64(cpu_env
,
128 offsetof(CPUS390XState
, gbea
),
131 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
133 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
135 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
137 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
140 for (i
= 0; i
< 16; i
++) {
141 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
142 regs
[i
] = tcg_global_mem_new(cpu_env
,
143 offsetof(CPUS390XState
, regs
[i
]),
148 static inline int vec_full_reg_offset(uint8_t reg
)
151 return offsetof(CPUS390XState
, vregs
[reg
][0].d
);
154 static inline int vec_reg_offset(uint8_t reg
, uint8_t enr
, TCGMemOp es
)
156 /* Convert element size (es) - e.g. MO_8 - to bytes */
157 const uint8_t bytes
= 1 << es
;
158 int offs
= enr
* bytes
;
161 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
162 * of the 16 byte vector, on both, little and big endian systems.
164 * Big Endian (target/possible host)
165 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
166 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
167 * W: [ 0][ 1] - [ 2][ 3]
170 * Little Endian (possible host)
171 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
172 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
173 * W: [ 1][ 0] - [ 3][ 2]
176 * For 16 byte elements, the two 8 byte halves will not form a host
177 * int128 if the host is little endian, since they're in the wrong order.
178 * Some operations (e.g. xor) do not care. For operations like addition,
179 * the two 8 byte elements have to be loaded separately. Let's force all
180 * 16 byte operations to handle it in a special way.
182 g_assert(es
<= MO_64
);
183 #ifndef HOST_WORDS_BIGENDIAN
186 return offs
+ vec_full_reg_offset(reg
);
189 static inline int freg64_offset(uint8_t reg
)
192 return vec_reg_offset(reg
, 0, MO_64
);
195 static inline int freg32_offset(uint8_t reg
)
198 return vec_reg_offset(reg
, 0, MO_32
);
201 static TCGv_i64
load_reg(int reg
)
203 TCGv_i64 r
= tcg_temp_new_i64();
204 tcg_gen_mov_i64(r
, regs
[reg
]);
208 static TCGv_i64
load_freg(int reg
)
210 TCGv_i64 r
= tcg_temp_new_i64();
212 tcg_gen_ld_i64(r
, cpu_env
, freg64_offset(reg
));
216 static TCGv_i64
load_freg32_i64(int reg
)
218 TCGv_i64 r
= tcg_temp_new_i64();
220 tcg_gen_ld32u_i64(r
, cpu_env
, freg32_offset(reg
));
224 static void store_reg(int reg
, TCGv_i64 v
)
226 tcg_gen_mov_i64(regs
[reg
], v
);
229 static void store_freg(int reg
, TCGv_i64 v
)
231 tcg_gen_st_i64(v
, cpu_env
, freg64_offset(reg
));
234 static void store_reg32_i64(int reg
, TCGv_i64 v
)
236 /* 32 bit register writes keep the upper half */
237 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
240 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
242 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
245 static void store_freg32_i64(int reg
, TCGv_i64 v
)
247 tcg_gen_st32_i64(v
, cpu_env
, freg32_offset(reg
));
250 static void return_low128(TCGv_i64 dest
)
252 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
255 static void update_psw_addr(DisasContext
*s
)
258 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
261 static void per_branch(DisasContext
*s
, bool to_next
)
263 #ifndef CONFIG_USER_ONLY
264 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
266 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
267 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
268 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
270 tcg_temp_free_i64(next_pc
);
276 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
277 TCGv_i64 arg1
, TCGv_i64 arg2
)
279 #ifndef CONFIG_USER_ONLY
280 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
281 TCGLabel
*lab
= gen_new_label();
282 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
284 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
285 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
289 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
290 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
291 tcg_temp_free_i64(pc
);
296 static void per_breaking_event(DisasContext
*s
)
298 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
301 static void update_cc_op(DisasContext
*s
)
303 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
304 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
308 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
310 return (uint64_t)cpu_lduw_code(env
, pc
);
313 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
315 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
318 static int get_mem_index(DisasContext
*s
)
320 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
324 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
325 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
326 return MMU_PRIMARY_IDX
;
327 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
328 return MMU_SECONDARY_IDX
;
329 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
337 static void gen_exception(int excp
)
339 TCGv_i32 tmp
= tcg_const_i32(excp
);
340 gen_helper_exception(cpu_env
, tmp
);
341 tcg_temp_free_i32(tmp
);
344 static void gen_program_exception(DisasContext
*s
, int code
)
348 /* Remember what pgm exeption this was. */
349 tmp
= tcg_const_i32(code
);
350 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
351 tcg_temp_free_i32(tmp
);
353 tmp
= tcg_const_i32(s
->ilen
);
354 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
355 tcg_temp_free_i32(tmp
);
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM
);
367 static inline void gen_illegal_opcode(DisasContext
*s
)
369 gen_program_exception(s
, PGM_OPERATION
);
372 static inline void gen_data_exception(uint8_t dxc
)
374 TCGv_i32 tmp
= tcg_const_i32(dxc
);
375 gen_helper_data_exception(cpu_env
, tmp
);
376 tcg_temp_free_i32(tmp
);
379 static inline void gen_trap(DisasContext
*s
)
381 /* Set DXC to 0xff */
382 gen_data_exception(0xff);
385 static void gen_addi_and_wrap_i64(DisasContext
*s
, TCGv_i64 dst
, TCGv_i64 src
,
388 tcg_gen_addi_i64(dst
, src
, imm
);
389 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
390 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
391 tcg_gen_andi_i64(dst
, dst
, 0x7fffffff);
393 tcg_gen_andi_i64(dst
, dst
, 0x00ffffff);
398 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
400 TCGv_i64 tmp
= tcg_temp_new_i64();
403 * Note that d2 is limited to 20 bits, signed. If we crop negative
404 * displacements early we create larger immedate addends.
407 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
408 gen_addi_and_wrap_i64(s
, tmp
, tmp
, d2
);
410 gen_addi_and_wrap_i64(s
, tmp
, regs
[b2
], d2
);
412 gen_addi_and_wrap_i64(s
, tmp
, regs
[x2
], d2
);
413 } else if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
414 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
415 tcg_gen_movi_i64(tmp
, d2
& 0x7fffffff);
417 tcg_gen_movi_i64(tmp
, d2
& 0x00ffffff);
420 tcg_gen_movi_i64(tmp
, d2
);
426 static inline bool live_cc_data(DisasContext
*s
)
428 return (s
->cc_op
!= CC_OP_DYNAMIC
429 && s
->cc_op
!= CC_OP_STATIC
433 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
435 if (live_cc_data(s
)) {
436 tcg_gen_discard_i64(cc_src
);
437 tcg_gen_discard_i64(cc_dst
);
438 tcg_gen_discard_i64(cc_vr
);
440 s
->cc_op
= CC_OP_CONST0
+ val
;
443 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
445 if (live_cc_data(s
)) {
446 tcg_gen_discard_i64(cc_src
);
447 tcg_gen_discard_i64(cc_vr
);
449 tcg_gen_mov_i64(cc_dst
, dst
);
453 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
456 if (live_cc_data(s
)) {
457 tcg_gen_discard_i64(cc_vr
);
459 tcg_gen_mov_i64(cc_src
, src
);
460 tcg_gen_mov_i64(cc_dst
, dst
);
464 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
465 TCGv_i64 dst
, TCGv_i64 vr
)
467 tcg_gen_mov_i64(cc_src
, src
);
468 tcg_gen_mov_i64(cc_dst
, dst
);
469 tcg_gen_mov_i64(cc_vr
, vr
);
473 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
475 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
478 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
480 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
483 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
488 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
490 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
493 /* CC value is in env->cc_op */
494 static void set_cc_static(DisasContext
*s
)
496 if (live_cc_data(s
)) {
497 tcg_gen_discard_i64(cc_src
);
498 tcg_gen_discard_i64(cc_dst
);
499 tcg_gen_discard_i64(cc_vr
);
501 s
->cc_op
= CC_OP_STATIC
;
504 /* calculates cc into cc_op */
505 static void gen_op_calc_cc(DisasContext
*s
)
507 TCGv_i32 local_cc_op
= NULL
;
508 TCGv_i64 dummy
= NULL
;
512 dummy
= tcg_const_i64(0);
526 local_cc_op
= tcg_const_i32(s
->cc_op
);
542 /* s->cc_op is the cc value */
543 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
546 /* env->cc_op already is the cc value */
561 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
566 case CC_OP_LTUGTU_32
:
567 case CC_OP_LTUGTU_64
:
574 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
589 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
592 /* unknown operation - assume 3 arguments and cc_op in env */
593 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
600 tcg_temp_free_i32(local_cc_op
);
603 tcg_temp_free_i64(dummy
);
606 /* We now have cc in cc_op as constant */
610 static bool use_exit_tb(DisasContext
*s
)
612 return s
->base
.singlestep_enabled
||
613 (tb_cflags(s
->base
.tb
) & CF_LAST_IO
) ||
614 (s
->base
.tb
->flags
& FLAG_MASK_PER
);
617 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
619 if (unlikely(use_exit_tb(s
))) {
622 #ifndef CONFIG_USER_ONLY
623 return (dest
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
624 (dest
& TARGET_PAGE_MASK
) == (s
->base
.pc_next
& TARGET_PAGE_MASK
);
630 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
632 #ifdef DEBUG_INLINE_BRANCHES
633 inline_branch_miss
[cc_op
]++;
637 static void account_inline_branch(DisasContext
*s
, int cc_op
)
639 #ifdef DEBUG_INLINE_BRANCHES
640 inline_branch_hit
[cc_op
]++;
644 /* Table of mask values to comparison codes, given a comparison as input.
645 For such, CC=3 should not be possible. */
646 static const TCGCond ltgt_cond
[16] = {
647 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
648 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
649 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
650 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
651 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
652 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
653 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
654 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
657 /* Table of mask values to comparison codes, given a logic op as input.
658 For such, only CC=0 and CC=1 should be possible. */
659 static const TCGCond nz_cond
[16] = {
660 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
661 TCG_COND_NEVER
, TCG_COND_NEVER
,
662 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
663 TCG_COND_NE
, TCG_COND_NE
,
664 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
665 TCG_COND_EQ
, TCG_COND_EQ
,
666 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
667 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
670 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
671 details required to generate a TCG comparison. */
672 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
675 enum cc_op old_cc_op
= s
->cc_op
;
677 if (mask
== 15 || mask
== 0) {
678 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
681 c
->g1
= c
->g2
= true;
686 /* Find the TCG condition for the mask + cc op. */
692 cond
= ltgt_cond
[mask
];
693 if (cond
== TCG_COND_NEVER
) {
696 account_inline_branch(s
, old_cc_op
);
699 case CC_OP_LTUGTU_32
:
700 case CC_OP_LTUGTU_64
:
701 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
702 if (cond
== TCG_COND_NEVER
) {
705 account_inline_branch(s
, old_cc_op
);
709 cond
= nz_cond
[mask
];
710 if (cond
== TCG_COND_NEVER
) {
713 account_inline_branch(s
, old_cc_op
);
728 account_inline_branch(s
, old_cc_op
);
743 account_inline_branch(s
, old_cc_op
);
747 switch (mask
& 0xa) {
748 case 8: /* src == 0 -> no one bit found */
751 case 2: /* src != 0 -> one bit found */
757 account_inline_branch(s
, old_cc_op
);
763 case 8 | 2: /* vr == 0 */
766 case 4 | 1: /* vr != 0 */
769 case 8 | 4: /* no carry -> vr >= src */
772 case 2 | 1: /* carry -> vr < src */
778 account_inline_branch(s
, old_cc_op
);
783 /* Note that CC=0 is impossible; treat it as dont-care. */
785 case 2: /* zero -> op1 == op2 */
788 case 4 | 1: /* !zero -> op1 != op2 */
791 case 4: /* borrow (!carry) -> op1 < op2 */
794 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
800 account_inline_branch(s
, old_cc_op
);
805 /* Calculate cc value. */
810 /* Jump based on CC. We'll load up the real cond below;
811 the assignment here merely avoids a compiler warning. */
812 account_noninline_branch(s
, old_cc_op
);
813 old_cc_op
= CC_OP_STATIC
;
814 cond
= TCG_COND_NEVER
;
818 /* Load up the arguments of the comparison. */
820 c
->g1
= c
->g2
= false;
824 c
->u
.s32
.a
= tcg_temp_new_i32();
825 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
826 c
->u
.s32
.b
= tcg_const_i32(0);
829 case CC_OP_LTUGTU_32
:
832 c
->u
.s32
.a
= tcg_temp_new_i32();
833 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
834 c
->u
.s32
.b
= tcg_temp_new_i32();
835 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
842 c
->u
.s64
.b
= tcg_const_i64(0);
846 case CC_OP_LTUGTU_64
:
850 c
->g1
= c
->g2
= true;
856 c
->u
.s64
.a
= tcg_temp_new_i64();
857 c
->u
.s64
.b
= tcg_const_i64(0);
858 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
863 c
->u
.s32
.a
= tcg_temp_new_i32();
864 c
->u
.s32
.b
= tcg_temp_new_i32();
865 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
866 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
867 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
869 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
876 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
877 c
->u
.s64
.b
= tcg_const_i64(0);
889 case 0x8 | 0x4 | 0x2: /* cc != 3 */
891 c
->u
.s32
.b
= tcg_const_i32(3);
893 case 0x8 | 0x4 | 0x1: /* cc != 2 */
895 c
->u
.s32
.b
= tcg_const_i32(2);
897 case 0x8 | 0x2 | 0x1: /* cc != 1 */
899 c
->u
.s32
.b
= tcg_const_i32(1);
901 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
904 c
->u
.s32
.a
= tcg_temp_new_i32();
905 c
->u
.s32
.b
= tcg_const_i32(0);
906 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
908 case 0x8 | 0x4: /* cc < 2 */
910 c
->u
.s32
.b
= tcg_const_i32(2);
912 case 0x8: /* cc == 0 */
914 c
->u
.s32
.b
= tcg_const_i32(0);
916 case 0x4 | 0x2 | 0x1: /* cc != 0 */
918 c
->u
.s32
.b
= tcg_const_i32(0);
920 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
923 c
->u
.s32
.a
= tcg_temp_new_i32();
924 c
->u
.s32
.b
= tcg_const_i32(0);
925 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
927 case 0x4: /* cc == 1 */
929 c
->u
.s32
.b
= tcg_const_i32(1);
931 case 0x2 | 0x1: /* cc > 1 */
933 c
->u
.s32
.b
= tcg_const_i32(1);
935 case 0x2: /* cc == 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x1: /* cc == 3 */
941 c
->u
.s32
.b
= tcg_const_i32(3);
944 /* CC is masked by something else: (8 >> cc) & mask. */
947 c
->u
.s32
.a
= tcg_const_i32(8);
948 c
->u
.s32
.b
= tcg_const_i32(0);
949 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
950 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
961 static void free_compare(DisasCompare
*c
)
965 tcg_temp_free_i64(c
->u
.s64
.a
);
967 tcg_temp_free_i32(c
->u
.s32
.a
);
972 tcg_temp_free_i64(c
->u
.s64
.b
);
974 tcg_temp_free_i32(c
->u
.s32
.b
);
979 /* ====================================================================== */
980 /* Define the insn format enumeration. */
981 #define F0(N) FMT_##N,
982 #define F1(N, X1) F0(N)
983 #define F2(N, X1, X2) F0(N)
984 #define F3(N, X1, X2, X3) F0(N)
985 #define F4(N, X1, X2, X3, X4) F0(N)
986 #define F5(N, X1, X2, X3, X4, X5) F0(N)
989 #include "insn-format.def"
999 /* Define a structure to hold the decoded fields. We'll store each inside
1000 an array indexed by an enum. In order to conserve memory, we'll arrange
1001 for fields that do not exist at the same time to overlap, thus the "C"
1002 for compact. For checking purposes there is an "O" for original index
1003 as well that will be applied to availability bitmaps. */
1005 enum DisasFieldIndexO
{
1028 enum DisasFieldIndexC
{
1059 struct DisasFields
{
1063 unsigned presentC
:16;
1064 unsigned int presentO
;
1068 /* This is the way fields are to be accessed out of DisasFields. */
1069 #define have_field(S, F) have_field1((S), FLD_O_##F)
1070 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1072 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1074 return (f
->presentO
>> c
) & 1;
1077 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1078 enum DisasFieldIndexC c
)
1080 assert(have_field1(f
, o
));
1084 /* Describe the layout of each field in each format. */
1085 typedef struct DisasField
{
1087 unsigned int size
:8;
1088 unsigned int type
:2;
1089 unsigned int indexC
:6;
1090 enum DisasFieldIndexO indexO
:8;
1093 typedef struct DisasFormatInfo
{
1094 DisasField op
[NUM_C_FIELD
];
1097 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1098 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1099 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1100 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1101 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1102 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1103 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1104 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1105 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1106 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1107 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1108 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1109 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1110 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1112 #define F0(N) { { } },
1113 #define F1(N, X1) { { X1 } },
1114 #define F2(N, X1, X2) { { X1, X2 } },
1115 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1116 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1117 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1119 static const DisasFormatInfo format_info
[] = {
1120 #include "insn-format.def"
1138 /* Generally, we'll extract operands into this structures, operate upon
1139 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1140 of routines below for more details. */
1142 bool g_out
, g_out2
, g_in1
, g_in2
;
1143 TCGv_i64 out
, out2
, in1
, in2
;
1147 /* Instructions can place constraints on their operands, raising specification
1148 exceptions if they are violated. To make this easy to automate, each "in1",
1149 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1150 of the following, or 0. To make this easy to document, we'll put the
1151 SPEC_<name> defines next to <name>. */
1153 #define SPEC_r1_even 1
1154 #define SPEC_r2_even 2
1155 #define SPEC_r3_even 4
1156 #define SPEC_r1_f128 8
1157 #define SPEC_r2_f128 16
1159 /* Return values from translate_one, indicating the state of the TB. */
1161 /* We are not using a goto_tb (for whatever reason), but have updated
1162 the PC (for whatever reason), so there's no need to do it again on
1164 #define DISAS_PC_UPDATED DISAS_TARGET_0
1166 /* We have emitted one or more goto_tb. No fixup required. */
1167 #define DISAS_GOTO_TB DISAS_TARGET_1
1169 /* We have updated the PC and CC values. */
1170 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1172 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1173 updated the PC for the next instruction to be executed. */
1174 #define DISAS_PC_STALE DISAS_TARGET_3
1176 /* We are exiting the TB to the main loop. */
1177 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1180 /* Instruction flags */
1181 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1182 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1183 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1184 #define IF_BFP 0x0008 /* binary floating point instruction */
1185 #define IF_DFP 0x0010 /* decimal floating point instruction */
1186 #define IF_PRIV 0x0020 /* privileged instruction */
1197 /* Pre-process arguments before HELP_OP. */
1198 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1199 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1200 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1203 * Post-process output after HELP_OP.
1204 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1206 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1207 void (*help_cout
)(DisasContext
*, DisasOps
*);
1209 /* Implement the operation itself. */
1210 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1215 /* ====================================================================== */
1216 /* Miscellaneous helpers, used by several operations. */
1218 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1219 DisasOps
*o
, int mask
)
1221 int b2
= get_field(f
, b2
);
1222 int d2
= get_field(f
, d2
);
1225 o
->in2
= tcg_const_i64(d2
& mask
);
1227 o
->in2
= get_address(s
, 0, b2
, d2
);
1228 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1232 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1234 if (dest
== s
->pc_tmp
) {
1235 per_branch(s
, true);
1238 if (use_goto_tb(s
, dest
)) {
1240 per_breaking_event(s
);
1242 tcg_gen_movi_i64(psw_addr
, dest
);
1243 tcg_gen_exit_tb(s
->base
.tb
, 0);
1244 return DISAS_GOTO_TB
;
1246 tcg_gen_movi_i64(psw_addr
, dest
);
1247 per_branch(s
, false);
1248 return DISAS_PC_UPDATED
;
1252 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1253 bool is_imm
, int imm
, TCGv_i64 cdest
)
1256 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1259 /* Take care of the special cases first. */
1260 if (c
->cond
== TCG_COND_NEVER
) {
1265 if (dest
== s
->pc_tmp
) {
1266 /* Branch to next. */
1267 per_branch(s
, true);
1271 if (c
->cond
== TCG_COND_ALWAYS
) {
1272 ret
= help_goto_direct(s
, dest
);
1277 /* E.g. bcr %r0 -> no branch. */
1281 if (c
->cond
== TCG_COND_ALWAYS
) {
1282 tcg_gen_mov_i64(psw_addr
, cdest
);
1283 per_branch(s
, false);
1284 ret
= DISAS_PC_UPDATED
;
1289 if (use_goto_tb(s
, s
->pc_tmp
)) {
1290 if (is_imm
&& use_goto_tb(s
, dest
)) {
1291 /* Both exits can use goto_tb. */
1294 lab
= gen_new_label();
1296 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1298 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1301 /* Branch not taken. */
1303 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1304 tcg_gen_exit_tb(s
->base
.tb
, 0);
1308 per_breaking_event(s
);
1310 tcg_gen_movi_i64(psw_addr
, dest
);
1311 tcg_gen_exit_tb(s
->base
.tb
, 1);
1313 ret
= DISAS_GOTO_TB
;
1315 /* Fallthru can use goto_tb, but taken branch cannot. */
1316 /* Store taken branch destination before the brcond. This
1317 avoids having to allocate a new local temp to hold it.
1318 We'll overwrite this in the not taken case anyway. */
1320 tcg_gen_mov_i64(psw_addr
, cdest
);
1323 lab
= gen_new_label();
1325 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1327 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1330 /* Branch not taken. */
1333 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1334 tcg_gen_exit_tb(s
->base
.tb
, 0);
1338 tcg_gen_movi_i64(psw_addr
, dest
);
1340 per_breaking_event(s
);
1341 ret
= DISAS_PC_UPDATED
;
1344 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1345 Most commonly we're single-stepping or some other condition that
1346 disables all use of goto_tb. Just update the PC and exit. */
1348 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1350 cdest
= tcg_const_i64(dest
);
1354 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1356 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1358 TCGv_i32 t0
= tcg_temp_new_i32();
1359 TCGv_i64 t1
= tcg_temp_new_i64();
1360 TCGv_i64 z
= tcg_const_i64(0);
1361 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1362 tcg_gen_extu_i32_i64(t1
, t0
);
1363 tcg_temp_free_i32(t0
);
1364 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1365 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1366 tcg_temp_free_i64(t1
);
1367 tcg_temp_free_i64(z
);
1371 tcg_temp_free_i64(cdest
);
1373 tcg_temp_free_i64(next
);
1375 ret
= DISAS_PC_UPDATED
;
1383 /* ====================================================================== */
1384 /* The operations. These perform the bulk of the work for any insn,
1385 usually after the operands have been loaded and output initialized. */
1387 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1390 z
= tcg_const_i64(0);
1391 n
= tcg_temp_new_i64();
1392 tcg_gen_neg_i64(n
, o
->in2
);
1393 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1394 tcg_temp_free_i64(n
);
1395 tcg_temp_free_i64(z
);
1399 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1401 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1405 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1407 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1411 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1413 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1414 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1418 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1420 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1424 static DisasJumpType
op_addc(DisasContext
*s
, DisasOps
*o
)
1429 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1431 /* The carry flag is the msb of CC, therefore the branch mask that would
1432 create that comparison is 3. Feeding the generated comparison to
1433 setcond produces the carry flag that we desire. */
1434 disas_jcc(s
, &cmp
, 3);
1435 carry
= tcg_temp_new_i64();
1437 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1439 TCGv_i32 t
= tcg_temp_new_i32();
1440 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1441 tcg_gen_extu_i32_i64(carry
, t
);
1442 tcg_temp_free_i32(t
);
1446 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1447 tcg_temp_free_i64(carry
);
1451 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1453 o
->in1
= tcg_temp_new_i64();
1455 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1456 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1458 /* Perform the atomic addition in memory. */
1459 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1463 /* Recompute also for atomic case: needed for setting CC. */
1464 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1466 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1467 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1472 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1474 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1478 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1480 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1484 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1486 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1487 return_low128(o
->out2
);
1491 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1493 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1497 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1499 int shift
= s
->insn
->data
& 0xff;
1500 int size
= s
->insn
->data
>> 8;
1501 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1504 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1505 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1506 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1508 /* Produce the CC from only the bits manipulated. */
1509 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1510 set_cc_nz_u64(s
, cc_dst
);
1514 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1516 o
->in1
= tcg_temp_new_i64();
1518 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1519 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1521 /* Perform the atomic operation in memory. */
1522 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1526 /* Recompute also for atomic case: needed for setting CC. */
1527 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1529 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1530 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1535 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1537 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1539 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1540 per_branch(s
, false);
1541 return DISAS_PC_UPDATED
;
1547 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1551 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1552 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1556 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1557 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1558 t
= tcg_temp_new_i64();
1559 tcg_gen_shri_i64(t
, psw_mask
, 16);
1560 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1561 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1562 tcg_gen_extu_i32_i64(t
, cc_op
);
1563 tcg_gen_shli_i64(t
, t
, 28);
1564 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1565 tcg_temp_free_i64(t
);
1568 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1570 save_link_info(s
, o
);
1572 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1573 per_branch(s
, false);
1574 return DISAS_PC_UPDATED
;
1580 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1582 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1583 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
->fields
, i2
));
1586 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1588 int m1
= get_field(s
->fields
, m1
);
1589 bool is_imm
= have_field(s
->fields
, i2
);
1590 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1593 /* BCR with R2 = 0 causes no branching */
1594 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1596 /* Perform serialization */
1597 /* FIXME: check for fast-BCR-serialization facility */
1598 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1601 /* Perform serialization */
1602 /* FIXME: perform checkpoint-synchronisation */
1603 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1608 disas_jcc(s
, &c
, m1
);
1609 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1612 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1614 int r1
= get_field(s
->fields
, r1
);
1615 bool is_imm
= have_field(s
->fields
, i2
);
1616 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1620 c
.cond
= TCG_COND_NE
;
1625 t
= tcg_temp_new_i64();
1626 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1627 store_reg32_i64(r1
, t
);
1628 c
.u
.s32
.a
= tcg_temp_new_i32();
1629 c
.u
.s32
.b
= tcg_const_i32(0);
1630 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1631 tcg_temp_free_i64(t
);
1633 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1636 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1638 int r1
= get_field(s
->fields
, r1
);
1639 int imm
= get_field(s
->fields
, i2
);
1643 c
.cond
= TCG_COND_NE
;
1648 t
= tcg_temp_new_i64();
1649 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1650 tcg_gen_subi_i64(t
, t
, 1);
1651 store_reg32h_i64(r1
, t
);
1652 c
.u
.s32
.a
= tcg_temp_new_i32();
1653 c
.u
.s32
.b
= tcg_const_i32(0);
1654 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1655 tcg_temp_free_i64(t
);
1657 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1660 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1662 int r1
= get_field(s
->fields
, r1
);
1663 bool is_imm
= have_field(s
->fields
, i2
);
1664 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1667 c
.cond
= TCG_COND_NE
;
1672 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1673 c
.u
.s64
.a
= regs
[r1
];
1674 c
.u
.s64
.b
= tcg_const_i64(0);
1676 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1679 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1681 int r1
= get_field(s
->fields
, r1
);
1682 int r3
= get_field(s
->fields
, r3
);
1683 bool is_imm
= have_field(s
->fields
, i2
);
1684 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1688 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1693 t
= tcg_temp_new_i64();
1694 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1695 c
.u
.s32
.a
= tcg_temp_new_i32();
1696 c
.u
.s32
.b
= tcg_temp_new_i32();
1697 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1698 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1699 store_reg32_i64(r1
, t
);
1700 tcg_temp_free_i64(t
);
1702 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1705 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1707 int r1
= get_field(s
->fields
, r1
);
1708 int r3
= get_field(s
->fields
, r3
);
1709 bool is_imm
= have_field(s
->fields
, i2
);
1710 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1713 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1716 if (r1
== (r3
| 1)) {
1717 c
.u
.s64
.b
= load_reg(r3
| 1);
1720 c
.u
.s64
.b
= regs
[r3
| 1];
1724 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1725 c
.u
.s64
.a
= regs
[r1
];
1728 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1731 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1733 int imm
, m3
= get_field(s
->fields
, m3
);
1737 c
.cond
= ltgt_cond
[m3
];
1738 if (s
->insn
->data
) {
1739 c
.cond
= tcg_unsigned_cond(c
.cond
);
1741 c
.is_64
= c
.g1
= c
.g2
= true;
1745 is_imm
= have_field(s
->fields
, i4
);
1747 imm
= get_field(s
->fields
, i4
);
1750 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1751 get_field(s
->fields
, d4
));
1754 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1757 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1759 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1764 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1766 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1771 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1773 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1778 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1780 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1781 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1782 tcg_temp_free_i32(m3
);
1783 gen_set_cc_nz_f32(s
, o
->in2
);
1787 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1789 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1790 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1791 tcg_temp_free_i32(m3
);
1792 gen_set_cc_nz_f64(s
, o
->in2
);
1796 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1798 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1799 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1800 tcg_temp_free_i32(m3
);
1801 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1805 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1809 tcg_temp_free_i32(m3
);
1810 gen_set_cc_nz_f32(s
, o
->in2
);
1814 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1816 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1817 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1818 tcg_temp_free_i32(m3
);
1819 gen_set_cc_nz_f64(s
, o
->in2
);
1823 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1825 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1826 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1827 tcg_temp_free_i32(m3
);
1828 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1832 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1834 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1835 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1836 tcg_temp_free_i32(m3
);
1837 gen_set_cc_nz_f32(s
, o
->in2
);
1841 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1843 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1844 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1845 tcg_temp_free_i32(m3
);
1846 gen_set_cc_nz_f64(s
, o
->in2
);
1850 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1852 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1853 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1854 tcg_temp_free_i32(m3
);
1855 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1859 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1861 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1862 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1863 tcg_temp_free_i32(m3
);
1864 gen_set_cc_nz_f32(s
, o
->in2
);
1868 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1870 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1871 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1872 tcg_temp_free_i32(m3
);
1873 gen_set_cc_nz_f64(s
, o
->in2
);
1877 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1879 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1880 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1881 tcg_temp_free_i32(m3
);
1882 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1886 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1888 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1889 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1890 tcg_temp_free_i32(m3
);
1894 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1896 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1897 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1898 tcg_temp_free_i32(m3
);
1902 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1904 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1905 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1906 tcg_temp_free_i32(m3
);
1907 return_low128(o
->out2
);
1911 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
1913 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1914 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1915 tcg_temp_free_i32(m3
);
1919 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1921 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1922 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1923 tcg_temp_free_i32(m3
);
1927 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1929 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1930 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1931 tcg_temp_free_i32(m3
);
1932 return_low128(o
->out2
);
1936 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
1938 int r2
= get_field(s
->fields
, r2
);
1939 TCGv_i64 len
= tcg_temp_new_i64();
1941 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1943 return_low128(o
->out
);
1945 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1946 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1947 tcg_temp_free_i64(len
);
1952 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
1954 int l
= get_field(s
->fields
, l1
);
1959 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1960 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1963 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1964 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1967 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1968 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1971 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1972 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1975 vl
= tcg_const_i32(l
);
1976 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1977 tcg_temp_free_i32(vl
);
1981 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1985 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
1987 int r1
= get_field(s
->fields
, r1
);
1988 int r2
= get_field(s
->fields
, r2
);
1991 /* r1 and r2 must be even. */
1992 if (r1
& 1 || r2
& 1) {
1993 gen_program_exception(s
, PGM_SPECIFICATION
);
1994 return DISAS_NORETURN
;
1997 t1
= tcg_const_i32(r1
);
1998 t2
= tcg_const_i32(r2
);
1999 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2000 tcg_temp_free_i32(t1
);
2001 tcg_temp_free_i32(t2
);
2006 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2008 int r1
= get_field(s
->fields
, r1
);
2009 int r3
= get_field(s
->fields
, r3
);
2012 /* r1 and r3 must be even. */
2013 if (r1
& 1 || r3
& 1) {
2014 gen_program_exception(s
, PGM_SPECIFICATION
);
2015 return DISAS_NORETURN
;
2018 t1
= tcg_const_i32(r1
);
2019 t3
= tcg_const_i32(r3
);
2020 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2021 tcg_temp_free_i32(t1
);
2022 tcg_temp_free_i32(t3
);
2027 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2029 int r1
= get_field(s
->fields
, r1
);
2030 int r3
= get_field(s
->fields
, r3
);
2033 /* r1 and r3 must be even. */
2034 if (r1
& 1 || r3
& 1) {
2035 gen_program_exception(s
, PGM_SPECIFICATION
);
2036 return DISAS_NORETURN
;
2039 t1
= tcg_const_i32(r1
);
2040 t3
= tcg_const_i32(r3
);
2041 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2042 tcg_temp_free_i32(t1
);
2043 tcg_temp_free_i32(t3
);
2048 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2050 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2051 TCGv_i32 t1
= tcg_temp_new_i32();
2052 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2053 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2055 tcg_temp_free_i32(t1
);
2056 tcg_temp_free_i32(m3
);
2060 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2062 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2064 return_low128(o
->in2
);
2068 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2070 TCGv_i64 t
= tcg_temp_new_i64();
2071 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2072 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2073 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2074 tcg_temp_free_i64(t
);
2078 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2080 int d2
= get_field(s
->fields
, d2
);
2081 int b2
= get_field(s
->fields
, b2
);
2084 /* Note that in1 = R3 (new value) and
2085 in2 = (zero-extended) R1 (expected value). */
2087 addr
= get_address(s
, 0, b2
, d2
);
2088 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2089 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2090 tcg_temp_free_i64(addr
);
2092 /* Are the memory and expected values (un)equal? Note that this setcond
2093 produces the output CC value, thus the NE sense of the test. */
2094 cc
= tcg_temp_new_i64();
2095 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2096 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2097 tcg_temp_free_i64(cc
);
2103 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2105 int r1
= get_field(s
->fields
, r1
);
2106 int r3
= get_field(s
->fields
, r3
);
2107 int d2
= get_field(s
->fields
, d2
);
2108 int b2
= get_field(s
->fields
, b2
);
2109 DisasJumpType ret
= DISAS_NEXT
;
2111 TCGv_i32 t_r1
, t_r3
;
2113 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2114 addr
= get_address(s
, 0, b2
, d2
);
2115 t_r1
= tcg_const_i32(r1
);
2116 t_r3
= tcg_const_i32(r3
);
2117 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
2118 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2119 } else if (HAVE_CMPXCHG128
) {
2120 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2122 gen_helper_exit_atomic(cpu_env
);
2123 ret
= DISAS_NORETURN
;
2125 tcg_temp_free_i64(addr
);
2126 tcg_temp_free_i32(t_r1
);
2127 tcg_temp_free_i32(t_r3
);
2133 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2135 int r3
= get_field(s
->fields
, r3
);
2136 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2138 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2139 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2141 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2143 tcg_temp_free_i32(t_r3
);
2149 #ifndef CONFIG_USER_ONLY
2150 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2152 TCGMemOp mop
= s
->insn
->data
;
2153 TCGv_i64 addr
, old
, cc
;
2154 TCGLabel
*lab
= gen_new_label();
2156 /* Note that in1 = R1 (zero-extended expected value),
2157 out = R1 (original reg), out2 = R1+1 (new value). */
2159 addr
= tcg_temp_new_i64();
2160 old
= tcg_temp_new_i64();
2161 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2162 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2163 get_mem_index(s
), mop
| MO_ALIGN
);
2164 tcg_temp_free_i64(addr
);
2166 /* Are the memory and expected values (un)equal? */
2167 cc
= tcg_temp_new_i64();
2168 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2169 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2171 /* Write back the output now, so that it happens before the
2172 following branch, so that we don't need local temps. */
2173 if ((mop
& MO_SIZE
) == MO_32
) {
2174 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2176 tcg_gen_mov_i64(o
->out
, old
);
2178 tcg_temp_free_i64(old
);
2180 /* If the comparison was equal, and the LSB of R2 was set,
2181 then we need to flush the TLB (for all cpus). */
2182 tcg_gen_xori_i64(cc
, cc
, 1);
2183 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2184 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2185 tcg_temp_free_i64(cc
);
2187 gen_helper_purge(cpu_env
);
2194 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2196 TCGv_i64 t1
= tcg_temp_new_i64();
2197 TCGv_i32 t2
= tcg_temp_new_i32();
2198 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2199 gen_helper_cvd(t1
, t2
);
2200 tcg_temp_free_i32(t2
);
2201 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2202 tcg_temp_free_i64(t1
);
2206 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2208 int m3
= get_field(s
->fields
, m3
);
2209 TCGLabel
*lab
= gen_new_label();
2212 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2213 if (s
->insn
->data
) {
2214 c
= tcg_unsigned_cond(c
);
2216 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2225 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2227 int m3
= get_field(s
->fields
, m3
);
2228 int r1
= get_field(s
->fields
, r1
);
2229 int r2
= get_field(s
->fields
, r2
);
2230 TCGv_i32 tr1
, tr2
, chk
;
2232 /* R1 and R2 must both be even. */
2233 if ((r1
| r2
) & 1) {
2234 gen_program_exception(s
, PGM_SPECIFICATION
);
2235 return DISAS_NORETURN
;
2237 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2241 tr1
= tcg_const_i32(r1
);
2242 tr2
= tcg_const_i32(r2
);
2243 chk
= tcg_const_i32(m3
);
2245 switch (s
->insn
->data
) {
2247 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2250 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2253 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2256 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2259 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2262 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2265 g_assert_not_reached();
2268 tcg_temp_free_i32(tr1
);
2269 tcg_temp_free_i32(tr2
);
2270 tcg_temp_free_i32(chk
);
2275 #ifndef CONFIG_USER_ONLY
2276 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2278 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2279 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2280 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2282 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2284 tcg_temp_free_i32(func_code
);
2285 tcg_temp_free_i32(r3
);
2286 tcg_temp_free_i32(r1
);
2291 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2293 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2294 return_low128(o
->out
);
2298 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2300 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2301 return_low128(o
->out
);
2305 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2307 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2308 return_low128(o
->out
);
2312 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2314 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2315 return_low128(o
->out
);
2319 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2321 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2325 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2327 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2331 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2333 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2334 return_low128(o
->out2
);
2338 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2340 int r2
= get_field(s
->fields
, r2
);
2341 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2345 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2347 /* No cache information provided. */
2348 tcg_gen_movi_i64(o
->out
, -1);
2352 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2354 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2358 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2360 int r1
= get_field(s
->fields
, r1
);
2361 int r2
= get_field(s
->fields
, r2
);
2362 TCGv_i64 t
= tcg_temp_new_i64();
2364 /* Note the "subsequently" in the PoO, which implies a defined result
2365 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2366 tcg_gen_shri_i64(t
, psw_mask
, 32);
2367 store_reg32_i64(r1
, t
);
2369 store_reg32_i64(r2
, psw_mask
);
2372 tcg_temp_free_i64(t
);
2376 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2378 int r1
= get_field(s
->fields
, r1
);
2382 /* Nested EXECUTE is not allowed. */
2383 if (unlikely(s
->ex_value
)) {
2384 gen_program_exception(s
, PGM_EXECUTE
);
2385 return DISAS_NORETURN
;
2392 v1
= tcg_const_i64(0);
2397 ilen
= tcg_const_i32(s
->ilen
);
2398 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2399 tcg_temp_free_i32(ilen
);
2402 tcg_temp_free_i64(v1
);
2405 return DISAS_PC_CC_UPDATED
;
2408 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2410 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2411 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2412 tcg_temp_free_i32(m3
);
2416 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2418 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2419 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2420 tcg_temp_free_i32(m3
);
2424 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2426 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2427 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2428 return_low128(o
->out2
);
2429 tcg_temp_free_i32(m3
);
2433 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2435 /* We'll use the original input for cc computation, since we get to
2436 compare that against 0, which ought to be better than comparing
2437 the real output against 64. It also lets cc_dst be a convenient
2438 temporary during our computation. */
2439 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2441 /* R1 = IN ? CLZ(IN) : 64. */
2442 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2444 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2445 value by 64, which is undefined. But since the shift is 64 iff the
2446 input is zero, we still get the correct result after and'ing. */
2447 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2448 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2449 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2453 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2455 int m3
= get_field(s
->fields
, m3
);
2456 int pos
, len
, base
= s
->insn
->data
;
2457 TCGv_i64 tmp
= tcg_temp_new_i64();
2462 /* Effectively a 32-bit load. */
2463 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2470 /* Effectively a 16-bit load. */
2471 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2479 /* Effectively an 8-bit load. */
2480 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2485 pos
= base
+ ctz32(m3
) * 8;
2486 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2487 ccm
= ((1ull << len
) - 1) << pos
;
2491 /* This is going to be a sequence of loads and inserts. */
2492 pos
= base
+ 32 - 8;
2496 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2497 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2498 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2501 m3
= (m3
<< 1) & 0xf;
2507 tcg_gen_movi_i64(tmp
, ccm
);
2508 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2509 tcg_temp_free_i64(tmp
);
2513 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2515 int shift
= s
->insn
->data
& 0xff;
2516 int size
= s
->insn
->data
>> 8;
2517 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2521 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2526 t1
= tcg_temp_new_i64();
2527 tcg_gen_extract_i64(t1
, psw_mask
, 40, 4);
2528 t2
= tcg_temp_new_i64();
2529 tcg_gen_extu_i32_i64(t2
, cc_op
);
2530 tcg_gen_deposit_i64(t1
, t1
, t2
, 4, 60);
2531 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 24, 8);
2532 tcg_temp_free_i64(t1
);
2533 tcg_temp_free_i64(t2
);
2537 #ifndef CONFIG_USER_ONLY
2538 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2542 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2543 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2545 m4
= tcg_const_i32(0);
2547 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2548 tcg_temp_free_i32(m4
);
2552 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2556 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2557 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2559 m4
= tcg_const_i32(0);
2561 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2562 tcg_temp_free_i32(m4
);
2566 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2568 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2573 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2575 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2576 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2577 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2578 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2580 switch (s
->insn
->data
) {
2581 case S390_FEAT_TYPE_KMCTR
:
2582 if (r3
& 1 || !r3
) {
2583 gen_program_exception(s
, PGM_SPECIFICATION
);
2584 return DISAS_NORETURN
;
2587 case S390_FEAT_TYPE_PPNO
:
2588 case S390_FEAT_TYPE_KMF
:
2589 case S390_FEAT_TYPE_KMC
:
2590 case S390_FEAT_TYPE_KMO
:
2591 case S390_FEAT_TYPE_KM
:
2592 if (r1
& 1 || !r1
) {
2593 gen_program_exception(s
, PGM_SPECIFICATION
);
2594 return DISAS_NORETURN
;
2597 case S390_FEAT_TYPE_KMAC
:
2598 case S390_FEAT_TYPE_KIMD
:
2599 case S390_FEAT_TYPE_KLMD
:
2600 if (r2
& 1 || !r2
) {
2601 gen_program_exception(s
, PGM_SPECIFICATION
);
2602 return DISAS_NORETURN
;
2605 case S390_FEAT_TYPE_PCKMO
:
2606 case S390_FEAT_TYPE_PCC
:
2609 g_assert_not_reached();
2612 t_r1
= tcg_const_i32(r1
);
2613 t_r2
= tcg_const_i32(r2
);
2614 t_r3
= tcg_const_i32(r3
);
2615 type
= tcg_const_i32(s
->insn
->data
);
2616 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2618 tcg_temp_free_i32(t_r1
);
2619 tcg_temp_free_i32(t_r2
);
2620 tcg_temp_free_i32(t_r3
);
2621 tcg_temp_free_i32(type
);
2625 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2627 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2632 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2634 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2639 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2641 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2646 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2648 /* The real output is indeed the original value in memory;
2649 recompute the addition for the computation of CC. */
2650 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2651 s
->insn
->data
| MO_ALIGN
);
2652 /* However, we need to recompute the addition for setting CC. */
2653 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2657 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2659 /* The real output is indeed the original value in memory;
2660 recompute the addition for the computation of CC. */
2661 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2662 s
->insn
->data
| MO_ALIGN
);
2663 /* However, we need to recompute the operation for setting CC. */
2664 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2668 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2670 /* The real output is indeed the original value in memory;
2671 recompute the addition for the computation of CC. */
2672 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2673 s
->insn
->data
| MO_ALIGN
);
2674 /* However, we need to recompute the operation for setting CC. */
2675 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2679 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2681 /* The real output is indeed the original value in memory;
2682 recompute the addition for the computation of CC. */
2683 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2684 s
->insn
->data
| MO_ALIGN
);
2685 /* However, we need to recompute the operation for setting CC. */
2686 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2690 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2692 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2696 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2698 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2702 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2704 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2708 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2710 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2714 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2716 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2717 return_low128(o
->out2
);
2721 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2723 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2724 return_low128(o
->out2
);
2728 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2730 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2734 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2736 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2740 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2742 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2746 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2748 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2752 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2754 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2758 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2760 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2764 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2766 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2770 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2772 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2776 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2778 TCGLabel
*lab
= gen_new_label();
2779 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2780 /* The value is stored even in case of trap. */
2781 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2787 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2789 TCGLabel
*lab
= gen_new_label();
2790 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2791 /* The value is stored even in case of trap. */
2792 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2798 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2800 TCGLabel
*lab
= gen_new_label();
2801 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2802 /* The value is stored even in case of trap. */
2803 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2809 static DisasJumpType
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2811 TCGLabel
*lab
= gen_new_label();
2812 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2813 /* The value is stored even in case of trap. */
2814 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2820 static DisasJumpType
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2822 TCGLabel
*lab
= gen_new_label();
2823 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2824 /* The value is stored even in case of trap. */
2825 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2831 static DisasJumpType
op_loc(DisasContext
*s
, DisasOps
*o
)
2835 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2838 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2842 TCGv_i32 t32
= tcg_temp_new_i32();
2845 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2848 t
= tcg_temp_new_i64();
2849 tcg_gen_extu_i32_i64(t
, t32
);
2850 tcg_temp_free_i32(t32
);
2852 z
= tcg_const_i64(0);
2853 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2854 tcg_temp_free_i64(t
);
2855 tcg_temp_free_i64(z
);
2861 #ifndef CONFIG_USER_ONLY
2862 static DisasJumpType
op_lctl(DisasContext
*s
, DisasOps
*o
)
2864 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2865 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2866 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2867 tcg_temp_free_i32(r1
);
2868 tcg_temp_free_i32(r3
);
2869 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2870 return DISAS_PC_STALE_NOCHAIN
;
2873 static DisasJumpType
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2875 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2876 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2877 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2878 tcg_temp_free_i32(r1
);
2879 tcg_temp_free_i32(r3
);
2880 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2881 return DISAS_PC_STALE_NOCHAIN
;
2884 static DisasJumpType
op_lra(DisasContext
*s
, DisasOps
*o
)
2886 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2891 static DisasJumpType
op_lpp(DisasContext
*s
, DisasOps
*o
)
2893 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2897 static DisasJumpType
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2901 per_breaking_event(s
);
2903 t1
= tcg_temp_new_i64();
2904 t2
= tcg_temp_new_i64();
2905 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
2906 MO_TEUL
| MO_ALIGN_8
);
2907 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2908 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2909 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2910 tcg_gen_shli_i64(t1
, t1
, 32);
2911 gen_helper_load_psw(cpu_env
, t1
, t2
);
2912 tcg_temp_free_i64(t1
);
2913 tcg_temp_free_i64(t2
);
2914 return DISAS_NORETURN
;
2917 static DisasJumpType
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2921 per_breaking_event(s
);
2923 t1
= tcg_temp_new_i64();
2924 t2
= tcg_temp_new_i64();
2925 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
2926 MO_TEQ
| MO_ALIGN_8
);
2927 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2928 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2929 gen_helper_load_psw(cpu_env
, t1
, t2
);
2930 tcg_temp_free_i64(t1
);
2931 tcg_temp_free_i64(t2
);
2932 return DISAS_NORETURN
;
2936 static DisasJumpType
op_lam(DisasContext
*s
, DisasOps
*o
)
2938 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2939 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2940 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2941 tcg_temp_free_i32(r1
);
2942 tcg_temp_free_i32(r3
);
2946 static DisasJumpType
op_lm32(DisasContext
*s
, DisasOps
*o
)
2948 int r1
= get_field(s
->fields
, r1
);
2949 int r3
= get_field(s
->fields
, r3
);
2952 /* Only one register to read. */
2953 t1
= tcg_temp_new_i64();
2954 if (unlikely(r1
== r3
)) {
2955 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2956 store_reg32_i64(r1
, t1
);
2961 /* First load the values of the first and last registers to trigger
2962 possible page faults. */
2963 t2
= tcg_temp_new_i64();
2964 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2965 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2966 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2967 store_reg32_i64(r1
, t1
);
2968 store_reg32_i64(r3
, t2
);
2970 /* Only two registers to read. */
2971 if (((r1
+ 1) & 15) == r3
) {
2977 /* Then load the remaining registers. Page fault can't occur. */
2979 tcg_gen_movi_i64(t2
, 4);
2982 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2983 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2984 store_reg32_i64(r1
, t1
);
2992 static DisasJumpType
op_lmh(DisasContext
*s
, DisasOps
*o
)
2994 int r1
= get_field(s
->fields
, r1
);
2995 int r3
= get_field(s
->fields
, r3
);
2998 /* Only one register to read. */
2999 t1
= tcg_temp_new_i64();
3000 if (unlikely(r1
== r3
)) {
3001 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3002 store_reg32h_i64(r1
, t1
);
3007 /* First load the values of the first and last registers to trigger
3008 possible page faults. */
3009 t2
= tcg_temp_new_i64();
3010 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3011 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3012 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3013 store_reg32h_i64(r1
, t1
);
3014 store_reg32h_i64(r3
, t2
);
3016 /* Only two registers to read. */
3017 if (((r1
+ 1) & 15) == r3
) {
3023 /* Then load the remaining registers. Page fault can't occur. */
3025 tcg_gen_movi_i64(t2
, 4);
3028 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3029 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3030 store_reg32h_i64(r1
, t1
);
3038 static DisasJumpType
op_lm64(DisasContext
*s
, DisasOps
*o
)
3040 int r1
= get_field(s
->fields
, r1
);
3041 int r3
= get_field(s
->fields
, r3
);
3044 /* Only one register to read. */
3045 if (unlikely(r1
== r3
)) {
3046 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3050 /* First load the values of the first and last registers to trigger
3051 possible page faults. */
3052 t1
= tcg_temp_new_i64();
3053 t2
= tcg_temp_new_i64();
3054 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
3055 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
3056 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
3057 tcg_gen_mov_i64(regs
[r1
], t1
);
3060 /* Only two registers to read. */
3061 if (((r1
+ 1) & 15) == r3
) {
3066 /* Then load the remaining registers. Page fault can't occur. */
3068 tcg_gen_movi_i64(t1
, 8);
3071 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
3072 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3079 static DisasJumpType
op_lpd(DisasContext
*s
, DisasOps
*o
)
3082 TCGMemOp mop
= s
->insn
->data
;
3084 /* In a parallel context, stop the world and single step. */
3085 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
3088 gen_exception(EXCP_ATOMIC
);
3089 return DISAS_NORETURN
;
3092 /* In a serial context, perform the two loads ... */
3093 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
3094 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3095 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
3096 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
3097 tcg_temp_free_i64(a1
);
3098 tcg_temp_free_i64(a2
);
3100 /* ... and indicate that we performed them while interlocked. */
3101 gen_op_movi_cc(s
, 0);
3105 static DisasJumpType
op_lpq(DisasContext
*s
, DisasOps
*o
)
3107 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3108 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3109 } else if (HAVE_ATOMIC128
) {
3110 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3112 gen_helper_exit_atomic(cpu_env
);
3113 return DISAS_NORETURN
;
3115 return_low128(o
->out2
);
3119 #ifndef CONFIG_USER_ONLY
3120 static DisasJumpType
op_lura(DisasContext
*s
, DisasOps
*o
)
3122 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
3126 static DisasJumpType
op_lurag(DisasContext
*s
, DisasOps
*o
)
3128 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
3133 static DisasJumpType
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3135 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3139 static DisasJumpType
op_mov2(DisasContext
*s
, DisasOps
*o
)
3142 o
->g_out
= o
->g_in2
;
3148 static DisasJumpType
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3150 int b2
= get_field(s
->fields
, b2
);
3151 TCGv ar1
= tcg_temp_new_i64();
3154 o
->g_out
= o
->g_in2
;
3158 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
3159 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3160 tcg_gen_movi_i64(ar1
, 0);
3162 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3163 tcg_gen_movi_i64(ar1
, 1);
3165 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3167 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3169 tcg_gen_movi_i64(ar1
, 0);
3172 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3173 tcg_gen_movi_i64(ar1
, 2);
3177 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3178 tcg_temp_free_i64(ar1
);
3183 static DisasJumpType
op_movx(DisasContext
*s
, DisasOps
*o
)
3187 o
->g_out
= o
->g_in1
;
3188 o
->g_out2
= o
->g_in2
;
3191 o
->g_in1
= o
->g_in2
= false;
3195 static DisasJumpType
op_mvc(DisasContext
*s
, DisasOps
*o
)
3197 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3198 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3199 tcg_temp_free_i32(l
);
3203 static DisasJumpType
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3205 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3206 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3207 tcg_temp_free_i32(l
);
3211 static DisasJumpType
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3213 int r1
= get_field(s
->fields
, r1
);
3214 int r2
= get_field(s
->fields
, r2
);
3217 /* r1 and r2 must be even. */
3218 if (r1
& 1 || r2
& 1) {
3219 gen_program_exception(s
, PGM_SPECIFICATION
);
3220 return DISAS_NORETURN
;
3223 t1
= tcg_const_i32(r1
);
3224 t2
= tcg_const_i32(r2
);
3225 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3226 tcg_temp_free_i32(t1
);
3227 tcg_temp_free_i32(t2
);
3232 static DisasJumpType
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3234 int r1
= get_field(s
->fields
, r1
);
3235 int r3
= get_field(s
->fields
, r3
);
3238 /* r1 and r3 must be even. */
3239 if (r1
& 1 || r3
& 1) {
3240 gen_program_exception(s
, PGM_SPECIFICATION
);
3241 return DISAS_NORETURN
;
3244 t1
= tcg_const_i32(r1
);
3245 t3
= tcg_const_i32(r3
);
3246 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3247 tcg_temp_free_i32(t1
);
3248 tcg_temp_free_i32(t3
);
3253 static DisasJumpType
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3255 int r1
= get_field(s
->fields
, r1
);
3256 int r3
= get_field(s
->fields
, r3
);
3259 /* r1 and r3 must be even. */
3260 if (r1
& 1 || r3
& 1) {
3261 gen_program_exception(s
, PGM_SPECIFICATION
);
3262 return DISAS_NORETURN
;
3265 t1
= tcg_const_i32(r1
);
3266 t3
= tcg_const_i32(r3
);
3267 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3268 tcg_temp_free_i32(t1
);
3269 tcg_temp_free_i32(t3
);
3274 static DisasJumpType
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3276 int r3
= get_field(s
->fields
, r3
);
3277 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3282 #ifndef CONFIG_USER_ONLY
3283 static DisasJumpType
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3285 int r1
= get_field(s
->fields
, l1
);
3286 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3291 static DisasJumpType
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3293 int r1
= get_field(s
->fields
, l1
);
3294 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3300 static DisasJumpType
op_mvn(DisasContext
*s
, DisasOps
*o
)
3302 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3303 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3304 tcg_temp_free_i32(l
);
3308 static DisasJumpType
op_mvo(DisasContext
*s
, DisasOps
*o
)
3310 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3311 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3312 tcg_temp_free_i32(l
);
3316 static DisasJumpType
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3318 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3323 static DisasJumpType
op_mvst(DisasContext
*s
, DisasOps
*o
)
3325 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3327 return_low128(o
->in2
);
3331 static DisasJumpType
op_mvz(DisasContext
*s
, DisasOps
*o
)
3333 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3334 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3335 tcg_temp_free_i32(l
);
3339 static DisasJumpType
op_mul(DisasContext
*s
, DisasOps
*o
)
3341 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3345 static DisasJumpType
op_mul128(DisasContext
*s
, DisasOps
*o
)
3347 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3351 static DisasJumpType
op_meeb(DisasContext
*s
, DisasOps
*o
)
3353 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3357 static DisasJumpType
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3359 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3363 static DisasJumpType
op_mdb(DisasContext
*s
, DisasOps
*o
)
3365 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3369 static DisasJumpType
op_mxb(DisasContext
*s
, DisasOps
*o
)
3371 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3372 return_low128(o
->out2
);
3376 static DisasJumpType
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3378 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3379 return_low128(o
->out2
);
3383 static DisasJumpType
op_maeb(DisasContext
*s
, DisasOps
*o
)
3385 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3386 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3387 tcg_temp_free_i64(r3
);
3391 static DisasJumpType
op_madb(DisasContext
*s
, DisasOps
*o
)
3393 TCGv_i64 r3
= load_freg(get_field(s
->fields
, r3
));
3394 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3395 tcg_temp_free_i64(r3
);
3399 static DisasJumpType
op_mseb(DisasContext
*s
, DisasOps
*o
)
3401 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3402 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3403 tcg_temp_free_i64(r3
);
3407 static DisasJumpType
op_msdb(DisasContext
*s
, DisasOps
*o
)
3409 TCGv_i64 r3
= load_freg(get_field(s
->fields
, r3
));
3410 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3411 tcg_temp_free_i64(r3
);
3415 static DisasJumpType
op_nabs(DisasContext
*s
, DisasOps
*o
)
3418 z
= tcg_const_i64(0);
3419 n
= tcg_temp_new_i64();
3420 tcg_gen_neg_i64(n
, o
->in2
);
3421 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3422 tcg_temp_free_i64(n
);
3423 tcg_temp_free_i64(z
);
3427 static DisasJumpType
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3429 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3433 static DisasJumpType
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3435 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3439 static DisasJumpType
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3441 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3442 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3446 static DisasJumpType
op_nc(DisasContext
*s
, DisasOps
*o
)
3448 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3449 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3450 tcg_temp_free_i32(l
);
3455 static DisasJumpType
op_neg(DisasContext
*s
, DisasOps
*o
)
3457 tcg_gen_neg_i64(o
->out
, o
->in2
);
3461 static DisasJumpType
op_negf32(DisasContext
*s
, DisasOps
*o
)
3463 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3467 static DisasJumpType
op_negf64(DisasContext
*s
, DisasOps
*o
)
3469 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3473 static DisasJumpType
op_negf128(DisasContext
*s
, DisasOps
*o
)
3475 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3476 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3480 static DisasJumpType
op_oc(DisasContext
*s
, DisasOps
*o
)
3482 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3483 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3484 tcg_temp_free_i32(l
);
3489 static DisasJumpType
op_or(DisasContext
*s
, DisasOps
*o
)
3491 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3495 static DisasJumpType
op_ori(DisasContext
*s
, DisasOps
*o
)
3497 int shift
= s
->insn
->data
& 0xff;
3498 int size
= s
->insn
->data
>> 8;
3499 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3502 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3503 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3505 /* Produce the CC from only the bits manipulated. */
3506 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3507 set_cc_nz_u64(s
, cc_dst
);
3511 static DisasJumpType
op_oi(DisasContext
*s
, DisasOps
*o
)
3513 o
->in1
= tcg_temp_new_i64();
3515 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3516 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3518 /* Perform the atomic operation in memory. */
3519 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3523 /* Recompute also for atomic case: needed for setting CC. */
3524 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3526 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3527 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3532 static DisasJumpType
op_pack(DisasContext
*s
, DisasOps
*o
)
3534 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3535 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3536 tcg_temp_free_i32(l
);
3540 static DisasJumpType
op_pka(DisasContext
*s
, DisasOps
*o
)
3542 int l2
= get_field(s
->fields
, l2
) + 1;
3545 /* The length must not exceed 32 bytes. */
3547 gen_program_exception(s
, PGM_SPECIFICATION
);
3548 return DISAS_NORETURN
;
3550 l
= tcg_const_i32(l2
);
3551 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3552 tcg_temp_free_i32(l
);
3556 static DisasJumpType
op_pku(DisasContext
*s
, DisasOps
*o
)
3558 int l2
= get_field(s
->fields
, l2
) + 1;
3561 /* The length must be even and should not exceed 64 bytes. */
3562 if ((l2
& 1) || (l2
> 64)) {
3563 gen_program_exception(s
, PGM_SPECIFICATION
);
3564 return DISAS_NORETURN
;
3566 l
= tcg_const_i32(l2
);
3567 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3568 tcg_temp_free_i32(l
);
3572 static DisasJumpType
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3574 gen_helper_popcnt(o
->out
, o
->in2
);
3578 #ifndef CONFIG_USER_ONLY
3579 static DisasJumpType
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3581 gen_helper_ptlb(cpu_env
);
3586 static DisasJumpType
op_risbg(DisasContext
*s
, DisasOps
*o
)
3588 int i3
= get_field(s
->fields
, i3
);
3589 int i4
= get_field(s
->fields
, i4
);
3590 int i5
= get_field(s
->fields
, i5
);
3591 int do_zero
= i4
& 0x80;
3592 uint64_t mask
, imask
, pmask
;
3595 /* Adjust the arguments for the specific insn. */
3596 switch (s
->fields
->op2
) {
3597 case 0x55: /* risbg */
3598 case 0x59: /* risbgn */
3603 case 0x5d: /* risbhg */
3606 pmask
= 0xffffffff00000000ull
;
3608 case 0x51: /* risblg */
3611 pmask
= 0x00000000ffffffffull
;
3614 g_assert_not_reached();
3617 /* MASK is the set of bits to be inserted from R2.
3618 Take care for I3/I4 wraparound. */
3621 mask
^= pmask
>> i4
>> 1;
3623 mask
|= ~(pmask
>> i4
>> 1);
3627 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3628 insns, we need to keep the other half of the register. */
3629 imask
= ~mask
| ~pmask
;
3637 if (s
->fields
->op2
== 0x5d) {
3641 /* In some cases we can implement this with extract. */
3642 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3643 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3647 /* In some cases we can implement this with deposit. */
3648 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3649 /* Note that we rotate the bits to be inserted to the lsb, not to
3650 the position as described in the PoO. */
3651 rot
= (rot
- pos
) & 63;
3656 /* Rotate the input as necessary. */
3657 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3659 /* Insert the selected bits into the output. */
3662 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3664 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3666 } else if (imask
== 0) {
3667 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3669 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3670 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3671 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3676 static DisasJumpType
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3678 int i3
= get_field(s
->fields
, i3
);
3679 int i4
= get_field(s
->fields
, i4
);
3680 int i5
= get_field(s
->fields
, i5
);
3683 /* If this is a test-only form, arrange to discard the result. */
3685 o
->out
= tcg_temp_new_i64();
3693 /* MASK is the set of bits to be operated on from R2.
3694 Take care for I3/I4 wraparound. */
3697 mask
^= ~0ull >> i4
>> 1;
3699 mask
|= ~(~0ull >> i4
>> 1);
3702 /* Rotate the input as necessary. */
3703 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3706 switch (s
->fields
->op2
) {
3707 case 0x55: /* AND */
3708 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3709 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3712 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3713 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3715 case 0x57: /* XOR */
3716 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3717 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3724 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3725 set_cc_nz_u64(s
, cc_dst
);
3729 static DisasJumpType
op_rev16(DisasContext
*s
, DisasOps
*o
)
3731 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3735 static DisasJumpType
op_rev32(DisasContext
*s
, DisasOps
*o
)
3737 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3741 static DisasJumpType
op_rev64(DisasContext
*s
, DisasOps
*o
)
3743 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3747 static DisasJumpType
op_rll32(DisasContext
*s
, DisasOps
*o
)
3749 TCGv_i32 t1
= tcg_temp_new_i32();
3750 TCGv_i32 t2
= tcg_temp_new_i32();
3751 TCGv_i32 to
= tcg_temp_new_i32();
3752 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3753 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3754 tcg_gen_rotl_i32(to
, t1
, t2
);
3755 tcg_gen_extu_i32_i64(o
->out
, to
);
3756 tcg_temp_free_i32(t1
);
3757 tcg_temp_free_i32(t2
);
3758 tcg_temp_free_i32(to
);
3762 static DisasJumpType
op_rll64(DisasContext
*s
, DisasOps
*o
)
3764 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3768 #ifndef CONFIG_USER_ONLY
3769 static DisasJumpType
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3771 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3776 static DisasJumpType
op_sacf(DisasContext
*s
, DisasOps
*o
)
3778 gen_helper_sacf(cpu_env
, o
->in2
);
3779 /* Addressing mode has changed, so end the block. */
3780 return DISAS_PC_STALE
;
3784 static DisasJumpType
op_sam(DisasContext
*s
, DisasOps
*o
)
3786 int sam
= s
->insn
->data
;
3802 /* Bizarre but true, we check the address of the current insn for the
3803 specification exception, not the next to be executed. Thus the PoO
3804 documents that Bad Things Happen two bytes before the end. */
3805 if (s
->base
.pc_next
& ~mask
) {
3806 gen_program_exception(s
, PGM_SPECIFICATION
);
3807 return DISAS_NORETURN
;
3811 tsam
= tcg_const_i64(sam
);
3812 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3813 tcg_temp_free_i64(tsam
);
3815 /* Always exit the TB, since we (may have) changed execution mode. */
3816 return DISAS_PC_STALE
;
3819 static DisasJumpType
op_sar(DisasContext
*s
, DisasOps
*o
)
3821 int r1
= get_field(s
->fields
, r1
);
3822 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3826 static DisasJumpType
op_seb(DisasContext
*s
, DisasOps
*o
)
3828 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3832 static DisasJumpType
op_sdb(DisasContext
*s
, DisasOps
*o
)
3834 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3838 static DisasJumpType
op_sxb(DisasContext
*s
, DisasOps
*o
)
3840 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3841 return_low128(o
->out2
);
3845 static DisasJumpType
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3847 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3851 static DisasJumpType
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3853 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3857 static DisasJumpType
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3859 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3860 return_low128(o
->out2
);
3864 #ifndef CONFIG_USER_ONLY
3865 static DisasJumpType
op_servc(DisasContext
*s
, DisasOps
*o
)
3867 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3872 static DisasJumpType
op_sigp(DisasContext
*s
, DisasOps
*o
)
3874 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3875 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3876 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
3878 tcg_temp_free_i32(r1
);
3879 tcg_temp_free_i32(r3
);
3884 static DisasJumpType
op_soc(DisasContext
*s
, DisasOps
*o
)
3891 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3893 /* We want to store when the condition is fulfilled, so branch
3894 out when it's not */
3895 c
.cond
= tcg_invert_cond(c
.cond
);
3897 lab
= gen_new_label();
3899 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3901 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3905 r1
= get_field(s
->fields
, r1
);
3906 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3907 switch (s
->insn
->data
) {
3909 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3912 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3914 case 2: /* STOCFH */
3915 h
= tcg_temp_new_i64();
3916 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3917 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3918 tcg_temp_free_i64(h
);
3921 g_assert_not_reached();
3923 tcg_temp_free_i64(a
);
3929 static DisasJumpType
op_sla(DisasContext
*s
, DisasOps
*o
)
3931 uint64_t sign
= 1ull << s
->insn
->data
;
3932 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3933 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3934 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3935 /* The arithmetic left shift is curious in that it does not affect
3936 the sign bit. Copy that over from the source unchanged. */
3937 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3938 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3939 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3943 static DisasJumpType
op_sll(DisasContext
*s
, DisasOps
*o
)
3945 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3949 static DisasJumpType
op_sra(DisasContext
*s
, DisasOps
*o
)
3951 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3955 static DisasJumpType
op_srl(DisasContext
*s
, DisasOps
*o
)
3957 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3961 static DisasJumpType
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3963 gen_helper_sfpc(cpu_env
, o
->in2
);
3967 static DisasJumpType
op_sfas(DisasContext
*s
, DisasOps
*o
)
3969 gen_helper_sfas(cpu_env
, o
->in2
);
3973 static DisasJumpType
op_srnm(DisasContext
*s
, DisasOps
*o
)
3975 int b2
= get_field(s
->fields
, b2
);
3976 int d2
= get_field(s
->fields
, d2
);
3977 TCGv_i64 t1
= tcg_temp_new_i64();
3978 TCGv_i64 t2
= tcg_temp_new_i64();
3981 switch (s
->fields
->op2
) {
3982 case 0x99: /* SRNM */
3985 case 0xb8: /* SRNMB */
3988 case 0xb9: /* SRNMT */
3994 mask
= (1 << len
) - 1;
3996 /* Insert the value into the appropriate field of the FPC. */
3998 tcg_gen_movi_i64(t1
, d2
& mask
);
4000 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
4001 tcg_gen_andi_i64(t1
, t1
, mask
);
4003 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4004 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
4005 tcg_temp_free_i64(t1
);
4007 /* Then install the new FPC to set the rounding mode in fpu_status. */
4008 gen_helper_sfpc(cpu_env
, t2
);
4009 tcg_temp_free_i64(t2
);
4013 static DisasJumpType
op_spm(DisasContext
*s
, DisasOps
*o
)
4015 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
4016 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
4019 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
4020 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
4024 static DisasJumpType
op_ectg(DisasContext
*s
, DisasOps
*o
)
4026 int b1
= get_field(s
->fields
, b1
);
4027 int d1
= get_field(s
->fields
, d1
);
4028 int b2
= get_field(s
->fields
, b2
);
4029 int d2
= get_field(s
->fields
, d2
);
4030 int r3
= get_field(s
->fields
, r3
);
4031 TCGv_i64 tmp
= tcg_temp_new_i64();
4033 /* fetch all operands first */
4034 o
->in1
= tcg_temp_new_i64();
4035 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
4036 o
->in2
= tcg_temp_new_i64();
4037 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
4038 o
->addr1
= get_address(s
, 0, r3
, 0);
4040 /* load the third operand into r3 before modifying anything */
4041 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
4043 /* subtract CPU timer from first operand and store in GR0 */
4044 gen_helper_stpt(tmp
, cpu_env
);
4045 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
4047 /* store second operand in GR1 */
4048 tcg_gen_mov_i64(regs
[1], o
->in2
);
4050 tcg_temp_free_i64(tmp
);
4054 #ifndef CONFIG_USER_ONLY
4055 static DisasJumpType
op_spka(DisasContext
*s
, DisasOps
*o
)
4057 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
4058 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
4062 static DisasJumpType
op_sske(DisasContext
*s
, DisasOps
*o
)
4064 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
4068 static DisasJumpType
op_ssm(DisasContext
*s
, DisasOps
*o
)
4070 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
4071 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4072 return DISAS_PC_STALE_NOCHAIN
;
4075 static DisasJumpType
op_stap(DisasContext
*s
, DisasOps
*o
)
4077 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
4082 static DisasJumpType
op_stck(DisasContext
*s
, DisasOps
*o
)
4084 gen_helper_stck(o
->out
, cpu_env
);
4085 /* ??? We don't implement clock states. */
4086 gen_op_movi_cc(s
, 0);
4090 static DisasJumpType
op_stcke(DisasContext
*s
, DisasOps
*o
)
4092 TCGv_i64 c1
= tcg_temp_new_i64();
4093 TCGv_i64 c2
= tcg_temp_new_i64();
4094 TCGv_i64 todpr
= tcg_temp_new_i64();
4095 gen_helper_stck(c1
, cpu_env
);
4096 /* 16 bit value store in an uint32_t (only valid bits set) */
4097 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4098 /* Shift the 64-bit value into its place as a zero-extended
4099 104-bit value. Note that "bit positions 64-103 are always
4100 non-zero so that they compare differently to STCK"; we set
4101 the least significant bit to 1. */
4102 tcg_gen_shli_i64(c2
, c1
, 56);
4103 tcg_gen_shri_i64(c1
, c1
, 8);
4104 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4105 tcg_gen_or_i64(c2
, c2
, todpr
);
4106 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4107 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4108 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4109 tcg_temp_free_i64(c1
);
4110 tcg_temp_free_i64(c2
);
4111 tcg_temp_free_i64(todpr
);
4112 /* ??? We don't implement clock states. */
4113 gen_op_movi_cc(s
, 0);
4117 #ifndef CONFIG_USER_ONLY
4118 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4120 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4121 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
4126 static DisasJumpType
op_sckc(DisasContext
*s
, DisasOps
*o
)
4128 gen_helper_sckc(cpu_env
, o
->in2
);
4132 static DisasJumpType
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4134 gen_helper_sckpf(cpu_env
, regs
[0]);
4138 static DisasJumpType
op_stckc(DisasContext
*s
, DisasOps
*o
)
4140 gen_helper_stckc(o
->out
, cpu_env
);
4144 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4146 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4147 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4148 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4149 tcg_temp_free_i32(r1
);
4150 tcg_temp_free_i32(r3
);
4154 static DisasJumpType
op_stctl(DisasContext
*s
, DisasOps
*o
)
4156 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4157 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4158 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4159 tcg_temp_free_i32(r1
);
4160 tcg_temp_free_i32(r3
);
4164 static DisasJumpType
op_stidp(DisasContext
*s
, DisasOps
*o
)
4166 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4170 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4172 gen_helper_spt(cpu_env
, o
->in2
);
4176 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4178 gen_helper_stfl(cpu_env
);
4182 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4184 gen_helper_stpt(o
->out
, cpu_env
);
4188 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4190 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4195 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4197 gen_helper_spx(cpu_env
, o
->in2
);
4201 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4203 gen_helper_xsch(cpu_env
, regs
[1]);
4208 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4210 gen_helper_csch(cpu_env
, regs
[1]);
4215 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4217 gen_helper_hsch(cpu_env
, regs
[1]);
4222 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4224 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4229 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4231 gen_helper_rchp(cpu_env
, regs
[1]);
4236 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4238 gen_helper_rsch(cpu_env
, regs
[1]);
4243 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4245 gen_helper_sal(cpu_env
, regs
[1]);
4249 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4251 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4255 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4257 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4258 gen_op_movi_cc(s
, 3);
4262 static DisasJumpType
op_stcps(DisasContext
*s
, DisasOps
*o
)
4264 /* The instruction is suppressed if not provided. */
4268 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4270 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4275 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4277 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4282 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4284 gen_helper_stcrw(cpu_env
, o
->in2
);
4289 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4291 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4296 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4298 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4303 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4305 gen_helper_chsc(cpu_env
, o
->in2
);
4310 static DisasJumpType
op_stpx(DisasContext
*s
, DisasOps
*o
)
4312 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4313 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4317 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4319 uint64_t i2
= get_field(s
->fields
, i2
);
4322 /* It is important to do what the instruction name says: STORE THEN.
4323 If we let the output hook perform the store then if we fault and
4324 restart, we'll have the wrong SYSTEM MASK in place. */
4325 t
= tcg_temp_new_i64();
4326 tcg_gen_shri_i64(t
, psw_mask
, 56);
4327 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4328 tcg_temp_free_i64(t
);
4330 if (s
->fields
->op
== 0xac) {
4331 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4332 (i2
<< 56) | 0x00ffffffffffffffull
);
4334 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4337 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4338 return DISAS_PC_STALE_NOCHAIN
;
4341 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4343 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4347 static DisasJumpType
op_sturg(DisasContext
*s
, DisasOps
*o
)
4349 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4354 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4356 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4361 static DisasJumpType
op_st8(DisasContext
*s
, DisasOps
*o
)
4363 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4367 static DisasJumpType
op_st16(DisasContext
*s
, DisasOps
*o
)
4369 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4373 static DisasJumpType
op_st32(DisasContext
*s
, DisasOps
*o
)
4375 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4379 static DisasJumpType
op_st64(DisasContext
*s
, DisasOps
*o
)
4381 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4385 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4387 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4388 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4389 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4390 tcg_temp_free_i32(r1
);
4391 tcg_temp_free_i32(r3
);
4395 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4397 int m3
= get_field(s
->fields
, m3
);
4398 int pos
, base
= s
->insn
->data
;
4399 TCGv_i64 tmp
= tcg_temp_new_i64();
4401 pos
= base
+ ctz32(m3
) * 8;
4404 /* Effectively a 32-bit store. */
4405 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4406 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4412 /* Effectively a 16-bit store. */
4413 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4414 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4421 /* Effectively an 8-bit store. */
4422 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4423 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4427 /* This is going to be a sequence of shifts and stores. */
4428 pos
= base
+ 32 - 8;
4431 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4432 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4433 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4435 m3
= (m3
<< 1) & 0xf;
4440 tcg_temp_free_i64(tmp
);
4444 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4446 int r1
= get_field(s
->fields
, r1
);
4447 int r3
= get_field(s
->fields
, r3
);
4448 int size
= s
->insn
->data
;
4449 TCGv_i64 tsize
= tcg_const_i64(size
);
4453 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4455 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4460 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4464 tcg_temp_free_i64(tsize
);
4468 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4470 int r1
= get_field(s
->fields
, r1
);
4471 int r3
= get_field(s
->fields
, r3
);
4472 TCGv_i64 t
= tcg_temp_new_i64();
4473 TCGv_i64 t4
= tcg_const_i64(4);
4474 TCGv_i64 t32
= tcg_const_i64(32);
4477 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4478 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4482 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4486 tcg_temp_free_i64(t
);
4487 tcg_temp_free_i64(t4
);
4488 tcg_temp_free_i64(t32
);
4492 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4494 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
4495 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4496 } else if (HAVE_ATOMIC128
) {
4497 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4499 gen_helper_exit_atomic(cpu_env
);
4500 return DISAS_NORETURN
;
4505 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4507 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4508 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4510 gen_helper_srst(cpu_env
, r1
, r2
);
4512 tcg_temp_free_i32(r1
);
4513 tcg_temp_free_i32(r2
);
4518 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4520 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4521 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4523 gen_helper_srstu(cpu_env
, r1
, r2
);
4525 tcg_temp_free_i32(r1
);
4526 tcg_temp_free_i32(r2
);
4531 static DisasJumpType
op_sub(DisasContext
*s
, DisasOps
*o
)
4533 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4537 static DisasJumpType
op_subb(DisasContext
*s
, DisasOps
*o
)
4542 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4544 /* The !borrow flag is the msb of CC. Since we want the inverse of
4545 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4546 disas_jcc(s
, &cmp
, 8 | 4);
4547 borrow
= tcg_temp_new_i64();
4549 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4551 TCGv_i32 t
= tcg_temp_new_i32();
4552 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4553 tcg_gen_extu_i32_i64(borrow
, t
);
4554 tcg_temp_free_i32(t
);
4558 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4559 tcg_temp_free_i64(borrow
);
4563 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4570 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4571 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4572 tcg_temp_free_i32(t
);
4574 t
= tcg_const_i32(s
->ilen
);
4575 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4576 tcg_temp_free_i32(t
);
4578 gen_exception(EXCP_SVC
);
4579 return DISAS_NORETURN
;
4582 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4586 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4587 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4588 gen_op_movi_cc(s
, cc
);
4592 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4594 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4599 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4601 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4606 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4608 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4613 #ifndef CONFIG_USER_ONLY
4615 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4617 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4622 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4624 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4631 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4633 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4634 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4635 tcg_temp_free_i32(l1
);
4640 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4642 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4643 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4644 tcg_temp_free_i32(l
);
4649 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4651 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4652 return_low128(o
->out2
);
4657 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4659 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4660 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4661 tcg_temp_free_i32(l
);
4666 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4668 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4669 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4670 tcg_temp_free_i32(l
);
4675 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4677 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4678 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4679 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4680 TCGv_i32 tst
= tcg_temp_new_i32();
4681 int m3
= get_field(s
->fields
, m3
);
4683 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4687 tcg_gen_movi_i32(tst
, -1);
4689 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4690 if (s
->insn
->opc
& 3) {
4691 tcg_gen_ext8u_i32(tst
, tst
);
4693 tcg_gen_ext16u_i32(tst
, tst
);
4696 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4698 tcg_temp_free_i32(r1
);
4699 tcg_temp_free_i32(r2
);
4700 tcg_temp_free_i32(sizes
);
4701 tcg_temp_free_i32(tst
);
4706 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4708 TCGv_i32 t1
= tcg_const_i32(0xff);
4709 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4710 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4711 tcg_temp_free_i32(t1
);
4716 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4718 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4719 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4720 tcg_temp_free_i32(l
);
4724 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4726 int l1
= get_field(s
->fields
, l1
) + 1;
4729 /* The length must not exceed 32 bytes. */
4731 gen_program_exception(s
, PGM_SPECIFICATION
);
4732 return DISAS_NORETURN
;
4734 l
= tcg_const_i32(l1
);
4735 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4736 tcg_temp_free_i32(l
);
4741 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4743 int l1
= get_field(s
->fields
, l1
) + 1;
4746 /* The length must be even and should not exceed 64 bytes. */
4747 if ((l1
& 1) || (l1
> 64)) {
4748 gen_program_exception(s
, PGM_SPECIFICATION
);
4749 return DISAS_NORETURN
;
4751 l
= tcg_const_i32(l1
);
4752 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4753 tcg_temp_free_i32(l
);
4759 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4761 int d1
= get_field(s
->fields
, d1
);
4762 int d2
= get_field(s
->fields
, d2
);
4763 int b1
= get_field(s
->fields
, b1
);
4764 int b2
= get_field(s
->fields
, b2
);
4765 int l
= get_field(s
->fields
, l1
);
4768 o
->addr1
= get_address(s
, 0, b1
, d1
);
4770 /* If the addresses are identical, this is a store/memset of zero. */
4771 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4772 o
->in2
= tcg_const_i64(0);
4776 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4779 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4783 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4786 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4790 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4793 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4797 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4799 gen_op_movi_cc(s
, 0);
4803 /* But in general we'll defer to a helper. */
4804 o
->in2
= get_address(s
, 0, b2
, d2
);
4805 t32
= tcg_const_i32(l
);
4806 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4807 tcg_temp_free_i32(t32
);
4812 static DisasJumpType
op_xor(DisasContext
*s
, DisasOps
*o
)
4814 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4818 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
4820 int shift
= s
->insn
->data
& 0xff;
4821 int size
= s
->insn
->data
>> 8;
4822 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4825 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4826 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4828 /* Produce the CC from only the bits manipulated. */
4829 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4830 set_cc_nz_u64(s
, cc_dst
);
4834 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
4836 o
->in1
= tcg_temp_new_i64();
4838 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4839 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4841 /* Perform the atomic operation in memory. */
4842 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
4846 /* Recompute also for atomic case: needed for setting CC. */
4847 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4849 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4850 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4855 static DisasJumpType
op_zero(DisasContext
*s
, DisasOps
*o
)
4857 o
->out
= tcg_const_i64(0);
4861 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
4863 o
->out
= tcg_const_i64(0);
4869 #ifndef CONFIG_USER_ONLY
4870 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
4872 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4874 gen_helper_clp(cpu_env
, r2
);
4875 tcg_temp_free_i32(r2
);
4880 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
4882 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4883 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4885 gen_helper_pcilg(cpu_env
, r1
, r2
);
4886 tcg_temp_free_i32(r1
);
4887 tcg_temp_free_i32(r2
);
4892 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
4894 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4895 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4897 gen_helper_pcistg(cpu_env
, r1
, r2
);
4898 tcg_temp_free_i32(r1
);
4899 tcg_temp_free_i32(r2
);
4904 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
4906 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4907 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4909 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4910 tcg_temp_free_i32(ar
);
4911 tcg_temp_free_i32(r1
);
4916 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
4918 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
4922 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
4924 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4925 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4927 gen_helper_rpcit(cpu_env
, r1
, r2
);
4928 tcg_temp_free_i32(r1
);
4929 tcg_temp_free_i32(r2
);
4934 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
4936 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4937 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4938 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4940 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
4941 tcg_temp_free_i32(ar
);
4942 tcg_temp_free_i32(r1
);
4943 tcg_temp_free_i32(r3
);
4948 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
4950 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4951 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
4953 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
4954 tcg_temp_free_i32(ar
);
4955 tcg_temp_free_i32(r1
);
4961 /* ====================================================================== */
4962 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4963 the original inputs), update the various cc data structures in order to
4964 be able to compute the new condition code. */
4966 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4968 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4971 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4973 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4976 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4978 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4981 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4983 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4986 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4988 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4991 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4993 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4996 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4998 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
5001 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
5003 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
5006 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
5008 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
5011 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
5013 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
5016 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
5018 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
5021 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
5023 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
5026 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
5028 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
5031 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
5033 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
5036 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
5038 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
5041 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
5043 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
5046 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
5048 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
5051 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
5053 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
5056 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
5058 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
5061 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
5063 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5064 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
5067 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5069 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5072 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5074 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5077 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5079 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5082 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5084 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5087 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5089 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5092 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5094 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
5097 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5099 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
5102 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
5104 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
5107 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
5109 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
5112 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5114 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5117 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5119 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5122 /* ====================================================================== */
5123 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5124 with the TCG register to which we will write. Used in combination with
5125 the "wout" generators, in some cases we need a new temporary, and in
5126 some cases we can write to a TCG global. */
5128 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5130 o
->out
= tcg_temp_new_i64();
5132 #define SPEC_prep_new 0
5134 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5136 o
->out
= tcg_temp_new_i64();
5137 o
->out2
= tcg_temp_new_i64();
5139 #define SPEC_prep_new_P 0
5141 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5143 o
->out
= regs
[get_field(f
, r1
)];
5146 #define SPEC_prep_r1 0
5148 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5150 int r1
= get_field(f
, r1
);
5152 o
->out2
= regs
[r1
+ 1];
5153 o
->g_out
= o
->g_out2
= true;
5155 #define SPEC_prep_r1_P SPEC_r1_even
5157 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5158 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5160 o
->out
= load_freg(get_field(f
, r1
));
5161 o
->out2
= load_freg(get_field(f
, r1
) + 2);
5163 #define SPEC_prep_x1 SPEC_r1_f128
5165 /* ====================================================================== */
5166 /* The "Write OUTput" generators. These generally perform some non-trivial
5167 copy of data to TCG globals, or to main memory. The trivial cases are
5168 generally handled by having a "prep" generator install the TCG global
5169 as the destination of the operation. */
5171 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5173 store_reg(get_field(f
, r1
), o
->out
);
5175 #define SPEC_wout_r1 0
5177 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5179 int r1
= get_field(f
, r1
);
5180 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5182 #define SPEC_wout_r1_8 0
5184 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5186 int r1
= get_field(f
, r1
);
5187 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5189 #define SPEC_wout_r1_16 0
5191 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5193 store_reg32_i64(get_field(f
, r1
), o
->out
);
5195 #define SPEC_wout_r1_32 0
5197 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5199 store_reg32h_i64(get_field(f
, r1
), o
->out
);
5201 #define SPEC_wout_r1_32h 0
5203 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5205 int r1
= get_field(f
, r1
);
5206 store_reg32_i64(r1
, o
->out
);
5207 store_reg32_i64(r1
+ 1, o
->out2
);
5209 #define SPEC_wout_r1_P32 SPEC_r1_even
5211 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5213 int r1
= get_field(f
, r1
);
5214 store_reg32_i64(r1
+ 1, o
->out
);
5215 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5216 store_reg32_i64(r1
, o
->out
);
5218 #define SPEC_wout_r1_D32 SPEC_r1_even
5220 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5222 int r3
= get_field(f
, r3
);
5223 store_reg32_i64(r3
, o
->out
);
5224 store_reg32_i64(r3
+ 1, o
->out2
);
5226 #define SPEC_wout_r3_P32 SPEC_r3_even
5228 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5230 int r3
= get_field(f
, r3
);
5231 store_reg(r3
, o
->out
);
5232 store_reg(r3
+ 1, o
->out2
);
5234 #define SPEC_wout_r3_P64 SPEC_r3_even
5236 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5238 store_freg32_i64(get_field(f
, r1
), o
->out
);
5240 #define SPEC_wout_e1 0
5242 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5244 store_freg(get_field(f
, r1
), o
->out
);
5246 #define SPEC_wout_f1 0
5248 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5250 int f1
= get_field(s
->fields
, r1
);
5251 store_freg(f1
, o
->out
);
5252 store_freg(f1
+ 2, o
->out2
);
5254 #define SPEC_wout_x1 SPEC_r1_f128
5256 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5258 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5259 store_reg32_i64(get_field(f
, r1
), o
->out
);
5262 #define SPEC_wout_cond_r1r2_32 0
5264 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5266 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5267 store_freg32_i64(get_field(f
, r1
), o
->out
);
5270 #define SPEC_wout_cond_e1e2 0
5272 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5274 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5276 #define SPEC_wout_m1_8 0
5278 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5280 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5282 #define SPEC_wout_m1_16 0
5284 #ifndef CONFIG_USER_ONLY
5285 static void wout_m1_16a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5287 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5289 #define SPEC_wout_m1_16a 0
5292 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5294 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5296 #define SPEC_wout_m1_32 0
5298 #ifndef CONFIG_USER_ONLY
5299 static void wout_m1_32a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5301 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5303 #define SPEC_wout_m1_32a 0
5306 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5308 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5310 #define SPEC_wout_m1_64 0
5312 #ifndef CONFIG_USER_ONLY
5313 static void wout_m1_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5315 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5317 #define SPEC_wout_m1_64a 0
5320 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5322 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5324 #define SPEC_wout_m2_32 0
5326 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5328 store_reg(get_field(f
, r1
), o
->in2
);
5330 #define SPEC_wout_in2_r1 0
5332 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5334 store_reg32_i64(get_field(f
, r1
), o
->in2
);
5336 #define SPEC_wout_in2_r1_32 0
5338 /* ====================================================================== */
5339 /* The "INput 1" generators. These load the first operand to an insn. */
5341 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5343 o
->in1
= load_reg(get_field(f
, r1
));
5345 #define SPEC_in1_r1 0
5347 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5349 o
->in1
= regs
[get_field(f
, r1
)];
5352 #define SPEC_in1_r1_o 0
5354 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5356 o
->in1
= tcg_temp_new_i64();
5357 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5359 #define SPEC_in1_r1_32s 0
5361 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5363 o
->in1
= tcg_temp_new_i64();
5364 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5366 #define SPEC_in1_r1_32u 0
5368 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5370 o
->in1
= tcg_temp_new_i64();
5371 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5373 #define SPEC_in1_r1_sr32 0
5375 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5377 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5379 #define SPEC_in1_r1p1 SPEC_r1_even
5381 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5383 o
->in1
= tcg_temp_new_i64();
5384 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5386 #define SPEC_in1_r1p1_32s SPEC_r1_even
5388 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5390 o
->in1
= tcg_temp_new_i64();
5391 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5393 #define SPEC_in1_r1p1_32u SPEC_r1_even
5395 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5397 int r1
= get_field(f
, r1
);
5398 o
->in1
= tcg_temp_new_i64();
5399 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5401 #define SPEC_in1_r1_D32 SPEC_r1_even
5403 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5405 o
->in1
= load_reg(get_field(f
, r2
));
5407 #define SPEC_in1_r2 0
5409 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5411 o
->in1
= tcg_temp_new_i64();
5412 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5414 #define SPEC_in1_r2_sr32 0
5416 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5418 o
->in1
= load_reg(get_field(f
, r3
));
5420 #define SPEC_in1_r3 0
5422 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5424 o
->in1
= regs
[get_field(f
, r3
)];
5427 #define SPEC_in1_r3_o 0
5429 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5431 o
->in1
= tcg_temp_new_i64();
5432 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5434 #define SPEC_in1_r3_32s 0
5436 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5438 o
->in1
= tcg_temp_new_i64();
5439 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5441 #define SPEC_in1_r3_32u 0
5443 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5445 int r3
= get_field(f
, r3
);
5446 o
->in1
= tcg_temp_new_i64();
5447 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5449 #define SPEC_in1_r3_D32 SPEC_r3_even
5451 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5453 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5455 #define SPEC_in1_e1 0
5457 static void in1_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5459 o
->in1
= load_freg(get_field(f
, r1
));
5461 #define SPEC_in1_f1 0
5463 /* Load the high double word of an extended (128-bit) format FP number */
5464 static void in1_x2h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5466 o
->in1
= load_freg(get_field(f
, r2
));
5468 #define SPEC_in1_x2h SPEC_r2_f128
5470 static void in1_f3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5472 o
->in1
= load_freg(get_field(f
, r3
));
5474 #define SPEC_in1_f3 0
5476 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5478 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5480 #define SPEC_in1_la1 0
5482 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5484 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5485 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5487 #define SPEC_in1_la2 0
5489 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5492 o
->in1
= tcg_temp_new_i64();
5493 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5495 #define SPEC_in1_m1_8u 0
5497 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5500 o
->in1
= tcg_temp_new_i64();
5501 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5503 #define SPEC_in1_m1_16s 0
5505 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5508 o
->in1
= tcg_temp_new_i64();
5509 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5511 #define SPEC_in1_m1_16u 0
5513 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5516 o
->in1
= tcg_temp_new_i64();
5517 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5519 #define SPEC_in1_m1_32s 0
5521 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5524 o
->in1
= tcg_temp_new_i64();
5525 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5527 #define SPEC_in1_m1_32u 0
5529 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5532 o
->in1
= tcg_temp_new_i64();
5533 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5535 #define SPEC_in1_m1_64 0
5537 /* ====================================================================== */
5538 /* The "INput 2" generators. These load the second operand to an insn. */
5540 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5542 o
->in2
= regs
[get_field(f
, r1
)];
5545 #define SPEC_in2_r1_o 0
5547 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5549 o
->in2
= tcg_temp_new_i64();
5550 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5552 #define SPEC_in2_r1_16u 0
5554 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5556 o
->in2
= tcg_temp_new_i64();
5557 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5559 #define SPEC_in2_r1_32u 0
5561 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5563 int r1
= get_field(f
, r1
);
5564 o
->in2
= tcg_temp_new_i64();
5565 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5567 #define SPEC_in2_r1_D32 SPEC_r1_even
5569 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5571 o
->in2
= load_reg(get_field(f
, r2
));
5573 #define SPEC_in2_r2 0
5575 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5577 o
->in2
= regs
[get_field(f
, r2
)];
5580 #define SPEC_in2_r2_o 0
5582 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5584 int r2
= get_field(f
, r2
);
5586 o
->in2
= load_reg(r2
);
5589 #define SPEC_in2_r2_nz 0
5591 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5593 o
->in2
= tcg_temp_new_i64();
5594 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5596 #define SPEC_in2_r2_8s 0
5598 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5600 o
->in2
= tcg_temp_new_i64();
5601 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5603 #define SPEC_in2_r2_8u 0
5605 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5607 o
->in2
= tcg_temp_new_i64();
5608 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5610 #define SPEC_in2_r2_16s 0
5612 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5614 o
->in2
= tcg_temp_new_i64();
5615 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5617 #define SPEC_in2_r2_16u 0
5619 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5621 o
->in2
= load_reg(get_field(f
, r3
));
5623 #define SPEC_in2_r3 0
5625 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5627 o
->in2
= tcg_temp_new_i64();
5628 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5630 #define SPEC_in2_r3_sr32 0
5632 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5634 o
->in2
= tcg_temp_new_i64();
5635 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5637 #define SPEC_in2_r2_32s 0
5639 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5641 o
->in2
= tcg_temp_new_i64();
5642 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5644 #define SPEC_in2_r2_32u 0
5646 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5648 o
->in2
= tcg_temp_new_i64();
5649 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5651 #define SPEC_in2_r2_sr32 0
5653 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5655 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5657 #define SPEC_in2_e2 0
5659 static void in2_f2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5661 o
->in2
= load_freg(get_field(f
, r2
));
5663 #define SPEC_in2_f2 0
5665 /* Load the low double word of an extended (128-bit) format FP number */
5666 static void in2_x2l(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5668 o
->in2
= load_freg(get_field(f
, r2
) + 2);
5670 #define SPEC_in2_x2l SPEC_r2_f128
5672 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5674 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5676 #define SPEC_in2_ra2 0
5678 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5680 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5681 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5683 #define SPEC_in2_a2 0
5685 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5687 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(f
, i2
) * 2);
5689 #define SPEC_in2_ri2 0
5691 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5693 help_l2_shift(s
, f
, o
, 31);
5695 #define SPEC_in2_sh32 0
5697 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5699 help_l2_shift(s
, f
, o
, 63);
5701 #define SPEC_in2_sh64 0
5703 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5706 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5708 #define SPEC_in2_m2_8u 0
5710 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5713 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5715 #define SPEC_in2_m2_16s 0
5717 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5720 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5722 #define SPEC_in2_m2_16u 0
5724 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5727 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5729 #define SPEC_in2_m2_32s 0
5731 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5734 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5736 #define SPEC_in2_m2_32u 0
5738 #ifndef CONFIG_USER_ONLY
5739 static void in2_m2_32ua(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5742 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5744 #define SPEC_in2_m2_32ua 0
5747 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5750 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5752 #define SPEC_in2_m2_64 0
5754 #ifndef CONFIG_USER_ONLY
5755 static void in2_m2_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5758 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5760 #define SPEC_in2_m2_64a 0
5763 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5766 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5768 #define SPEC_in2_mri2_16u 0
5770 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5773 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5775 #define SPEC_in2_mri2_32s 0
5777 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5780 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5782 #define SPEC_in2_mri2_32u 0
5784 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5787 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5789 #define SPEC_in2_mri2_64 0
5791 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5793 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5795 #define SPEC_in2_i2 0
5797 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5799 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5801 #define SPEC_in2_i2_8u 0
5803 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5805 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5807 #define SPEC_in2_i2_16u 0
5809 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5811 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5813 #define SPEC_in2_i2_32u 0
5815 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5817 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5818 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5820 #define SPEC_in2_i2_16u_shl 0
5822 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5824 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5825 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5827 #define SPEC_in2_i2_32u_shl 0
5829 #ifndef CONFIG_USER_ONLY
5830 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5832 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5834 #define SPEC_in2_insn 0
5837 /* ====================================================================== */
5839 /* Find opc within the table of insns. This is formulated as a switch
5840 statement so that (1) we get compile-time notice of cut-paste errors
5841 for duplicated opcodes, and (2) the compiler generates the binary
5842 search tree, rather than us having to post-process the table. */
5844 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5845 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5847 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5848 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5850 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5851 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5853 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5855 enum DisasInsnEnum
{
5856 #include "insn-data.def"
5860 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
5865 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5867 .help_in1 = in1_##I1, \
5868 .help_in2 = in2_##I2, \
5869 .help_prep = prep_##P, \
5870 .help_wout = wout_##W, \
5871 .help_cout = cout_##CC, \
5872 .help_op = op_##OP, \
5876 /* Allow 0 to be used for NULL in the table below. */
5884 #define SPEC_in1_0 0
5885 #define SPEC_in2_0 0
5886 #define SPEC_prep_0 0
5887 #define SPEC_wout_0 0
5889 /* Give smaller names to the various facilities. */
5890 #define FAC_Z S390_FEAT_ZARCH
5891 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5892 #define FAC_DFP S390_FEAT_DFP
5893 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5894 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5895 #define FAC_EE S390_FEAT_EXECUTE_EXT
5896 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5897 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5898 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5899 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5900 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5901 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5902 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5903 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5904 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5905 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5906 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5907 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5908 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5909 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5910 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5911 #define FAC_SFLE S390_FEAT_STFLE
5912 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5913 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5914 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5915 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5916 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5917 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5918 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5919 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5920 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5921 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5922 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5923 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5924 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5925 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
5926 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
5927 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
5929 static const DisasInsn insn_info
[] = {
5930 #include "insn-data.def"
5934 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
5935 case OPC: return &insn_info[insn_ ## NM];
5937 static const DisasInsn
*lookup_opc(uint16_t opc
)
5940 #include "insn-data.def"
5951 /* Extract a field from the insn. The INSN should be left-aligned in
5952 the uint64_t so that we can more easily utilize the big-bit-endian
5953 definitions we extract from the Principals of Operation. */
5955 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5963 /* Zero extract the field from the insn. */
5964 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5966 /* Sign-extend, or un-swap the field as necessary. */
5968 case 0: /* unsigned */
5970 case 1: /* signed */
5971 assert(f
->size
<= 32);
5972 m
= 1u << (f
->size
- 1);
5975 case 2: /* dl+dh split, signed 20 bit. */
5976 r
= ((int8_t)r
<< 12) | (r
>> 8);
5982 /* Validate that the "compressed" encoding we selected above is valid.
5983 I.e. we havn't make two different original fields overlap. */
5984 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5985 o
->presentC
|= 1 << f
->indexC
;
5986 o
->presentO
|= 1 << f
->indexO
;
5988 o
->c
[f
->indexC
] = r
;
5991 /* Lookup the insn at the current PC, extracting the operands into O and
5992 returning the info struct for the insn. Returns NULL for invalid insn. */
5994 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5997 uint64_t insn
, pc
= s
->base
.pc_next
;
5999 const DisasInsn
*info
;
6001 if (unlikely(s
->ex_value
)) {
6002 /* Drop the EX data now, so that it's clear on exception paths. */
6003 TCGv_i64 zero
= tcg_const_i64(0);
6004 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
6005 tcg_temp_free_i64(zero
);
6007 /* Extract the values saved by EXECUTE. */
6008 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
6009 ilen
= s
->ex_value
& 0xf;
6012 insn
= ld_code2(env
, pc
);
6013 op
= (insn
>> 8) & 0xff;
6014 ilen
= get_ilen(op
);
6020 insn
= ld_code4(env
, pc
) << 32;
6023 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
6026 g_assert_not_reached();
6029 s
->pc_tmp
= s
->base
.pc_next
+ ilen
;
6032 /* We can't actually determine the insn format until we've looked up
6033 the full insn opcode. Which we can't do without locating the
6034 secondary opcode. Assume by default that OP2 is at bit 40; for
6035 those smaller insns that don't actually have a secondary opcode
6036 this will correctly result in OP2 = 0. */
6042 case 0xb2: /* S, RRF, RRE, IE */
6043 case 0xb3: /* RRE, RRD, RRF */
6044 case 0xb9: /* RRE, RRF */
6045 case 0xe5: /* SSE, SIL */
6046 op2
= (insn
<< 8) >> 56;
6050 case 0xc0: /* RIL */
6051 case 0xc2: /* RIL */
6052 case 0xc4: /* RIL */
6053 case 0xc6: /* RIL */
6054 case 0xc8: /* SSF */
6055 case 0xcc: /* RIL */
6056 op2
= (insn
<< 12) >> 60;
6058 case 0xc5: /* MII */
6059 case 0xc7: /* SMI */
6060 case 0xd0 ... 0xdf: /* SS */
6066 case 0xee ... 0xf3: /* SS */
6067 case 0xf8 ... 0xfd: /* SS */
6071 op2
= (insn
<< 40) >> 56;
6075 memset(f
, 0, sizeof(*f
));
6080 /* Lookup the instruction. */
6081 info
= lookup_opc(op
<< 8 | op2
);
6083 /* If we found it, extract the operands. */
6085 DisasFormat fmt
= info
->fmt
;
6088 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
6089 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* True if REG is an additional-floating-point register, i.e. any FP
   register other than the base set 0, 2, 4, 6.  */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}
/* True if REG is the first register of a valid 128-bit FP pair.  */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
6106 static DisasJumpType
translate_one(CPUS390XState
*env
, DisasContext
*s
)
6108 const DisasInsn
*insn
;
6109 DisasJumpType ret
= DISAS_NEXT
;
6113 /* Search for the insn in the table. */
6114 insn
= extract_insn(env
, s
, &f
);
6116 /* Not found means unimplemented/illegal opcode. */
6118 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6120 gen_illegal_opcode(s
);
6121 return DISAS_NORETURN
;
6124 #ifndef CONFIG_USER_ONLY
6125 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6126 TCGv_i64 addr
= tcg_const_i64(s
->base
.pc_next
);
6127 gen_helper_per_ifetch(cpu_env
, addr
);
6128 tcg_temp_free_i64(addr
);
6134 /* privileged instruction */
6135 if ((s
->base
.tb
->flags
& FLAG_MASK_PSTATE
) && (insn
->flags
& IF_PRIV
)) {
6136 gen_program_exception(s
, PGM_PRIVILEGED
);
6137 return DISAS_NORETURN
;
6140 /* if AFP is not enabled, instructions and registers are forbidden */
6141 if (!(s
->base
.tb
->flags
& FLAG_MASK_AFP
)) {
6144 if ((insn
->flags
& IF_AFP1
) && is_afp_reg(get_field(&f
, r1
))) {
6147 if ((insn
->flags
& IF_AFP2
) && is_afp_reg(get_field(&f
, r2
))) {
6150 if ((insn
->flags
& IF_AFP3
) && is_afp_reg(get_field(&f
, r3
))) {
6153 if (insn
->flags
& IF_BFP
) {
6156 if (insn
->flags
& IF_DFP
) {
6160 gen_data_exception(dxc
);
6161 return DISAS_NORETURN
;
6166 /* Check for insn specification exceptions. */
6168 if ((insn
->spec
& SPEC_r1_even
&& get_field(&f
, r1
) & 1) ||
6169 (insn
->spec
& SPEC_r2_even
&& get_field(&f
, r2
) & 1) ||
6170 (insn
->spec
& SPEC_r3_even
&& get_field(&f
, r3
) & 1) ||
6171 (insn
->spec
& SPEC_r1_f128
&& !is_fp_pair(get_field(&f
, r1
))) ||
6172 (insn
->spec
& SPEC_r2_f128
&& !is_fp_pair(get_field(&f
, r2
)))) {
6173 gen_program_exception(s
, PGM_SPECIFICATION
);
6174 return DISAS_NORETURN
;
6178 /* Set up the strutures we use to communicate with the helpers. */
6182 /* Implement the instruction. */
6183 if (insn
->help_in1
) {
6184 insn
->help_in1(s
, &f
, &o
);
6186 if (insn
->help_in2
) {
6187 insn
->help_in2(s
, &f
, &o
);
6189 if (insn
->help_prep
) {
6190 insn
->help_prep(s
, &f
, &o
);
6192 if (insn
->help_op
) {
6193 ret
= insn
->help_op(s
, &o
);
6195 if (ret
!= DISAS_NORETURN
) {
6196 if (insn
->help_wout
) {
6197 insn
->help_wout(s
, &f
, &o
);
6199 if (insn
->help_cout
) {
6200 insn
->help_cout(s
, &o
);
6204 /* Free any temporaries created by the helpers. */
6205 if (o
.out
&& !o
.g_out
) {
6206 tcg_temp_free_i64(o
.out
);
6208 if (o
.out2
&& !o
.g_out2
) {
6209 tcg_temp_free_i64(o
.out2
);
6211 if (o
.in1
&& !o
.g_in1
) {
6212 tcg_temp_free_i64(o
.in1
);
6214 if (o
.in2
&& !o
.g_in2
) {
6215 tcg_temp_free_i64(o
.in2
);
6218 tcg_temp_free_i64(o
.addr1
);
6221 #ifndef CONFIG_USER_ONLY
6222 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6223 /* An exception might be triggered, save PSW if not already done. */
6224 if (ret
== DISAS_NEXT
|| ret
== DISAS_PC_STALE
) {
6225 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
6228 /* Call the helper to check for a possible PER exception. */
6229 gen_helper_per_check_exception(cpu_env
);
6233 /* Advance to the next instruction. */
6234 s
->base
.pc_next
= s
->pc_tmp
;
6238 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6240 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6243 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6244 dc
->base
.pc_first
&= 0x7fffffff;
6245 dc
->base
.pc_next
= dc
->base
.pc_first
;
6248 dc
->cc_op
= CC_OP_DYNAMIC
;
6249 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6250 dc
->do_debug
= dc
->base
.singlestep_enabled
;
6253 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6257 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6259 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6261 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
);
6264 static bool s390x_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
6265 const CPUBreakpoint
*bp
)
6267 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6269 dc
->base
.is_jmp
= DISAS_PC_STALE
;
6270 dc
->do_debug
= true;
6271 /* The address covered by the breakpoint must be included in
6272 [tb->pc, tb->pc + tb->size) in order to for it to be
6273 properly cleared -- thus we increment the PC here so that
6274 the logic setting tb->size does the right thing. */
6275 dc
->base
.pc_next
+= 2;
6279 static void s390x_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
6281 CPUS390XState
*env
= cs
->env_ptr
;
6282 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6284 dc
->base
.is_jmp
= translate_one(env
, dc
);
6285 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
6286 uint64_t page_start
;
6288 page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
6289 if (dc
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
|| dc
->ex_value
) {
6290 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
6295 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6297 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6299 switch (dc
->base
.is_jmp
) {
6301 case DISAS_NORETURN
:
6303 case DISAS_TOO_MANY
:
6304 case DISAS_PC_STALE
:
6305 case DISAS_PC_STALE_NOCHAIN
:
6306 update_psw_addr(dc
);
6308 case DISAS_PC_UPDATED
:
6309 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6310 cc op type is in env */
6313 case DISAS_PC_CC_UPDATED
:
6314 /* Exit the TB, either by raising a debug exception or by return. */
6316 gen_exception(EXCP_DEBUG
);
6317 } else if (use_exit_tb(dc
) ||
6318 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6319 tcg_gen_exit_tb(NULL
, 0);
6321 tcg_gen_lookup_and_goto_ptr();
6325 g_assert_not_reached();
6329 static void s390x_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
6331 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6333 if (unlikely(dc
->ex_value
)) {
6334 /* ??? Unfortunately log_target_disas can't use host memory. */
6335 qemu_log("IN: EXECUTE %016" PRIx64
, dc
->ex_value
);
6337 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
6338 log_target_disas(cs
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
6342 static const TranslatorOps s390x_tr_ops
= {
6343 .init_disas_context
= s390x_tr_init_disas_context
,
6344 .tb_start
= s390x_tr_tb_start
,
6345 .insn_start
= s390x_tr_insn_start
,
6346 .breakpoint_check
= s390x_tr_breakpoint_check
,
6347 .translate_insn
= s390x_tr_translate_insn
,
6348 .tb_stop
= s390x_tr_tb_stop
,
6349 .disas_log
= s390x_tr_disas_log
,
6352 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
)
6356 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
);
6359 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6362 int cc_op
= data
[1];
6363 env
->psw
.addr
= data
[0];
6364 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {