/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* Verbose disassembly logging; compiled out unless explicitly enabled.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
33 #include "s390x-internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg/tcg-op.h"
37 #include "tcg/tcg-op-gvec.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "exec/translator.h"
47 #include "qemu/atomic128.h"
50 /* Information that (most) every instruction needs to manipulate. */
51 typedef struct DisasContext DisasContext
;
52 typedef struct DisasInsn DisasInsn
;
53 typedef struct DisasFields DisasFields
;
56 * Define a structure to hold the decoded fields. We'll store each inside
57 * an array indexed by an enum. In order to conserve memory, we'll arrange
58 * for fields that do not exist at the same time to overlap, thus the "C"
59 * for compact. For checking purposes there is an "O" for original index
60 * as well that will be applied to availability bitmaps.
63 enum DisasFieldIndexO
{
92 enum DisasFieldIndexC
{
133 unsigned presentC
:16;
134 unsigned int presentO
;
138 struct DisasContext
{
139 DisasContextBase base
;
140 const DisasInsn
*insn
;
144 * During translate_one(), pc_tmp is used to determine the instruction
145 * to be executed after base.pc_next - e.g. next sequential instruction
146 * or a branch target.
154 /* Information carried about a condition to be evaluated. */
161 struct { TCGv_i64 a
, b
; } s64
;
162 struct { TCGv_i32 a
, b
; } s32
;
166 #ifdef DEBUG_INLINE_BRANCHES
167 static uint64_t inline_branch_hit
[CC_OP_MAX
];
168 static uint64_t inline_branch_miss
[CC_OP_MAX
];
171 static void pc_to_link_info(TCGv_i64 out
, DisasContext
*s
, uint64_t pc
)
175 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
176 if (s
->base
.tb
->flags
& FLAG_MASK_64
) {
177 tcg_gen_movi_i64(out
, pc
);
182 assert(!(s
->base
.tb
->flags
& FLAG_MASK_64
));
183 tmp
= tcg_const_i64(pc
);
184 tcg_gen_deposit_i64(out
, out
, tmp
, 0, 32);
185 tcg_temp_free_i64(tmp
);
188 static TCGv_i64 psw_addr
;
189 static TCGv_i64 psw_mask
;
190 static TCGv_i64 gbea
;
192 static TCGv_i32 cc_op
;
193 static TCGv_i64 cc_src
;
194 static TCGv_i64 cc_dst
;
195 static TCGv_i64 cc_vr
;
197 static char cpu_reg_names
[16][4];
198 static TCGv_i64 regs
[16];
200 void s390x_translate_init(void)
204 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
205 offsetof(CPUS390XState
, psw
.addr
),
207 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
208 offsetof(CPUS390XState
, psw
.mask
),
210 gbea
= tcg_global_mem_new_i64(cpu_env
,
211 offsetof(CPUS390XState
, gbea
),
214 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
216 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
218 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
220 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
223 for (i
= 0; i
< 16; i
++) {
224 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
225 regs
[i
] = tcg_global_mem_new(cpu_env
,
226 offsetof(CPUS390XState
, regs
[i
]),
231 static inline int vec_full_reg_offset(uint8_t reg
)
234 return offsetof(CPUS390XState
, vregs
[reg
][0]);
237 static inline int vec_reg_offset(uint8_t reg
, uint8_t enr
, MemOp es
)
239 /* Convert element size (es) - e.g. MO_8 - to bytes */
240 const uint8_t bytes
= 1 << es
;
241 int offs
= enr
* bytes
;
244 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
245 * of the 16 byte vector, on both, little and big endian systems.
247 * Big Endian (target/possible host)
248 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
249 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
250 * W: [ 0][ 1] - [ 2][ 3]
253 * Little Endian (possible host)
254 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
255 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
256 * W: [ 1][ 0] - [ 3][ 2]
259 * For 16 byte elements, the two 8 byte halves will not form a host
260 * int128 if the host is little endian, since they're in the wrong order.
261 * Some operations (e.g. xor) do not care. For operations like addition,
262 * the two 8 byte elements have to be loaded separately. Let's force all
263 * 16 byte operations to handle it in a special way.
265 g_assert(es
<= MO_64
);
266 #ifndef HOST_WORDS_BIGENDIAN
269 return offs
+ vec_full_reg_offset(reg
);
272 static inline int freg64_offset(uint8_t reg
)
275 return vec_reg_offset(reg
, 0, MO_64
);
278 static inline int freg32_offset(uint8_t reg
)
281 return vec_reg_offset(reg
, 0, MO_32
);
284 static TCGv_i64
load_reg(int reg
)
286 TCGv_i64 r
= tcg_temp_new_i64();
287 tcg_gen_mov_i64(r
, regs
[reg
]);
291 static TCGv_i64
load_freg(int reg
)
293 TCGv_i64 r
= tcg_temp_new_i64();
295 tcg_gen_ld_i64(r
, cpu_env
, freg64_offset(reg
));
299 static TCGv_i64
load_freg32_i64(int reg
)
301 TCGv_i64 r
= tcg_temp_new_i64();
303 tcg_gen_ld32u_i64(r
, cpu_env
, freg32_offset(reg
));
307 static void store_reg(int reg
, TCGv_i64 v
)
309 tcg_gen_mov_i64(regs
[reg
], v
);
312 static void store_freg(int reg
, TCGv_i64 v
)
314 tcg_gen_st_i64(v
, cpu_env
, freg64_offset(reg
));
317 static void store_reg32_i64(int reg
, TCGv_i64 v
)
319 /* 32 bit register writes keep the upper half */
320 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
323 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
325 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
328 static void store_freg32_i64(int reg
, TCGv_i64 v
)
330 tcg_gen_st32_i64(v
, cpu_env
, freg32_offset(reg
));
333 static void return_low128(TCGv_i64 dest
)
335 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
338 static void update_psw_addr(DisasContext
*s
)
341 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
344 static void per_branch(DisasContext
*s
, bool to_next
)
346 #ifndef CONFIG_USER_ONLY
347 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
349 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
350 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
351 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
353 tcg_temp_free_i64(next_pc
);
359 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
360 TCGv_i64 arg1
, TCGv_i64 arg2
)
362 #ifndef CONFIG_USER_ONLY
363 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
364 TCGLabel
*lab
= gen_new_label();
365 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
367 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
368 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
372 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
373 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
374 tcg_temp_free_i64(pc
);
379 static void per_breaking_event(DisasContext
*s
)
381 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
384 static void update_cc_op(DisasContext
*s
)
386 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
387 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
391 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
393 return (uint64_t)cpu_lduw_code(env
, pc
);
396 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
398 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
401 static int get_mem_index(DisasContext
*s
)
403 #ifdef CONFIG_USER_ONLY
406 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
410 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
411 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
412 return MMU_PRIMARY_IDX
;
413 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
414 return MMU_SECONDARY_IDX
;
415 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
424 static void gen_exception(int excp
)
426 TCGv_i32 tmp
= tcg_const_i32(excp
);
427 gen_helper_exception(cpu_env
, tmp
);
428 tcg_temp_free_i32(tmp
);
431 static void gen_program_exception(DisasContext
*s
, int code
)
435 /* Remember what pgm exeption this was. */
436 tmp
= tcg_const_i32(code
);
437 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
438 tcg_temp_free_i32(tmp
);
440 tmp
= tcg_const_i32(s
->ilen
);
441 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
442 tcg_temp_free_i32(tmp
);
450 /* Trigger exception. */
451 gen_exception(EXCP_PGM
);
454 static inline void gen_illegal_opcode(DisasContext
*s
)
456 gen_program_exception(s
, PGM_OPERATION
);
459 static inline void gen_data_exception(uint8_t dxc
)
461 TCGv_i32 tmp
= tcg_const_i32(dxc
);
462 gen_helper_data_exception(cpu_env
, tmp
);
463 tcg_temp_free_i32(tmp
);
466 static inline void gen_trap(DisasContext
*s
)
468 /* Set DXC to 0xff */
469 gen_data_exception(0xff);
472 static void gen_addi_and_wrap_i64(DisasContext
*s
, TCGv_i64 dst
, TCGv_i64 src
,
475 tcg_gen_addi_i64(dst
, src
, imm
);
476 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
477 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
478 tcg_gen_andi_i64(dst
, dst
, 0x7fffffff);
480 tcg_gen_andi_i64(dst
, dst
, 0x00ffffff);
485 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
487 TCGv_i64 tmp
= tcg_temp_new_i64();
490 * Note that d2 is limited to 20 bits, signed. If we crop negative
491 * displacements early we create larger immedate addends.
494 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
495 gen_addi_and_wrap_i64(s
, tmp
, tmp
, d2
);
497 gen_addi_and_wrap_i64(s
, tmp
, regs
[b2
], d2
);
499 gen_addi_and_wrap_i64(s
, tmp
, regs
[x2
], d2
);
500 } else if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
501 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
502 tcg_gen_movi_i64(tmp
, d2
& 0x7fffffff);
504 tcg_gen_movi_i64(tmp
, d2
& 0x00ffffff);
507 tcg_gen_movi_i64(tmp
, d2
);
513 static inline bool live_cc_data(DisasContext
*s
)
515 return (s
->cc_op
!= CC_OP_DYNAMIC
516 && s
->cc_op
!= CC_OP_STATIC
520 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
522 if (live_cc_data(s
)) {
523 tcg_gen_discard_i64(cc_src
);
524 tcg_gen_discard_i64(cc_dst
);
525 tcg_gen_discard_i64(cc_vr
);
527 s
->cc_op
= CC_OP_CONST0
+ val
;
530 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
532 if (live_cc_data(s
)) {
533 tcg_gen_discard_i64(cc_src
);
534 tcg_gen_discard_i64(cc_vr
);
536 tcg_gen_mov_i64(cc_dst
, dst
);
540 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
543 if (live_cc_data(s
)) {
544 tcg_gen_discard_i64(cc_vr
);
546 tcg_gen_mov_i64(cc_src
, src
);
547 tcg_gen_mov_i64(cc_dst
, dst
);
551 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
552 TCGv_i64 dst
, TCGv_i64 vr
)
554 tcg_gen_mov_i64(cc_src
, src
);
555 tcg_gen_mov_i64(cc_dst
, dst
);
556 tcg_gen_mov_i64(cc_vr
, vr
);
560 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
562 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
565 /* CC value is in env->cc_op */
566 static void set_cc_static(DisasContext
*s
)
568 if (live_cc_data(s
)) {
569 tcg_gen_discard_i64(cc_src
);
570 tcg_gen_discard_i64(cc_dst
);
571 tcg_gen_discard_i64(cc_vr
);
573 s
->cc_op
= CC_OP_STATIC
;
576 /* calculates cc into cc_op */
577 static void gen_op_calc_cc(DisasContext
*s
)
579 TCGv_i32 local_cc_op
= NULL
;
580 TCGv_i64 dummy
= NULL
;
584 dummy
= tcg_const_i64(0);
590 local_cc_op
= tcg_const_i32(s
->cc_op
);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
627 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
633 case CC_OP_LTUGTU_32
:
634 case CC_OP_LTUGTU_64
:
644 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
651 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
654 /* unknown operation - assume 3 arguments and cc_op in env */
655 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
662 tcg_temp_free_i32(local_cc_op
);
665 tcg_temp_free_i64(dummy
);
668 /* We now have cc in cc_op as constant */
672 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
674 if (unlikely(s
->base
.tb
->flags
& FLAG_MASK_PER
)) {
677 return translator_use_goto_tb(&s
->base
, dest
);
680 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
682 #ifdef DEBUG_INLINE_BRANCHES
683 inline_branch_miss
[cc_op
]++;
687 static void account_inline_branch(DisasContext
*s
, int cc_op
)
689 #ifdef DEBUG_INLINE_BRANCHES
690 inline_branch_hit
[cc_op
]++;
694 /* Table of mask values to comparison codes, given a comparison as input.
695 For such, CC=3 should not be possible. */
696 static const TCGCond ltgt_cond
[16] = {
697 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
698 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
699 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
700 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
701 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
702 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
703 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
704 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
707 /* Table of mask values to comparison codes, given a logic op as input.
708 For such, only CC=0 and CC=1 should be possible. */
709 static const TCGCond nz_cond
[16] = {
710 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
711 TCG_COND_NEVER
, TCG_COND_NEVER
,
712 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
713 TCG_COND_NE
, TCG_COND_NE
,
714 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
715 TCG_COND_EQ
, TCG_COND_EQ
,
716 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
717 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
720 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
721 details required to generate a TCG comparison. */
722 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
725 enum cc_op old_cc_op
= s
->cc_op
;
727 if (mask
== 15 || mask
== 0) {
728 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
731 c
->g1
= c
->g2
= true;
736 /* Find the TCG condition for the mask + cc op. */
742 cond
= ltgt_cond
[mask
];
743 if (cond
== TCG_COND_NEVER
) {
746 account_inline_branch(s
, old_cc_op
);
749 case CC_OP_LTUGTU_32
:
750 case CC_OP_LTUGTU_64
:
751 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
752 if (cond
== TCG_COND_NEVER
) {
755 account_inline_branch(s
, old_cc_op
);
759 cond
= nz_cond
[mask
];
760 if (cond
== TCG_COND_NEVER
) {
763 account_inline_branch(s
, old_cc_op
);
778 account_inline_branch(s
, old_cc_op
);
793 account_inline_branch(s
, old_cc_op
);
797 switch (mask
& 0xa) {
798 case 8: /* src == 0 -> no one bit found */
801 case 2: /* src != 0 -> one bit found */
807 account_inline_branch(s
, old_cc_op
);
813 case 8 | 2: /* result == 0 */
816 case 4 | 1: /* result != 0 */
819 case 8 | 4: /* !carry (borrow) */
820 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_EQ
: TCG_COND_NE
;
822 case 2 | 1: /* carry (!borrow) */
823 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_NE
: TCG_COND_EQ
;
828 account_inline_branch(s
, old_cc_op
);
833 /* Calculate cc value. */
838 /* Jump based on CC. We'll load up the real cond below;
839 the assignment here merely avoids a compiler warning. */
840 account_noninline_branch(s
, old_cc_op
);
841 old_cc_op
= CC_OP_STATIC
;
842 cond
= TCG_COND_NEVER
;
846 /* Load up the arguments of the comparison. */
848 c
->g1
= c
->g2
= false;
852 c
->u
.s32
.a
= tcg_temp_new_i32();
853 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
854 c
->u
.s32
.b
= tcg_const_i32(0);
857 case CC_OP_LTUGTU_32
:
859 c
->u
.s32
.a
= tcg_temp_new_i32();
860 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
861 c
->u
.s32
.b
= tcg_temp_new_i32();
862 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
869 c
->u
.s64
.b
= tcg_const_i64(0);
873 case CC_OP_LTUGTU_64
:
876 c
->g1
= c
->g2
= true;
882 c
->u
.s64
.a
= tcg_temp_new_i64();
883 c
->u
.s64
.b
= tcg_const_i64(0);
884 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
890 c
->u
.s64
.b
= tcg_const_i64(0);
894 case 4 | 1: /* result */
898 case 2 | 1: /* carry */
902 g_assert_not_reached();
911 case 0x8 | 0x4 | 0x2: /* cc != 3 */
913 c
->u
.s32
.b
= tcg_const_i32(3);
915 case 0x8 | 0x4 | 0x1: /* cc != 2 */
917 c
->u
.s32
.b
= tcg_const_i32(2);
919 case 0x8 | 0x2 | 0x1: /* cc != 1 */
921 c
->u
.s32
.b
= tcg_const_i32(1);
923 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
926 c
->u
.s32
.a
= tcg_temp_new_i32();
927 c
->u
.s32
.b
= tcg_const_i32(0);
928 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
930 case 0x8 | 0x4: /* cc < 2 */
932 c
->u
.s32
.b
= tcg_const_i32(2);
934 case 0x8: /* cc == 0 */
936 c
->u
.s32
.b
= tcg_const_i32(0);
938 case 0x4 | 0x2 | 0x1: /* cc != 0 */
940 c
->u
.s32
.b
= tcg_const_i32(0);
942 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
945 c
->u
.s32
.a
= tcg_temp_new_i32();
946 c
->u
.s32
.b
= tcg_const_i32(0);
947 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
949 case 0x4: /* cc == 1 */
951 c
->u
.s32
.b
= tcg_const_i32(1);
953 case 0x2 | 0x1: /* cc > 1 */
955 c
->u
.s32
.b
= tcg_const_i32(1);
957 case 0x2: /* cc == 2 */
959 c
->u
.s32
.b
= tcg_const_i32(2);
961 case 0x1: /* cc == 3 */
963 c
->u
.s32
.b
= tcg_const_i32(3);
966 /* CC is masked by something else: (8 >> cc) & mask. */
969 c
->u
.s32
.a
= tcg_const_i32(8);
970 c
->u
.s32
.b
= tcg_const_i32(0);
971 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
972 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
983 static void free_compare(DisasCompare
*c
)
987 tcg_temp_free_i64(c
->u
.s64
.a
);
989 tcg_temp_free_i32(c
->u
.s32
.a
);
994 tcg_temp_free_i64(c
->u
.s64
.b
);
996 tcg_temp_free_i32(c
->u
.s32
.b
);
1001 /* ====================================================================== */
1002 /* Define the insn format enumeration. */
1003 #define F0(N) FMT_##N,
1004 #define F1(N, X1) F0(N)
1005 #define F2(N, X1, X2) F0(N)
1006 #define F3(N, X1, X2, X3) F0(N)
1007 #define F4(N, X1, X2, X3, X4) F0(N)
1008 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1009 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1012 #include "insn-format.def"
1023 /* This is the way fields are to be accessed out of DisasFields. */
1024 #define have_field(S, F) have_field1((S), FLD_O_##F)
1025 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1027 static bool have_field1(const DisasContext
*s
, enum DisasFieldIndexO c
)
1029 return (s
->fields
.presentO
>> c
) & 1;
1032 static int get_field1(const DisasContext
*s
, enum DisasFieldIndexO o
,
1033 enum DisasFieldIndexC c
)
1035 assert(have_field1(s
, o
));
1036 return s
->fields
.c
[c
];
1039 /* Describe the layout of each field in each format. */
1040 typedef struct DisasField
{
1042 unsigned int size
:8;
1043 unsigned int type
:2;
1044 unsigned int indexC
:6;
1045 enum DisasFieldIndexO indexO
:8;
1048 typedef struct DisasFormatInfo
{
1049 DisasField op
[NUM_C_FIELD
];
1052 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1053 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1054 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1055 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1056 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1057 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1058 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1059 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1060 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1061 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1062 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1063 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1064 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1065 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1066 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1068 #define F0(N) { { } },
1069 #define F1(N, X1) { { X1 } },
1070 #define F2(N, X1, X2) { { X1, X2 } },
1071 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1072 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1073 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1074 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1076 static const DisasFormatInfo format_info
[] = {
1077 #include "insn-format.def"
1097 /* Generally, we'll extract operands into this structures, operate upon
1098 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1099 of routines below for more details. */
1101 bool g_out
, g_out2
, g_in1
, g_in2
;
1102 TCGv_i64 out
, out2
, in1
, in2
;
1106 /* Instructions can place constraints on their operands, raising specification
1107 exceptions if they are violated. To make this easy to automate, each "in1",
1108 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1109 of the following, or 0. To make this easy to document, we'll put the
1110 SPEC_<name> defines next to <name>. */
1112 #define SPEC_r1_even 1
1113 #define SPEC_r2_even 2
1114 #define SPEC_r3_even 4
1115 #define SPEC_r1_f128 8
1116 #define SPEC_r2_f128 16
1118 /* Return values from translate_one, indicating the state of the TB. */
1120 /* We are not using a goto_tb (for whatever reason), but have updated
1121 the PC (for whatever reason), so there's no need to do it again on
1123 #define DISAS_PC_UPDATED DISAS_TARGET_0
1125 /* We have emitted one or more goto_tb. No fixup required. */
1126 #define DISAS_GOTO_TB DISAS_TARGET_1
1128 /* We have updated the PC and CC values. */
1129 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1131 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1132 updated the PC for the next instruction to be executed. */
1133 #define DISAS_PC_STALE DISAS_TARGET_3
1135 /* We are exiting the TB to the main loop. */
1136 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1139 /* Instruction flags */
1140 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1141 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1142 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1143 #define IF_BFP 0x0008 /* binary floating point instruction */
1144 #define IF_DFP 0x0010 /* decimal floating point instruction */
1145 #define IF_PRIV 0x0020 /* privileged instruction */
1146 #define IF_VEC 0x0040 /* vector instruction */
1147 #define IF_IO 0x0080 /* input/output instruction */
1158 /* Pre-process arguments before HELP_OP. */
1159 void (*help_in1
)(DisasContext
*, DisasOps
*);
1160 void (*help_in2
)(DisasContext
*, DisasOps
*);
1161 void (*help_prep
)(DisasContext
*, DisasOps
*);
1164 * Post-process output after HELP_OP.
1165 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1167 void (*help_wout
)(DisasContext
*, DisasOps
*);
1168 void (*help_cout
)(DisasContext
*, DisasOps
*);
1170 /* Implement the operation itself. */
1171 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1176 /* ====================================================================== */
1177 /* Miscellaneous helpers, used by several operations. */
1179 static void help_l2_shift(DisasContext
*s
, DisasOps
*o
, int mask
)
1181 int b2
= get_field(s
, b2
);
1182 int d2
= get_field(s
, d2
);
1185 o
->in2
= tcg_const_i64(d2
& mask
);
1187 o
->in2
= get_address(s
, 0, b2
, d2
);
1188 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1192 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1194 if (dest
== s
->pc_tmp
) {
1195 per_branch(s
, true);
1198 if (use_goto_tb(s
, dest
)) {
1200 per_breaking_event(s
);
1202 tcg_gen_movi_i64(psw_addr
, dest
);
1203 tcg_gen_exit_tb(s
->base
.tb
, 0);
1204 return DISAS_GOTO_TB
;
1206 tcg_gen_movi_i64(psw_addr
, dest
);
1207 per_branch(s
, false);
1208 return DISAS_PC_UPDATED
;
1212 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1213 bool is_imm
, int imm
, TCGv_i64 cdest
)
1216 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1219 /* Take care of the special cases first. */
1220 if (c
->cond
== TCG_COND_NEVER
) {
1225 if (dest
== s
->pc_tmp
) {
1226 /* Branch to next. */
1227 per_branch(s
, true);
1231 if (c
->cond
== TCG_COND_ALWAYS
) {
1232 ret
= help_goto_direct(s
, dest
);
1237 /* E.g. bcr %r0 -> no branch. */
1241 if (c
->cond
== TCG_COND_ALWAYS
) {
1242 tcg_gen_mov_i64(psw_addr
, cdest
);
1243 per_branch(s
, false);
1244 ret
= DISAS_PC_UPDATED
;
1249 if (use_goto_tb(s
, s
->pc_tmp
)) {
1250 if (is_imm
&& use_goto_tb(s
, dest
)) {
1251 /* Both exits can use goto_tb. */
1254 lab
= gen_new_label();
1256 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1258 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1261 /* Branch not taken. */
1263 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1264 tcg_gen_exit_tb(s
->base
.tb
, 0);
1268 per_breaking_event(s
);
1270 tcg_gen_movi_i64(psw_addr
, dest
);
1271 tcg_gen_exit_tb(s
->base
.tb
, 1);
1273 ret
= DISAS_GOTO_TB
;
1275 /* Fallthru can use goto_tb, but taken branch cannot. */
1276 /* Store taken branch destination before the brcond. This
1277 avoids having to allocate a new local temp to hold it.
1278 We'll overwrite this in the not taken case anyway. */
1280 tcg_gen_mov_i64(psw_addr
, cdest
);
1283 lab
= gen_new_label();
1285 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1287 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1290 /* Branch not taken. */
1293 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1294 tcg_gen_exit_tb(s
->base
.tb
, 0);
1298 tcg_gen_movi_i64(psw_addr
, dest
);
1300 per_breaking_event(s
);
1301 ret
= DISAS_PC_UPDATED
;
1304 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1305 Most commonly we're single-stepping or some other condition that
1306 disables all use of goto_tb. Just update the PC and exit. */
1308 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1310 cdest
= tcg_const_i64(dest
);
1314 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1316 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1318 TCGv_i32 t0
= tcg_temp_new_i32();
1319 TCGv_i64 t1
= tcg_temp_new_i64();
1320 TCGv_i64 z
= tcg_const_i64(0);
1321 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1322 tcg_gen_extu_i32_i64(t1
, t0
);
1323 tcg_temp_free_i32(t0
);
1324 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1325 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1326 tcg_temp_free_i64(t1
);
1327 tcg_temp_free_i64(z
);
1331 tcg_temp_free_i64(cdest
);
1333 tcg_temp_free_i64(next
);
1335 ret
= DISAS_PC_UPDATED
;
1343 /* ====================================================================== */
1344 /* The operations. These perform the bulk of the work for any insn,
1345 usually after the operands have been loaded and output initialized. */
1347 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1349 tcg_gen_abs_i64(o
->out
, o
->in2
);
1353 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1355 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1359 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1361 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1365 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1367 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1368 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1372 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1374 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1378 static DisasJumpType
op_addu64(DisasContext
*s
, DisasOps
*o
)
1380 tcg_gen_movi_i64(cc_src
, 0);
1381 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1385 /* Compute carry into cc_src. */
1386 static void compute_carry(DisasContext
*s
)
1390 /* The carry value is already in cc_src (1,0). */
1393 tcg_gen_addi_i64(cc_src
, cc_src
, 1);
1399 /* The carry flag is the msb of CC; compute into cc_src. */
1400 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
1401 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
1406 static DisasJumpType
op_addc32(DisasContext
*s
, DisasOps
*o
)
1409 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1410 tcg_gen_add_i64(o
->out
, o
->out
, cc_src
);
1414 static DisasJumpType
op_addc64(DisasContext
*s
, DisasOps
*o
)
1418 TCGv_i64 zero
= tcg_const_i64(0);
1419 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, zero
);
1420 tcg_gen_add2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
1421 tcg_temp_free_i64(zero
);
1426 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1428 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1430 o
->in1
= tcg_temp_new_i64();
1432 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1434 /* Perform the atomic addition in memory. */
1435 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1439 /* Recompute also for atomic case: needed for setting CC. */
1440 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1443 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1448 static DisasJumpType
op_asiu64(DisasContext
*s
, DisasOps
*o
)
1450 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1452 o
->in1
= tcg_temp_new_i64();
1454 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1456 /* Perform the atomic addition in memory. */
1457 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1461 /* Recompute also for atomic case: needed for setting CC. */
1462 tcg_gen_movi_i64(cc_src
, 0);
1463 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1466 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1471 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1473 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1477 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1479 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1483 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1485 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1486 return_low128(o
->out2
);
1490 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1492 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1496 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1498 int shift
= s
->insn
->data
& 0xff;
1499 int size
= s
->insn
->data
>> 8;
1500 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1503 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1504 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1505 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1507 /* Produce the CC from only the bits manipulated. */
1508 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1509 set_cc_nz_u64(s
, cc_dst
);
1513 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1515 o
->in1
= tcg_temp_new_i64();
1517 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1518 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1520 /* Perform the atomic operation in memory. */
1521 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1525 /* Recompute also for atomic case: needed for setting CC. */
1526 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1528 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1529 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1534 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1536 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1538 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1539 per_branch(s
, false);
1540 return DISAS_PC_UPDATED
;
1546 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1550 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1551 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1555 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1556 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1557 t
= tcg_temp_new_i64();
1558 tcg_gen_shri_i64(t
, psw_mask
, 16);
1559 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1560 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1561 tcg_gen_extu_i32_i64(t
, cc_op
);
1562 tcg_gen_shli_i64(t
, t
, 28);
1563 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1564 tcg_temp_free_i64(t
);
1567 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1569 save_link_info(s
, o
);
1571 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1572 per_branch(s
, false);
1573 return DISAS_PC_UPDATED
;
1579 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1581 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1582 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
, i2
));
1585 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1587 int m1
= get_field(s
, m1
);
1588 bool is_imm
= have_field(s
, i2
);
1589 int imm
= is_imm
? get_field(s
, i2
) : 0;
1592 /* BCR with R2 = 0 causes no branching */
1593 if (have_field(s
, r2
) && get_field(s
, r2
) == 0) {
1595 /* Perform serialization */
1596 /* FIXME: check for fast-BCR-serialization facility */
1597 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1600 /* Perform serialization */
1601 /* FIXME: perform checkpoint-synchronisation */
1602 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1607 disas_jcc(s
, &c
, m1
);
1608 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1611 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1613 int r1
= get_field(s
, r1
);
1614 bool is_imm
= have_field(s
, i2
);
1615 int imm
= is_imm
? get_field(s
, i2
) : 0;
1619 c
.cond
= TCG_COND_NE
;
1624 t
= tcg_temp_new_i64();
1625 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1626 store_reg32_i64(r1
, t
);
1627 c
.u
.s32
.a
= tcg_temp_new_i32();
1628 c
.u
.s32
.b
= tcg_const_i32(0);
1629 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1630 tcg_temp_free_i64(t
);
1632 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1635 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1637 int r1
= get_field(s
, r1
);
1638 int imm
= get_field(s
, i2
);
1642 c
.cond
= TCG_COND_NE
;
1647 t
= tcg_temp_new_i64();
1648 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1649 tcg_gen_subi_i64(t
, t
, 1);
1650 store_reg32h_i64(r1
, t
);
1651 c
.u
.s32
.a
= tcg_temp_new_i32();
1652 c
.u
.s32
.b
= tcg_const_i32(0);
1653 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1654 tcg_temp_free_i64(t
);
1656 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1659 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1661 int r1
= get_field(s
, r1
);
1662 bool is_imm
= have_field(s
, i2
);
1663 int imm
= is_imm
? get_field(s
, i2
) : 0;
1666 c
.cond
= TCG_COND_NE
;
1671 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1672 c
.u
.s64
.a
= regs
[r1
];
1673 c
.u
.s64
.b
= tcg_const_i64(0);
1675 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1678 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1680 int r1
= get_field(s
, r1
);
1681 int r3
= get_field(s
, r3
);
1682 bool is_imm
= have_field(s
, i2
);
1683 int imm
= is_imm
? get_field(s
, i2
) : 0;
1687 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1692 t
= tcg_temp_new_i64();
1693 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1694 c
.u
.s32
.a
= tcg_temp_new_i32();
1695 c
.u
.s32
.b
= tcg_temp_new_i32();
1696 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1697 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1698 store_reg32_i64(r1
, t
);
1699 tcg_temp_free_i64(t
);
1701 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1704 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1706 int r1
= get_field(s
, r1
);
1707 int r3
= get_field(s
, r3
);
1708 bool is_imm
= have_field(s
, i2
);
1709 int imm
= is_imm
? get_field(s
, i2
) : 0;
1712 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1715 if (r1
== (r3
| 1)) {
1716 c
.u
.s64
.b
= load_reg(r3
| 1);
1719 c
.u
.s64
.b
= regs
[r3
| 1];
1723 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1724 c
.u
.s64
.a
= regs
[r1
];
1727 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1730 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1732 int imm
, m3
= get_field(s
, m3
);
1736 c
.cond
= ltgt_cond
[m3
];
1737 if (s
->insn
->data
) {
1738 c
.cond
= tcg_unsigned_cond(c
.cond
);
1740 c
.is_64
= c
.g1
= c
.g2
= true;
1744 is_imm
= have_field(s
, i4
);
1746 imm
= get_field(s
, i4
);
1749 o
->out
= get_address(s
, 0, get_field(s
, b4
),
1753 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1756 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1758 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1763 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1765 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1770 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1772 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1777 static TCGv_i32
fpinst_extract_m34(DisasContext
*s
, bool m3_with_fpe
,
1780 const bool fpe
= s390_has_feat(S390_FEAT_FLOATING_POINT_EXT
);
1781 uint8_t m3
= get_field(s
, m3
);
1782 uint8_t m4
= get_field(s
, m4
);
1784 /* m3 field was introduced with FPE */
1785 if (!fpe
&& m3_with_fpe
) {
1788 /* m4 field was introduced with FPE */
1789 if (!fpe
&& m4_with_fpe
) {
1793 /* Check for valid rounding modes. Mode 3 was introduced later. */
1794 if (m3
== 2 || m3
> 7 || (!fpe
&& m3
== 3)) {
1795 gen_program_exception(s
, PGM_SPECIFICATION
);
1799 return tcg_const_i32(deposit32(m3
, 4, 4, m4
));
1802 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1804 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1807 return DISAS_NORETURN
;
1809 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1810 tcg_temp_free_i32(m34
);
1815 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1817 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1820 return DISAS_NORETURN
;
1822 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1823 tcg_temp_free_i32(m34
);
1828 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1830 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1833 return DISAS_NORETURN
;
1835 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1836 tcg_temp_free_i32(m34
);
1841 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1843 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1846 return DISAS_NORETURN
;
1848 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1849 tcg_temp_free_i32(m34
);
1854 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1856 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1859 return DISAS_NORETURN
;
1861 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1862 tcg_temp_free_i32(m34
);
1867 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1869 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1872 return DISAS_NORETURN
;
1874 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1875 tcg_temp_free_i32(m34
);
1880 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1882 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1885 return DISAS_NORETURN
;
1887 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1888 tcg_temp_free_i32(m34
);
1893 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1895 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1898 return DISAS_NORETURN
;
1900 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1901 tcg_temp_free_i32(m34
);
1906 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1908 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1911 return DISAS_NORETURN
;
1913 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1914 tcg_temp_free_i32(m34
);
1919 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1921 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1924 return DISAS_NORETURN
;
1926 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1927 tcg_temp_free_i32(m34
);
1932 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1934 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1937 return DISAS_NORETURN
;
1939 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1940 tcg_temp_free_i32(m34
);
1945 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1947 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1950 return DISAS_NORETURN
;
1952 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1953 tcg_temp_free_i32(m34
);
1958 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1960 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1963 return DISAS_NORETURN
;
1965 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m34
);
1966 tcg_temp_free_i32(m34
);
1970 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1972 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1975 return DISAS_NORETURN
;
1977 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m34
);
1978 tcg_temp_free_i32(m34
);
1982 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1984 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1987 return DISAS_NORETURN
;
1989 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m34
);
1990 tcg_temp_free_i32(m34
);
1991 return_low128(o
->out2
);
1995 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
1997 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2000 return DISAS_NORETURN
;
2002 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m34
);
2003 tcg_temp_free_i32(m34
);
2007 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
2009 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2012 return DISAS_NORETURN
;
2014 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2015 tcg_temp_free_i32(m34
);
2019 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
2021 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2024 return DISAS_NORETURN
;
2026 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2027 tcg_temp_free_i32(m34
);
2028 return_low128(o
->out2
);
2032 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
2034 int r2
= get_field(s
, r2
);
2035 TCGv_i64 len
= tcg_temp_new_i64();
2037 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
2039 return_low128(o
->out
);
2041 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
2042 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
2043 tcg_temp_free_i64(len
);
2048 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
2050 int l
= get_field(s
, l1
);
2055 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2056 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2059 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2060 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2063 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2064 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2067 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2068 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2071 vl
= tcg_const_i32(l
);
2072 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2073 tcg_temp_free_i32(vl
);
2077 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2081 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
2083 int r1
= get_field(s
, r1
);
2084 int r2
= get_field(s
, r2
);
2087 /* r1 and r2 must be even. */
2088 if (r1
& 1 || r2
& 1) {
2089 gen_program_exception(s
, PGM_SPECIFICATION
);
2090 return DISAS_NORETURN
;
2093 t1
= tcg_const_i32(r1
);
2094 t2
= tcg_const_i32(r2
);
2095 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2096 tcg_temp_free_i32(t1
);
2097 tcg_temp_free_i32(t2
);
2102 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2104 int r1
= get_field(s
, r1
);
2105 int r3
= get_field(s
, r3
);
2108 /* r1 and r3 must be even. */
2109 if (r1
& 1 || r3
& 1) {
2110 gen_program_exception(s
, PGM_SPECIFICATION
);
2111 return DISAS_NORETURN
;
2114 t1
= tcg_const_i32(r1
);
2115 t3
= tcg_const_i32(r3
);
2116 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2117 tcg_temp_free_i32(t1
);
2118 tcg_temp_free_i32(t3
);
2123 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2125 int r1
= get_field(s
, r1
);
2126 int r3
= get_field(s
, r3
);
2129 /* r1 and r3 must be even. */
2130 if (r1
& 1 || r3
& 1) {
2131 gen_program_exception(s
, PGM_SPECIFICATION
);
2132 return DISAS_NORETURN
;
2135 t1
= tcg_const_i32(r1
);
2136 t3
= tcg_const_i32(r3
);
2137 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2138 tcg_temp_free_i32(t1
);
2139 tcg_temp_free_i32(t3
);
2144 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2146 TCGv_i32 m3
= tcg_const_i32(get_field(s
, m3
));
2147 TCGv_i32 t1
= tcg_temp_new_i32();
2148 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2149 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2151 tcg_temp_free_i32(t1
);
2152 tcg_temp_free_i32(m3
);
2156 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2158 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2160 return_low128(o
->in2
);
2164 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2166 TCGv_i64 t
= tcg_temp_new_i64();
2167 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2168 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2169 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2170 tcg_temp_free_i64(t
);
2174 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2176 int d2
= get_field(s
, d2
);
2177 int b2
= get_field(s
, b2
);
2180 /* Note that in1 = R3 (new value) and
2181 in2 = (zero-extended) R1 (expected value). */
2183 addr
= get_address(s
, 0, b2
, d2
);
2184 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2185 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2186 tcg_temp_free_i64(addr
);
2188 /* Are the memory and expected values (un)equal? Note that this setcond
2189 produces the output CC value, thus the NE sense of the test. */
2190 cc
= tcg_temp_new_i64();
2191 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2192 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2193 tcg_temp_free_i64(cc
);
2199 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2201 int r1
= get_field(s
, r1
);
2202 int r3
= get_field(s
, r3
);
2203 int d2
= get_field(s
, d2
);
2204 int b2
= get_field(s
, b2
);
2205 DisasJumpType ret
= DISAS_NEXT
;
2207 TCGv_i32 t_r1
, t_r3
;
2209 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2210 addr
= get_address(s
, 0, b2
, d2
);
2211 t_r1
= tcg_const_i32(r1
);
2212 t_r3
= tcg_const_i32(r3
);
2213 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
2214 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2215 } else if (HAVE_CMPXCHG128
) {
2216 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2218 gen_helper_exit_atomic(cpu_env
);
2219 ret
= DISAS_NORETURN
;
2221 tcg_temp_free_i64(addr
);
2222 tcg_temp_free_i32(t_r1
);
2223 tcg_temp_free_i32(t_r3
);
2229 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2231 int r3
= get_field(s
, r3
);
2232 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2234 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2235 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2237 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2239 tcg_temp_free_i32(t_r3
);
2245 #ifndef CONFIG_USER_ONLY
2246 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2248 MemOp mop
= s
->insn
->data
;
2249 TCGv_i64 addr
, old
, cc
;
2250 TCGLabel
*lab
= gen_new_label();
2252 /* Note that in1 = R1 (zero-extended expected value),
2253 out = R1 (original reg), out2 = R1+1 (new value). */
2255 addr
= tcg_temp_new_i64();
2256 old
= tcg_temp_new_i64();
2257 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2258 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2259 get_mem_index(s
), mop
| MO_ALIGN
);
2260 tcg_temp_free_i64(addr
);
2262 /* Are the memory and expected values (un)equal? */
2263 cc
= tcg_temp_new_i64();
2264 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2265 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2267 /* Write back the output now, so that it happens before the
2268 following branch, so that we don't need local temps. */
2269 if ((mop
& MO_SIZE
) == MO_32
) {
2270 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2272 tcg_gen_mov_i64(o
->out
, old
);
2274 tcg_temp_free_i64(old
);
2276 /* If the comparison was equal, and the LSB of R2 was set,
2277 then we need to flush the TLB (for all cpus). */
2278 tcg_gen_xori_i64(cc
, cc
, 1);
2279 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2280 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2281 tcg_temp_free_i64(cc
);
2283 gen_helper_purge(cpu_env
);
2290 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2292 TCGv_i64 t1
= tcg_temp_new_i64();
2293 TCGv_i32 t2
= tcg_temp_new_i32();
2294 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2295 gen_helper_cvd(t1
, t2
);
2296 tcg_temp_free_i32(t2
);
2297 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2298 tcg_temp_free_i64(t1
);
2302 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2304 int m3
= get_field(s
, m3
);
2305 TCGLabel
*lab
= gen_new_label();
2308 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2309 if (s
->insn
->data
) {
2310 c
= tcg_unsigned_cond(c
);
2312 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2321 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2323 int m3
= get_field(s
, m3
);
2324 int r1
= get_field(s
, r1
);
2325 int r2
= get_field(s
, r2
);
2326 TCGv_i32 tr1
, tr2
, chk
;
2328 /* R1 and R2 must both be even. */
2329 if ((r1
| r2
) & 1) {
2330 gen_program_exception(s
, PGM_SPECIFICATION
);
2331 return DISAS_NORETURN
;
2333 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2337 tr1
= tcg_const_i32(r1
);
2338 tr2
= tcg_const_i32(r2
);
2339 chk
= tcg_const_i32(m3
);
2341 switch (s
->insn
->data
) {
2343 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2346 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2349 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2352 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2355 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2358 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2361 g_assert_not_reached();
2364 tcg_temp_free_i32(tr1
);
2365 tcg_temp_free_i32(tr2
);
2366 tcg_temp_free_i32(chk
);
2371 #ifndef CONFIG_USER_ONLY
2372 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2374 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
2375 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
2376 TCGv_i32 func_code
= tcg_const_i32(get_field(s
, i2
));
2378 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2380 tcg_temp_free_i32(func_code
);
2381 tcg_temp_free_i32(r3
);
2382 tcg_temp_free_i32(r1
);
2387 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2389 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2390 return_low128(o
->out
);
2394 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2396 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2397 return_low128(o
->out
);
2401 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2403 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2404 return_low128(o
->out
);
2408 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2410 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2411 return_low128(o
->out
);
2415 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2417 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2421 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2423 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2427 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2429 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2430 return_low128(o
->out2
);
2434 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2436 int r2
= get_field(s
, r2
);
2437 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2441 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2443 /* No cache information provided. */
2444 tcg_gen_movi_i64(o
->out
, -1);
2448 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2450 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2454 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2456 int r1
= get_field(s
, r1
);
2457 int r2
= get_field(s
, r2
);
2458 TCGv_i64 t
= tcg_temp_new_i64();
2460 /* Note the "subsequently" in the PoO, which implies a defined result
2461 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2462 tcg_gen_shri_i64(t
, psw_mask
, 32);
2463 store_reg32_i64(r1
, t
);
2465 store_reg32_i64(r2
, psw_mask
);
2468 tcg_temp_free_i64(t
);
2472 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2474 int r1
= get_field(s
, r1
);
2478 /* Nested EXECUTE is not allowed. */
2479 if (unlikely(s
->ex_value
)) {
2480 gen_program_exception(s
, PGM_EXECUTE
);
2481 return DISAS_NORETURN
;
2488 v1
= tcg_const_i64(0);
2493 ilen
= tcg_const_i32(s
->ilen
);
2494 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2495 tcg_temp_free_i32(ilen
);
2498 tcg_temp_free_i64(v1
);
2501 return DISAS_PC_CC_UPDATED
;
2504 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2506 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2509 return DISAS_NORETURN
;
2511 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m34
);
2512 tcg_temp_free_i32(m34
);
2516 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2518 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2521 return DISAS_NORETURN
;
2523 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m34
);
2524 tcg_temp_free_i32(m34
);
2528 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2530 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2533 return DISAS_NORETURN
;
2535 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2536 return_low128(o
->out2
);
2537 tcg_temp_free_i32(m34
);
2541 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2543 /* We'll use the original input for cc computation, since we get to
2544 compare that against 0, which ought to be better than comparing
2545 the real output against 64. It also lets cc_dst be a convenient
2546 temporary during our computation. */
2547 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2549 /* R1 = IN ? CLZ(IN) : 64. */
2550 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2552 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2553 value by 64, which is undefined. But since the shift is 64 iff the
2554 input is zero, we still get the correct result after and'ing. */
2555 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2556 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2557 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2561 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2563 int m3
= get_field(s
, m3
);
2564 int pos
, len
, base
= s
->insn
->data
;
2565 TCGv_i64 tmp
= tcg_temp_new_i64();
2570 /* Effectively a 32-bit load. */
2571 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2578 /* Effectively a 16-bit load. */
2579 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2587 /* Effectively an 8-bit load. */
2588 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2593 pos
= base
+ ctz32(m3
) * 8;
2594 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2595 ccm
= ((1ull << len
) - 1) << pos
;
2599 /* This is going to be a sequence of loads and inserts. */
2600 pos
= base
+ 32 - 8;
2604 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2605 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2606 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2609 m3
= (m3
<< 1) & 0xf;
2615 tcg_gen_movi_i64(tmp
, ccm
);
2616 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2617 tcg_temp_free_i64(tmp
);
2621 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2623 int shift
= s
->insn
->data
& 0xff;
2624 int size
= s
->insn
->data
>> 8;
2625 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2629 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2634 t1
= tcg_temp_new_i64();
2635 tcg_gen_extract_i64(t1
, psw_mask
, 40, 4);
2636 t2
= tcg_temp_new_i64();
2637 tcg_gen_extu_i32_i64(t2
, cc_op
);
2638 tcg_gen_deposit_i64(t1
, t1
, t2
, 4, 60);
2639 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 24, 8);
2640 tcg_temp_free_i64(t1
);
2641 tcg_temp_free_i64(t2
);
2645 #ifndef CONFIG_USER_ONLY
2646 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2650 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2651 m4
= tcg_const_i32(get_field(s
, m4
));
2653 m4
= tcg_const_i32(0);
2655 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2656 tcg_temp_free_i32(m4
);
2660 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2664 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2665 m4
= tcg_const_i32(get_field(s
, m4
));
2667 m4
= tcg_const_i32(0);
2669 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2670 tcg_temp_free_i32(m4
);
2674 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2676 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2681 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2683 int r1
= have_field(s
, r1
) ? get_field(s
, r1
) : 0;
2684 int r2
= have_field(s
, r2
) ? get_field(s
, r2
) : 0;
2685 int r3
= have_field(s
, r3
) ? get_field(s
, r3
) : 0;
2686 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2688 switch (s
->insn
->data
) {
2689 case S390_FEAT_TYPE_KMA
:
2690 if (r3
== r1
|| r3
== r2
) {
2691 gen_program_exception(s
, PGM_SPECIFICATION
);
2692 return DISAS_NORETURN
;
2695 case S390_FEAT_TYPE_KMCTR
:
2696 if (r3
& 1 || !r3
) {
2697 gen_program_exception(s
, PGM_SPECIFICATION
);
2698 return DISAS_NORETURN
;
2701 case S390_FEAT_TYPE_PPNO
:
2702 case S390_FEAT_TYPE_KMF
:
2703 case S390_FEAT_TYPE_KMC
:
2704 case S390_FEAT_TYPE_KMO
:
2705 case S390_FEAT_TYPE_KM
:
2706 if (r1
& 1 || !r1
) {
2707 gen_program_exception(s
, PGM_SPECIFICATION
);
2708 return DISAS_NORETURN
;
2711 case S390_FEAT_TYPE_KMAC
:
2712 case S390_FEAT_TYPE_KIMD
:
2713 case S390_FEAT_TYPE_KLMD
:
2714 if (r2
& 1 || !r2
) {
2715 gen_program_exception(s
, PGM_SPECIFICATION
);
2716 return DISAS_NORETURN
;
2719 case S390_FEAT_TYPE_PCKMO
:
2720 case S390_FEAT_TYPE_PCC
:
2723 g_assert_not_reached();
2726 t_r1
= tcg_const_i32(r1
);
2727 t_r2
= tcg_const_i32(r2
);
2728 t_r3
= tcg_const_i32(r3
);
2729 type
= tcg_const_i32(s
->insn
->data
);
2730 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2732 tcg_temp_free_i32(t_r1
);
2733 tcg_temp_free_i32(t_r2
);
2734 tcg_temp_free_i32(t_r3
);
2735 tcg_temp_free_i32(type
);
2739 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2741 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2746 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2748 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2753 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2755 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2760 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2762 /* The real output is indeed the original value in memory;
2763 recompute the addition for the computation of CC. */
2764 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2765 s
->insn
->data
| MO_ALIGN
);
2766 /* However, we need to recompute the addition for setting CC. */
2767 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2771 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2773 /* The real output is indeed the original value in memory;
2774 recompute the addition for the computation of CC. */
2775 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2776 s
->insn
->data
| MO_ALIGN
);
2777 /* However, we need to recompute the operation for setting CC. */
2778 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2782 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2784 /* The real output is indeed the original value in memory;
2785 recompute the addition for the computation of CC. */
2786 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2787 s
->insn
->data
| MO_ALIGN
);
2788 /* However, we need to recompute the operation for setting CC. */
2789 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2793 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2795 /* The real output is indeed the original value in memory;
2796 recompute the addition for the computation of CC. */
2797 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2798 s
->insn
->data
| MO_ALIGN
);
2799 /* However, we need to recompute the operation for setting CC. */
2800 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2804 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2806 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2810 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2812 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2815 return DISAS_NORETURN
;
2817 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
, m34
);
2818 tcg_temp_free_i32(m34
);
2822 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2824 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2827 return DISAS_NORETURN
;
2829 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2830 tcg_temp_free_i32(m34
);
2834 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2836 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2839 return DISAS_NORETURN
;
2841 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2842 tcg_temp_free_i32(m34
);
2846 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2848 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2849 return_low128(o
->out2
);
2853 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2855 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2856 return_low128(o
->out2
);
2860 static DisasJumpType
op_lde(DisasContext
*s
, DisasOps
*o
)
2862 tcg_gen_shli_i64(o
->out
, o
->in2
, 32);
2866 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2868 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2872 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2874 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2878 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2880 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2884 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2886 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2890 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2892 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2896 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2898 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2902 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2904 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2908 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2910 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2914 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2916 TCGLabel
*lab
= gen_new_label();
2917 store_reg32_i64(get_field(s
, r1
), o
->in2
);
2918 /* The value is stored even in case of trap. */
2919 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2925 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2927 TCGLabel
*lab
= gen_new_label();
2928 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2929 /* The value is stored even in case of trap. */
2930 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2936 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2938 TCGLabel
*lab
= gen_new_label();
2939 store_reg32h_i64(get_field(s
, r1
), o
->in2
);
2940 /* The value is stored even in case of trap. */
2941 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
/* LOAD LOGICAL AND TRAP: zero-extended 32-bit load, trap if result is zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap on zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
/*
 * LOAD ON CONDITION: out = cond(m3) ? in2 : in1.
 * The 64-bit comparison form maps directly onto movcond; the 32-bit form
 * materializes the condition as a 0/1 value first, then selects on it.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
2999 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): load control registers r1..r3 via helper. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
/* LOAD CONTROL (64-bit): load control registers r1..r3 via helper. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
/* LOAD REAL ADDRESS: address translation via helper; helper sets cc. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* LOAD PROGRAM PARAMETER: store in2 into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
/* LOAD PSW: load a short (64-bit) PSW from memory and install it. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
/* LOAD PSW EXTENDED: load a full 128-bit PSW from memory and install it. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 via helper. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
/*
 * LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
 * consecutive words.  The first and last registers are loaded first so that
 * a page fault leaves no partial update of the remaining registers.
 */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
/*
 * LOAD MULTIPLE HIGH: like op_lm32, but each loaded word goes into the
 * high half of the target register (store_reg32h_i64).
 */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
/*
 * LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
 * consecutive doublewords.  First and last are loaded before the rest so a
 * fault cannot leave a partially updated register range.
 */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
/*
 * LOAD PAIR DISJOINT: two interlocked loads from independent addresses.
 * Under parallel execution this cannot be done atomically inline, so we
 * stop the world and single-step instead.
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
/* LOAD PAIR FROM QUADWORD: 128-bit load; atomic helper when parallel. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        /* No 128-bit atomics on the host: punt to the slow path. */
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3257 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: bypass address translation via MMU_REAL_IDX. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of the source. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
/*
 * LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes to next block
 * boundary of size 2^(m3+6)).  m3 > 6 is a specification exception.
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -addr mod block_size = distance to the boundary. */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
/*
 * MONITOR CALL: a NOP in user mode; in system mode, call out so monitor
 * events can be raised.  Monitor class above 0xff is a specification
 * exception.
 */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
/* Generic move: alias out to in2 and steal ownership of the temp. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
/*
 * MVCDK-style move that also sets access register 1 according to the
 * current address-space control, so the extract-style helpers see the
 * right address space.
 */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env,
                              offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);
    return DISAS_NEXT;
}
/* Move a 128-bit pair: alias out/out2 to in1/in2 and take ownership. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
/* MOVE (character): memory-to-memory copy of l1+1 bytes via helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* MOVE INVERSE: copy bytes in reversed order via helper. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* MOVE LONG: register pairs r1/r2 must be even; helper sets cc. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE LONG EXTENDED: register pairs r1/r3 must be even; helper sets cc. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE LONG UNICODE: register pairs r1/r3 must be even; helper sets cc. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE WITH OPTIONAL SPECIFICATIONS: helper sets cc. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3452 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: length register taken from the l1 field; helper sets cc. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE TO SECONDARY: length register taken from the l1 field; helper sets cc. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE NUMERICS: copy only the low (numeric) nibbles via helper. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* MOVE WITH OFFSET: nibble-offset move via helper. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* MOVE PAGE: helper consumes r0 for flags and sets cc. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE STRING: helper sets cc. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* MOVE ZONES: copy only the high (zone) nibbles via helper. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* MULTIPLY: 64x64 -> low 64 bits. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* MULTIPLY LOGICAL: unsigned 64x64 -> 128 (high in out, low in out2). */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* MULTIPLY SINGLE: signed 64x64 -> 128 (high in out, low in out2). */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Short BFP multiply via the meeb helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Short-to-long BFP multiply via the mdeb helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Long BFP multiply via the mdb helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Extended (128-bit) BFP multiply; the low half comes back via retxl. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* Long-to-extended BFP multiply; the low half comes back via retxl. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* Short BFP multiply-and-add: out = in1 * in2 + f(r3). */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
/* Long BFP multiply-and-add: out = in1 * in2 + f(r3). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
/* Short BFP multiply-and-subtract: out = in1 * in2 - f(r3). */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
/* Long BFP multiply-and-subtract: out = in1 * in2 - f(r3). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
/* LOAD NEGATIVE: out = -|in2|, selected via movcond on the sign. */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}
/* LOAD NEGATIVE (short BFP): force the sign bit (bit 31) on. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
/* LOAD NEGATIVE (long BFP): force the sign bit (bit 63) on. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
/* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
/* AND (character): memory AND memory via helper; helper sets cc. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* LOAD COMPLEMENT: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
/* LOAD COMPLEMENT (short BFP): flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
/* LOAD COMPLEMENT (long BFP): flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
/* LOAD COMPLEMENT (extended BFP): flip the sign in the high doubleword. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
/* OR (character): memory OR memory via helper; helper sets cc. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/*
 * OR IMMEDIATE into a sub-field of the register.  insn->data encodes the
 * field: low byte = shift, high byte = width in bits.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/*
 * OR into memory.  Without the interlocked-access facility this is a
 * non-atomic load/or/store sequence; with it, a single atomic fetch-or.
 * The result is recomputed either way so that CC can be set from it.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
/* PACK: decimal pack via helper. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* PACK ASCII: source length (l2+1) must not exceed 32 bytes. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* PACK UNICODE: source length (l2+1) must be even and at most 64 bytes. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
/* POPULATION COUNT via helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3763 #ifndef CONFIG_USER_ONLY
/* PURGE TLB via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
/*
 * ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG).
 * Rotate in2 left by i5, then insert the bit range [i3..i4] into out,
 * optionally zeroing the untouched bits (i4 bit 0x80).  The H/L variants
 * restrict the operation to one doubleword half via pmask.  Fast paths
 * use extract/deposit when the masks line up.
 * NOTE(review): several interior lines were elided in this copy; body
 * reconstructed — confirm against upstream before relying on details.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
/*
 * ROTATE THEN <AND|OR|XOR> SELECTED BITS: rotate in2 by i5, apply the
 * boolean op (selected by op2) on the masked bit range [i3..i4], and set
 * CC from the affected bits.  i3 bit 0x80 selects the test-only form.
 * NOTE(review): interior lines were elided in this copy; body
 * reconstructed — confirm against upstream before relying on details.
 */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
/* Byte-swap the low halfword, zero-extending input and output. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
/* Byte-swap the low word, zero-extending input and output. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}
/* Byte-swap the full doubleword. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
/* ROTATE LEFT SINGLE LOGICAL (32-bit), done in i32 then zero-extended. */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3951 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED via helper; helper sets cc. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* SET ADDRESS SPACE CONTROL FAST via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
/*
 * SET ADDRESSING MODE: insn->data selects 24/31/64-bit mode; deposit the
 * mode bits into the PSW mask.
 * NOTE(review): the address-mask setup lines were elided in this copy;
 * body reconstructed — confirm against upstream.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
/* SET ACCESS REGISTER: store in2 into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
/* Short BFP subtract via helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Long BFP subtract via helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* Extended BFP subtract; the low half comes back via retxl. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
/* Short BFP square root via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
/* Long BFP square root via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
/* Extended BFP square root; the low half comes back via retxl. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4047 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP) via helper; helper sets cc. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
/* SIGNAL PROCESSOR via helper; helper sets cc. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
/*
 * STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data): branch
 * around the store when the condition in m3 is NOT fulfilled.
 * NOTE(review): some scaffolding lines were elided in this copy; body
 * reconstructed — confirm against upstream.
 */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
/*
 * SHIFT LEFT SINGLE (arithmetic): shift but preserve the sign bit
 * (insn->data gives the sign bit position: 31 or 63).
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SET FPC via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
/* SET FPC AND SIGNAL via helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
/* SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
/* SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
/* SET DFP ROUNDING MODE: deposit 3 bits directly into env->fpc. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/* SET PROGRAM MASK: cc from bits 28-29, program mask into the PSW. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
/*
 * EXTRACT CPU TIME: GR0 = first operand - CPU timer, GR1 = second
 * operand, r3 = doubleword loaded from the r3 address.  All operands are
 * fetched before any register is modified.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4230 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: deposit bits 4-7 of in2 into the PSW key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
/* SET STORAGE KEY EXTENDED via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SET SYSTEM MASK: replace the top byte of the PSW mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
/* STORE CPU ADDRESS: read env->core_id. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
/* STORE CLOCK via helper. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
/* STORE CLOCK EXTENDED: widen the 64-bit TOD to the 104-bit format. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4293 #ifndef CONFIG_USER_ONLY
4294 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4296 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4297 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
/* SET CLOCK COMPARATOR: hand the second operand to the helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
/* SET CLOCK PROGRAMMABLE FIELD: the helper consumes the value in r0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
/* STORE CLOCK COMPARATOR: the helper produces the value; the wout
   generator stores it. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4320 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4322 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4323 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4324 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4325 tcg_temp_free_i32(r1
);
4326 tcg_temp_free_i32(r3
);
/* STORE CONTROL (32-bit): store control registers r1..r3 at the
   second-operand address via the helper. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
/* STORE CPU ID: read the precomputed cpuid from the CPU state; the
   wout generator performs the store. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
/* SET CPU TIMER: hand the second operand to the helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
/* STORE FACILITY LIST: the helper writes the facility bits itself
   (to the fixed low-core location), so no output operand is needed. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
/* STORE CPU TIMER: the helper produces the current timer value; the
   wout generator stores it. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4364 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4366 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
/* SET PREFIX: hand the second operand (new prefix address) to the helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4377 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4379 gen_helper_xsch(cpu_env
, regs
[1]);
4384 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4386 gen_helper_csch(cpu_env
, regs
[1]);
4391 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4393 gen_helper_hsch(cpu_env
, regs
[1]);
4398 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4400 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4405 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4407 gen_helper_rchp(cpu_env
, regs
[1]);
4412 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4414 gen_helper_rsch(cpu_env
, regs
[1]);
4419 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4421 gen_helper_sal(cpu_env
, regs
[1]);
4425 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4427 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
/* SIGNAL ADAPTER: not implemented; report "not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
/* STORE CHANNEL PATH STATUS: deliberately a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}
4444 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4446 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4451 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4453 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4458 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4460 gen_helper_stcrw(cpu_env
, o
->in2
);
4465 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4467 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4472 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4474 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4479 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4481 gen_helper_chsc(cpu_env
, o
->in2
);
/* STORE PREFIX: read the prefix register from the CPU state and mask
   it to the architecturally valid bits (0x7fffe000). */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4493 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4495 uint64_t i2
= get_field(s
, i2
);
4498 /* It is important to do what the instruction name says: STORE THEN.
4499 If we let the output hook perform the store then if we fault and
4500 restart, we'll have the wrong SYSTEM MASK in place. */
4501 t
= tcg_temp_new_i64();
4502 tcg_gen_shri_i64(t
, psw_mask
, 56);
4503 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4504 tcg_temp_free_i64(t
);
4506 if (s
->fields
.op
== 0xac) {
4507 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4508 (i2
<< 56) | 0x00ffffffffffffffull
);
4510 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4513 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4514 return DISAS_PC_STALE_NOCHAIN
;
4517 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4519 tcg_gen_qemu_st_tl(o
->in1
, o
->in2
, MMU_REAL_IDX
, s
->insn
->data
);
4521 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
4523 gen_helper_per_store_real(cpu_env
);
4529 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4531 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
/* Store the low byte of in1 at the address in in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
/* Store the low halfword of in1 at the address in in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
/* Store the low word of in1 at the address in in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
/* Store the full doubleword in1 at the address in in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4560 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4562 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4563 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
4564 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4565 tcg_temp_free_i32(r1
);
4566 tcg_temp_free_i32(r3
);
4570 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4572 int m3
= get_field(s
, m3
);
4573 int pos
, base
= s
->insn
->data
;
4574 TCGv_i64 tmp
= tcg_temp_new_i64();
4576 pos
= base
+ ctz32(m3
) * 8;
4579 /* Effectively a 32-bit store. */
4580 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4581 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4587 /* Effectively a 16-bit store. */
4588 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4589 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4596 /* Effectively an 8-bit store. */
4597 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4598 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4602 /* This is going to be a sequence of shifts and stores. */
4603 pos
= base
+ 32 - 8;
4606 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4607 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4608 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4610 m3
= (m3
<< 1) & 0xf;
4615 tcg_temp_free_i64(tmp
);
4619 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4621 int r1
= get_field(s
, r1
);
4622 int r3
= get_field(s
, r3
);
4623 int size
= s
->insn
->data
;
4624 TCGv_i64 tsize
= tcg_const_i64(size
);
4628 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4630 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4635 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4639 tcg_temp_free_i64(tsize
);
4643 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4645 int r1
= get_field(s
, r1
);
4646 int r3
= get_field(s
, r3
);
4647 TCGv_i64 t
= tcg_temp_new_i64();
4648 TCGv_i64 t4
= tcg_const_i64(4);
4649 TCGv_i64 t32
= tcg_const_i64(32);
4652 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4653 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4657 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4661 tcg_temp_free_i64(t
);
4662 tcg_temp_free_i64(t4
);
4663 tcg_temp_free_i64(t32
);
4667 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4669 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
4670 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4671 } else if (HAVE_ATOMIC128
) {
4672 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4674 gen_helper_exit_atomic(cpu_env
);
4675 return DISAS_NORETURN
;
4680 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4682 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4683 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4685 gen_helper_srst(cpu_env
, r1
, r2
);
4687 tcg_temp_free_i32(r1
);
4688 tcg_temp_free_i32(r2
);
4693 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4695 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4696 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4698 gen_helper_srstu(cpu_env
, r1
, r2
);
4700 tcg_temp_free_i32(r1
);
4701 tcg_temp_free_i32(r2
);
/* SUBTRACT: out = in1 - in2.  The condition code is computed by the
   per-insn cout generator. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
/* SUBTRACT LOGICAL (64-bit): a 128-bit subtract with zero borrow-in;
   the difference lands in out and the borrow-out in cc_src. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4719 /* Compute borrow (0, -1) into cc_src. */
4720 static void compute_borrow(DisasContext
*s
)
4724 /* The borrow value is already in cc_src (0,-1). */
4730 /* The carry flag is the msb of CC; compute into cc_src. */
4731 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
4732 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
4735 /* Convert carry (1,0) to borrow (0,-1). */
4736 tcg_gen_subi_i64(cc_src
, cc_src
, 1);
4741 static DisasJumpType
op_subb32(DisasContext
*s
, DisasOps
*o
)
4745 /* Borrow is {0, -1}, so add to subtract. */
4746 tcg_gen_add_i64(o
->out
, o
->in1
, cc_src
);
4747 tcg_gen_sub_i64(o
->out
, o
->out
, o
->in2
);
4751 static DisasJumpType
op_subb64(DisasContext
*s
, DisasOps
*o
)
4756 * Borrow is {0, -1}, so add to subtract; replicate the
4757 * borrow input to produce 128-bit -1 for the addition.
4759 TCGv_i64 zero
= tcg_const_i64(0);
4760 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, cc_src
);
4761 tcg_gen_sub2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
4762 tcg_temp_free_i64(zero
);
4767 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4774 t
= tcg_const_i32(get_field(s
, i1
) & 0xff);
4775 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4776 tcg_temp_free_i32(t
);
4778 t
= tcg_const_i32(s
->ilen
);
4779 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4780 tcg_temp_free_i32(t
);
4782 gen_exception(EXCP_SVC
);
4783 return DISAS_NORETURN
;
4786 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4790 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4791 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4792 gen_op_movi_cc(s
, cc
);
4796 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4798 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4803 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4805 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4810 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4812 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4817 #ifndef CONFIG_USER_ONLY
4819 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4821 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4826 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4828 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4835 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4837 TCGv_i32 l1
= tcg_const_i32(get_field(s
, l1
) + 1);
4838 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4839 tcg_temp_free_i32(l1
);
4844 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4846 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4847 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4848 tcg_temp_free_i32(l
);
4853 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4855 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4856 return_low128(o
->out2
);
4861 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4863 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4864 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4865 tcg_temp_free_i32(l
);
4870 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4872 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4873 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4874 tcg_temp_free_i32(l
);
4879 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4881 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
4882 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
4883 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4884 TCGv_i32 tst
= tcg_temp_new_i32();
4885 int m3
= get_field(s
, m3
);
4887 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4891 tcg_gen_movi_i32(tst
, -1);
4893 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4894 if (s
->insn
->opc
& 3) {
4895 tcg_gen_ext8u_i32(tst
, tst
);
4897 tcg_gen_ext16u_i32(tst
, tst
);
4900 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4902 tcg_temp_free_i32(r1
);
4903 tcg_temp_free_i32(r2
);
4904 tcg_temp_free_i32(sizes
);
4905 tcg_temp_free_i32(tst
);
4910 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4912 TCGv_i32 t1
= tcg_const_i32(0xff);
4913 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4914 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4915 tcg_temp_free_i32(t1
);
4920 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4922 TCGv_i32 l
= tcg_const_i32(get_field(s
, l1
));
4923 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4924 tcg_temp_free_i32(l
);
4928 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4930 int l1
= get_field(s
, l1
) + 1;
4933 /* The length must not exceed 32 bytes. */
4935 gen_program_exception(s
, PGM_SPECIFICATION
);
4936 return DISAS_NORETURN
;
4938 l
= tcg_const_i32(l1
);
4939 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4940 tcg_temp_free_i32(l
);
4945 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4947 int l1
= get_field(s
, l1
) + 1;
4950 /* The length must be even and should not exceed 64 bytes. */
4951 if ((l1
& 1) || (l1
> 64)) {
4952 gen_program_exception(s
, PGM_SPECIFICATION
);
4953 return DISAS_NORETURN
;
4955 l
= tcg_const_i32(l1
);
4956 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4957 tcg_temp_free_i32(l
);
4963 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4965 int d1
= get_field(s
, d1
);
4966 int d2
= get_field(s
, d2
);
4967 int b1
= get_field(s
, b1
);
4968 int b2
= get_field(s
, b2
);
4969 int l
= get_field(s
, l1
);
4972 o
->addr1
= get_address(s
, 0, b1
, d1
);
4974 /* If the addresses are identical, this is a store/memset of zero. */
4975 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4976 o
->in2
= tcg_const_i64(0);
4980 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4983 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4987 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4990 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4994 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4997 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
5001 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
5003 gen_op_movi_cc(s
, 0);
5007 /* But in general we'll defer to a helper. */
5008 o
->in2
= get_address(s
, 0, b2
, d2
);
5009 t32
= tcg_const_i32(l
);
5010 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
5011 tcg_temp_free_i32(t32
);
/* EXCLUSIVE OR: out = in1 ^ in2. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
5022 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
5024 int shift
= s
->insn
->data
& 0xff;
5025 int size
= s
->insn
->data
>> 8;
5026 uint64_t mask
= ((1ull << size
) - 1) << shift
;
5029 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
5030 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5032 /* Produce the CC from only the bits manipulated. */
5033 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
5034 set_cc_nz_u64(s
, cc_dst
);
5038 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
5040 o
->in1
= tcg_temp_new_i64();
5042 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5043 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5045 /* Perform the atomic operation in memory. */
5046 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
5050 /* Recompute also for atomic case: needed for setting CC. */
5051 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5053 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5054 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
/* Produce a constant zero output (used e.g. for zeroing destinations). */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}
5065 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
5067 o
->out
= tcg_const_i64(0);
5073 #ifndef CONFIG_USER_ONLY
5074 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
5076 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5078 gen_helper_clp(cpu_env
, r2
);
5079 tcg_temp_free_i32(r2
);
5084 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
5086 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5087 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5089 gen_helper_pcilg(cpu_env
, r1
, r2
);
5090 tcg_temp_free_i32(r1
);
5091 tcg_temp_free_i32(r2
);
5096 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
5098 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5099 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5101 gen_helper_pcistg(cpu_env
, r1
, r2
);
5102 tcg_temp_free_i32(r1
);
5103 tcg_temp_free_i32(r2
);
5108 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
5110 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5111 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5113 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5114 tcg_temp_free_i32(ar
);
5115 tcg_temp_free_i32(r1
);
5120 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
5122 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
5126 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
5128 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5129 TCGv_i32 r2
= tcg_const_i32(get_field(s
, r2
));
5131 gen_helper_rpcit(cpu_env
, r1
, r2
);
5132 tcg_temp_free_i32(r1
);
5133 tcg_temp_free_i32(r2
);
5138 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
5140 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5141 TCGv_i32 r3
= tcg_const_i32(get_field(s
, r3
));
5142 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5144 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
5145 tcg_temp_free_i32(ar
);
5146 tcg_temp_free_i32(r1
);
5147 tcg_temp_free_i32(r3
);
5152 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
5154 TCGv_i32 r1
= tcg_const_i32(get_field(s
, r1
));
5155 TCGv_i32 ar
= tcg_const_i32(get_field(s
, b2
));
5157 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5158 tcg_temp_free_i32(ar
);
5159 tcg_temp_free_i32(r1
);
5165 #include "translate_vx.c.inc"
5167 /* ====================================================================== */
5168 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5169 the original inputs), update the various cc data structures in order to
5170 be able to compute the new condition code. */
5172 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
5174 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
5177 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
5179 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
5182 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
5184 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
5187 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
5189 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
/* CC for 32-bit logical add: carry lives in bits 32+ of the 64-bit
   result, the value in the low 32 bits; split them into cc_src/cc_dst
   for the generic CC_OP_ADDU computation. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}
5199 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
5201 gen_op_update2_cc_i64(s
, CC_OP_ADDU
, cc_src
, o
->out
);
5204 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
5206 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
5209 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
5211 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
5214 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
5216 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
5219 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
5221 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
5224 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
5226 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
5229 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
5231 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
5234 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
5236 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
5239 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
5241 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
5244 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
5246 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
5249 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
5251 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
5254 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
5256 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
/* CC = nonzero test of the low 32 bits of the result. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}
5265 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5267 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5270 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5272 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5275 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5277 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5280 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5282 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5285 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5287 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
/* CC for 32-bit logical subtract: the arithmetic shift replicates the
   borrow (0 or -1) from bit 32 into cc_src; the low 32 bits are the
   value for the generic CC_OP_SUBU computation. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}
5297 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5299 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, o
->out
);
5302 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5304 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5307 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5309 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5312 static void cout_muls32(DisasContext
*s
, DisasOps
*o
)
5314 gen_op_update1_cc_i64(s
, CC_OP_MULS_32
, o
->out
);
5317 static void cout_muls64(DisasContext
*s
, DisasOps
*o
)
5319 /* out contains "high" part, out2 contains "low" part of 128 bit result */
5320 gen_op_update2_cc_i64(s
, CC_OP_MULS_64
, o
->out
, o
->out2
);
5323 /* ====================================================================== */
5324 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5325 with the TCG register to which we will write. Used in combination with
5326 the "wout" generators, in some cases we need a new temporary, and in
5327 some cases we can write to a TCG global. */
/* Allocate a fresh temporary as the output destination. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
5333 #define SPEC_prep_new 0
5335 static void prep_new_P(DisasContext
*s
, DisasOps
*o
)
5337 o
->out
= tcg_temp_new_i64();
5338 o
->out2
= tcg_temp_new_i64();
5340 #define SPEC_prep_new_P 0
5342 static void prep_r1(DisasContext
*s
, DisasOps
*o
)
5344 o
->out
= regs
[get_field(s
, r1
)];
5347 #define SPEC_prep_r1 0
5349 static void prep_r1_P(DisasContext
*s
, DisasOps
*o
)
5351 int r1
= get_field(s
, r1
);
5353 o
->out2
= regs
[r1
+ 1];
5354 o
->g_out
= o
->g_out2
= true;
5356 #define SPEC_prep_r1_P SPEC_r1_even
5358 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5359 static void prep_x1(DisasContext
*s
, DisasOps
*o
)
5361 o
->out
= load_freg(get_field(s
, r1
));
5362 o
->out2
= load_freg(get_field(s
, r1
) + 2);
5364 #define SPEC_prep_x1 SPEC_r1_f128
5366 /* ====================================================================== */
5367 /* The "Write OUTput" generators. These generally perform some non-trivial
5368 copy of data to TCG globals, or to main memory. The trivial cases are
5369 generally handled by having a "prep" generator install the TCG global
5370 as the destination of the operation. */
5372 static void wout_r1(DisasContext
*s
, DisasOps
*o
)
5374 store_reg(get_field(s
, r1
), o
->out
);
5376 #define SPEC_wout_r1 0
5378 static void wout_out2_r1(DisasContext
*s
, DisasOps
*o
)
5380 store_reg(get_field(s
, r1
), o
->out2
);
5382 #define SPEC_wout_out2_r1 0
5384 static void wout_r1_8(DisasContext
*s
, DisasOps
*o
)
5386 int r1
= get_field(s
, r1
);
5387 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5389 #define SPEC_wout_r1_8 0
5391 static void wout_r1_16(DisasContext
*s
, DisasOps
*o
)
5393 int r1
= get_field(s
, r1
);
5394 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5396 #define SPEC_wout_r1_16 0
5398 static void wout_r1_32(DisasContext
*s
, DisasOps
*o
)
5400 store_reg32_i64(get_field(s
, r1
), o
->out
);
5402 #define SPEC_wout_r1_32 0
5404 static void wout_r1_32h(DisasContext
*s
, DisasOps
*o
)
5406 store_reg32h_i64(get_field(s
, r1
), o
->out
);
5408 #define SPEC_wout_r1_32h 0
5410 static void wout_r1_P32(DisasContext
*s
, DisasOps
*o
)
5412 int r1
= get_field(s
, r1
);
5413 store_reg32_i64(r1
, o
->out
);
5414 store_reg32_i64(r1
+ 1, o
->out2
);
5416 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Write a 64-bit result to the even/odd register pair r1/r1+1 as two
   32-bit halves: low half to r1+1, high half to r1.  Note this shifts
   o->out in place. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
5425 #define SPEC_wout_r1_D32 SPEC_r1_even
5427 static void wout_r3_P32(DisasContext
*s
, DisasOps
*o
)
5429 int r3
= get_field(s
, r3
);
5430 store_reg32_i64(r3
, o
->out
);
5431 store_reg32_i64(r3
+ 1, o
->out2
);
5433 #define SPEC_wout_r3_P32 SPEC_r3_even
5435 static void wout_r3_P64(DisasContext
*s
, DisasOps
*o
)
5437 int r3
= get_field(s
, r3
);
5438 store_reg(r3
, o
->out
);
5439 store_reg(r3
+ 1, o
->out2
);
5441 #define SPEC_wout_r3_P64 SPEC_r3_even
5443 static void wout_e1(DisasContext
*s
, DisasOps
*o
)
5445 store_freg32_i64(get_field(s
, r1
), o
->out
);
5447 #define SPEC_wout_e1 0
5449 static void wout_f1(DisasContext
*s
, DisasOps
*o
)
5451 store_freg(get_field(s
, r1
), o
->out
);
5453 #define SPEC_wout_f1 0
5455 static void wout_x1(DisasContext
*s
, DisasOps
*o
)
5457 int f1
= get_field(s
, r1
);
5458 store_freg(f1
, o
->out
);
5459 store_freg(f1
+ 2, o
->out2
);
5461 #define SPEC_wout_x1 SPEC_r1_f128
5463 static void wout_cond_r1r2_32(DisasContext
*s
, DisasOps
*o
)
5465 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5466 store_reg32_i64(get_field(s
, r1
), o
->out
);
5469 #define SPEC_wout_cond_r1r2_32 0
5471 static void wout_cond_e1e2(DisasContext
*s
, DisasOps
*o
)
5473 if (get_field(s
, r1
) != get_field(s
, r2
)) {
5474 store_freg32_i64(get_field(s
, r1
), o
->out
);
5477 #define SPEC_wout_cond_e1e2 0
5479 static void wout_m1_8(DisasContext
*s
, DisasOps
*o
)
5481 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5483 #define SPEC_wout_m1_8 0
5485 static void wout_m1_16(DisasContext
*s
, DisasOps
*o
)
5487 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5489 #define SPEC_wout_m1_16 0
5491 #ifndef CONFIG_USER_ONLY
5492 static void wout_m1_16a(DisasContext
*s
, DisasOps
*o
)
5494 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5496 #define SPEC_wout_m1_16a 0
5499 static void wout_m1_32(DisasContext
*s
, DisasOps
*o
)
5501 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5503 #define SPEC_wout_m1_32 0
5505 #ifndef CONFIG_USER_ONLY
5506 static void wout_m1_32a(DisasContext
*s
, DisasOps
*o
)
5508 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5510 #define SPEC_wout_m1_32a 0
5513 static void wout_m1_64(DisasContext
*s
, DisasOps
*o
)
5515 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5517 #define SPEC_wout_m1_64 0
5519 #ifndef CONFIG_USER_ONLY
5520 static void wout_m1_64a(DisasContext
*s
, DisasOps
*o
)
5522 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5524 #define SPEC_wout_m1_64a 0
5527 static void wout_m2_32(DisasContext
*s
, DisasOps
*o
)
5529 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5531 #define SPEC_wout_m2_32 0
5533 static void wout_in2_r1(DisasContext
*s
, DisasOps
*o
)
5535 store_reg(get_field(s
, r1
), o
->in2
);
5537 #define SPEC_wout_in2_r1 0
5539 static void wout_in2_r1_32(DisasContext
*s
, DisasOps
*o
)
5541 store_reg32_i64(get_field(s
, r1
), o
->in2
);
5543 #define SPEC_wout_in2_r1_32 0
5545 /* ====================================================================== */
5546 /* The "INput 1" generators. These load the first operand to an insn. */
5548 static void in1_r1(DisasContext
*s
, DisasOps
*o
)
5550 o
->in1
= load_reg(get_field(s
, r1
));
5552 #define SPEC_in1_r1 0
5554 static void in1_r1_o(DisasContext
*s
, DisasOps
*o
)
5556 o
->in1
= regs
[get_field(s
, r1
)];
5559 #define SPEC_in1_r1_o 0
5561 static void in1_r1_32s(DisasContext
*s
, DisasOps
*o
)
5563 o
->in1
= tcg_temp_new_i64();
5564 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5566 #define SPEC_in1_r1_32s 0
5568 static void in1_r1_32u(DisasContext
*s
, DisasOps
*o
)
5570 o
->in1
= tcg_temp_new_i64();
5571 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
)]);
5573 #define SPEC_in1_r1_32u 0
5575 static void in1_r1_sr32(DisasContext
*s
, DisasOps
*o
)
5577 o
->in1
= tcg_temp_new_i64();
5578 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r1
)], 32);
5580 #define SPEC_in1_r1_sr32 0
5582 static void in1_r1p1(DisasContext
*s
, DisasOps
*o
)
5584 o
->in1
= load_reg(get_field(s
, r1
) + 1);
5586 #define SPEC_in1_r1p1 SPEC_r1_even
5588 static void in1_r1p1_o(DisasContext
*s
, DisasOps
*o
)
5590 o
->in1
= regs
[get_field(s
, r1
) + 1];
5593 #define SPEC_in1_r1p1_o SPEC_r1_even
5595 static void in1_r1p1_32s(DisasContext
*s
, DisasOps
*o
)
5597 o
->in1
= tcg_temp_new_i64();
5598 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5600 #define SPEC_in1_r1p1_32s SPEC_r1_even
5602 static void in1_r1p1_32u(DisasContext
*s
, DisasOps
*o
)
5604 o
->in1
= tcg_temp_new_i64();
5605 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r1
) + 1]);
5607 #define SPEC_in1_r1p1_32u SPEC_r1_even
5609 static void in1_r1_D32(DisasContext
*s
, DisasOps
*o
)
5611 int r1
= get_field(s
, r1
);
5612 o
->in1
= tcg_temp_new_i64();
5613 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5615 #define SPEC_in1_r1_D32 SPEC_r1_even
5617 static void in1_r2(DisasContext
*s
, DisasOps
*o
)
5619 o
->in1
= load_reg(get_field(s
, r2
));
5621 #define SPEC_in1_r2 0
5623 static void in1_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5625 o
->in1
= tcg_temp_new_i64();
5626 tcg_gen_shri_i64(o
->in1
, regs
[get_field(s
, r2
)], 32);
5628 #define SPEC_in1_r2_sr32 0
5630 static void in1_r2_32u(DisasContext
*s
, DisasOps
*o
)
5632 o
->in1
= tcg_temp_new_i64();
5633 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r2
)]);
5635 #define SPEC_in1_r2_32u 0
5637 static void in1_r3(DisasContext
*s
, DisasOps
*o
)
5639 o
->in1
= load_reg(get_field(s
, r3
));
5641 #define SPEC_in1_r3 0
5643 static void in1_r3_o(DisasContext
*s
, DisasOps
*o
)
5645 o
->in1
= regs
[get_field(s
, r3
)];
5648 #define SPEC_in1_r3_o 0
5650 static void in1_r3_32s(DisasContext
*s
, DisasOps
*o
)
5652 o
->in1
= tcg_temp_new_i64();
5653 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5655 #define SPEC_in1_r3_32s 0
5657 static void in1_r3_32u(DisasContext
*s
, DisasOps
*o
)
5659 o
->in1
= tcg_temp_new_i64();
5660 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(s
, r3
)]);
5662 #define SPEC_in1_r3_32u 0
5664 static void in1_r3_D32(DisasContext
*s
, DisasOps
*o
)
5666 int r3
= get_field(s
, r3
);
5667 o
->in1
= tcg_temp_new_i64();
5668 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5670 #define SPEC_in1_r3_D32 SPEC_r3_even
5672 static void in1_e1(DisasContext
*s
, DisasOps
*o
)
5674 o
->in1
= load_freg32_i64(get_field(s
, r1
));
5676 #define SPEC_in1_e1 0
5678 static void in1_f1(DisasContext
*s
, DisasOps
*o
)
5680 o
->in1
= load_freg(get_field(s
, r1
));
5682 #define SPEC_in1_f1 0
5684 /* Load the high double word of an extended (128-bit) format FP number */
5685 static void in1_x2h(DisasContext
*s
, DisasOps
*o
)
5687 o
->in1
= load_freg(get_field(s
, r2
));
5689 #define SPEC_in1_x2h SPEC_r2_f128
5691 static void in1_f3(DisasContext
*s
, DisasOps
*o
)
5693 o
->in1
= load_freg(get_field(s
, r3
));
5695 #define SPEC_in1_f3 0
5697 static void in1_la1(DisasContext
*s
, DisasOps
*o
)
5699 o
->addr1
= get_address(s
, 0, get_field(s
, b1
), get_field(s
, d1
));
5701 #define SPEC_in1_la1 0
5703 static void in1_la2(DisasContext
*s
, DisasOps
*o
)
5705 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5706 o
->addr1
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5708 #define SPEC_in1_la2 0
5710 static void in1_m1_8u(DisasContext
*s
, DisasOps
*o
)
5713 o
->in1
= tcg_temp_new_i64();
5714 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5716 #define SPEC_in1_m1_8u 0
5718 static void in1_m1_16s(DisasContext
*s
, DisasOps
*o
)
5721 o
->in1
= tcg_temp_new_i64();
5722 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5724 #define SPEC_in1_m1_16s 0
5726 static void in1_m1_16u(DisasContext
*s
, DisasOps
*o
)
5729 o
->in1
= tcg_temp_new_i64();
5730 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5732 #define SPEC_in1_m1_16u 0
5734 static void in1_m1_32s(DisasContext
*s
, DisasOps
*o
)
5737 o
->in1
= tcg_temp_new_i64();
5738 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5740 #define SPEC_in1_m1_32s 0
5742 static void in1_m1_32u(DisasContext
*s
, DisasOps
*o
)
5745 o
->in1
= tcg_temp_new_i64();
5746 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5748 #define SPEC_in1_m1_32u 0
5750 static void in1_m1_64(DisasContext
*s
, DisasOps
*o
)
5753 o
->in1
= tcg_temp_new_i64();
5754 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5756 #define SPEC_in1_m1_64 0
5758 /* ====================================================================== */
5759 /* The "INput 2" generators. These load the second operand to an insn. */
5761 static void in2_r1_o(DisasContext
*s
, DisasOps
*o
)
5763 o
->in2
= regs
[get_field(s
, r1
)];
5766 #define SPEC_in2_r1_o 0
5768 static void in2_r1_16u(DisasContext
*s
, DisasOps
*o
)
5770 o
->in2
= tcg_temp_new_i64();
5771 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5773 #define SPEC_in2_r1_16u 0
5775 static void in2_r1_32u(DisasContext
*s
, DisasOps
*o
)
5777 o
->in2
= tcg_temp_new_i64();
5778 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r1
)]);
5780 #define SPEC_in2_r1_32u 0
5782 static void in2_r1_D32(DisasContext
*s
, DisasOps
*o
)
5784 int r1
= get_field(s
, r1
);
5785 o
->in2
= tcg_temp_new_i64();
5786 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5788 #define SPEC_in2_r1_D32 SPEC_r1_even
5790 static void in2_r2(DisasContext
*s
, DisasOps
*o
)
5792 o
->in2
= load_reg(get_field(s
, r2
));
5794 #define SPEC_in2_r2 0
5796 static void in2_r2_o(DisasContext
*s
, DisasOps
*o
)
5798 o
->in2
= regs
[get_field(s
, r2
)];
5801 #define SPEC_in2_r2_o 0
5803 static void in2_r2_nz(DisasContext
*s
, DisasOps
*o
)
5805 int r2
= get_field(s
, r2
);
5807 o
->in2
= load_reg(r2
);
5810 #define SPEC_in2_r2_nz 0
5812 static void in2_r2_8s(DisasContext
*s
, DisasOps
*o
)
5814 o
->in2
= tcg_temp_new_i64();
5815 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5817 #define SPEC_in2_r2_8s 0
5819 static void in2_r2_8u(DisasContext
*s
, DisasOps
*o
)
5821 o
->in2
= tcg_temp_new_i64();
5822 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5824 #define SPEC_in2_r2_8u 0
5826 static void in2_r2_16s(DisasContext
*s
, DisasOps
*o
)
5828 o
->in2
= tcg_temp_new_i64();
5829 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5831 #define SPEC_in2_r2_16s 0
5833 static void in2_r2_16u(DisasContext
*s
, DisasOps
*o
)
5835 o
->in2
= tcg_temp_new_i64();
5836 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5838 #define SPEC_in2_r2_16u 0
5840 static void in2_r3(DisasContext
*s
, DisasOps
*o
)
5842 o
->in2
= load_reg(get_field(s
, r3
));
5844 #define SPEC_in2_r3 0
5846 static void in2_r3_sr32(DisasContext
*s
, DisasOps
*o
)
5848 o
->in2
= tcg_temp_new_i64();
5849 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r3
)], 32);
5851 #define SPEC_in2_r3_sr32 0
5853 static void in2_r3_32u(DisasContext
*s
, DisasOps
*o
)
5855 o
->in2
= tcg_temp_new_i64();
5856 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r3
)]);
5858 #define SPEC_in2_r3_32u 0
5860 static void in2_r2_32s(DisasContext
*s
, DisasOps
*o
)
5862 o
->in2
= tcg_temp_new_i64();
5863 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5865 #define SPEC_in2_r2_32s 0
5867 static void in2_r2_32u(DisasContext
*s
, DisasOps
*o
)
5869 o
->in2
= tcg_temp_new_i64();
5870 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(s
, r2
)]);
5872 #define SPEC_in2_r2_32u 0
5874 static void in2_r2_sr32(DisasContext
*s
, DisasOps
*o
)
5876 o
->in2
= tcg_temp_new_i64();
5877 tcg_gen_shri_i64(o
->in2
, regs
[get_field(s
, r2
)], 32);
5879 #define SPEC_in2_r2_sr32 0
5881 static void in2_e2(DisasContext
*s
, DisasOps
*o
)
5883 o
->in2
= load_freg32_i64(get_field(s
, r2
));
5885 #define SPEC_in2_e2 0
5887 static void in2_f2(DisasContext
*s
, DisasOps
*o
)
5889 o
->in2
= load_freg(get_field(s
, r2
));
5891 #define SPEC_in2_f2 0
5893 /* Load the low double word of an extended (128-bit) format FP number */
5894 static void in2_x2l(DisasContext
*s
, DisasOps
*o
)
5896 o
->in2
= load_freg(get_field(s
, r2
) + 2);
5898 #define SPEC_in2_x2l SPEC_r2_f128
5900 static void in2_ra2(DisasContext
*s
, DisasOps
*o
)
5902 int r2
= get_field(s
, r2
);
5904 /* Note: *don't* treat !r2 as 0, use the reg value. */
5905 o
->in2
= tcg_temp_new_i64();
5906 gen_addi_and_wrap_i64(s
, o
->in2
, regs
[r2
], 0);
5908 #define SPEC_in2_ra2 0
5910 static void in2_a2(DisasContext
*s
, DisasOps
*o
)
5912 int x2
= have_field(s
, x2
) ? get_field(s
, x2
) : 0;
5913 o
->in2
= get_address(s
, x2
, get_field(s
, b2
), get_field(s
, d2
));
5915 #define SPEC_in2_a2 0
5917 static void in2_ri2(DisasContext
*s
, DisasOps
*o
)
5919 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(s
, i2
) * 2);
5921 #define SPEC_in2_ri2 0
5923 static void in2_sh32(DisasContext
*s
, DisasOps
*o
)
5925 help_l2_shift(s
, o
, 31);
5927 #define SPEC_in2_sh32 0
5929 static void in2_sh64(DisasContext
*s
, DisasOps
*o
)
5931 help_l2_shift(s
, o
, 63);
5933 #define SPEC_in2_sh64 0
5935 static void in2_m2_8u(DisasContext
*s
, DisasOps
*o
)
5938 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5940 #define SPEC_in2_m2_8u 0
5942 static void in2_m2_16s(DisasContext
*s
, DisasOps
*o
)
5945 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5947 #define SPEC_in2_m2_16s 0
5949 static void in2_m2_16u(DisasContext
*s
, DisasOps
*o
)
5952 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5954 #define SPEC_in2_m2_16u 0
5956 static void in2_m2_32s(DisasContext
*s
, DisasOps
*o
)
5959 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5961 #define SPEC_in2_m2_32s 0
5963 static void in2_m2_32u(DisasContext
*s
, DisasOps
*o
)
5966 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5968 #define SPEC_in2_m2_32u 0
5970 #ifndef CONFIG_USER_ONLY
5971 static void in2_m2_32ua(DisasContext
*s
, DisasOps
*o
)
5974 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5976 #define SPEC_in2_m2_32ua 0
5979 static void in2_m2_64(DisasContext
*s
, DisasOps
*o
)
5982 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5984 #define SPEC_in2_m2_64 0
5986 static void in2_m2_64w(DisasContext
*s
, DisasOps
*o
)
5989 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5990 gen_addi_and_wrap_i64(s
, o
->in2
, o
->in2
, 0);
5992 #define SPEC_in2_m2_64w 0
5994 #ifndef CONFIG_USER_ONLY
5995 static void in2_m2_64a(DisasContext
*s
, DisasOps
*o
)
5998 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
6000 #define SPEC_in2_m2_64a 0
6003 static void in2_mri2_16u(DisasContext
*s
, DisasOps
*o
)
6006 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
6008 #define SPEC_in2_mri2_16u 0
6010 static void in2_mri2_32s(DisasContext
*s
, DisasOps
*o
)
6013 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
6015 #define SPEC_in2_mri2_32s 0
6017 static void in2_mri2_32u(DisasContext
*s
, DisasOps
*o
)
6020 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
6022 #define SPEC_in2_mri2_32u 0
6024 static void in2_mri2_64(DisasContext
*s
, DisasOps
*o
)
6027 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
6029 #define SPEC_in2_mri2_64 0
6031 static void in2_i2(DisasContext
*s
, DisasOps
*o
)
6033 o
->in2
= tcg_const_i64(get_field(s
, i2
));
6035 #define SPEC_in2_i2 0
6037 static void in2_i2_8u(DisasContext
*s
, DisasOps
*o
)
6039 o
->in2
= tcg_const_i64((uint8_t)get_field(s
, i2
));
6041 #define SPEC_in2_i2_8u 0
6043 static void in2_i2_16u(DisasContext
*s
, DisasOps
*o
)
6045 o
->in2
= tcg_const_i64((uint16_t)get_field(s
, i2
));
6047 #define SPEC_in2_i2_16u 0
6049 static void in2_i2_32u(DisasContext
*s
, DisasOps
*o
)
6051 o
->in2
= tcg_const_i64((uint32_t)get_field(s
, i2
));
6053 #define SPEC_in2_i2_32u 0
6055 static void in2_i2_16u_shl(DisasContext
*s
, DisasOps
*o
)
6057 uint64_t i2
= (uint16_t)get_field(s
, i2
);
6058 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6060 #define SPEC_in2_i2_16u_shl 0
6062 static void in2_i2_32u_shl(DisasContext
*s
, DisasOps
*o
)
6064 uint64_t i2
= (uint32_t)get_field(s
, i2
);
6065 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
6067 #define SPEC_in2_i2_32u_shl 0
6069 #ifndef CONFIG_USER_ONLY
6070 static void in2_insn(DisasContext
*s
, DisasOps
*o
)
6072 o
->in2
= tcg_const_i64(s
->fields
.raw_insn
);
6074 #define SPEC_in2_insn 0
6077 /* ====================================================================== */
6079 /* Find opc within the table of insns. This is formulated as a switch
6080 statement so that (1) we get compile-time notice of cut-paste errors
6081 for duplicated opcodes, and (2) the compiler generates the binary
6082 search tree, rather than us having to post-process the table. */
6084 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6085 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6087 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6088 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6090 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6091 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6093 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6095 enum DisasInsnEnum
{
6096 #include "insn-data.def"
6100 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6105 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6107 .help_in1 = in1_##I1, \
6108 .help_in2 = in2_##I2, \
6109 .help_prep = prep_##P, \
6110 .help_wout = wout_##W, \
6111 .help_cout = cout_##CC, \
6112 .help_op = op_##OP, \
6116 /* Allow 0 to be used for NULL in the table below. */
6124 #define SPEC_in1_0 0
6125 #define SPEC_in2_0 0
6126 #define SPEC_prep_0 0
6127 #define SPEC_wout_0 0
6129 /* Give smaller names to the various facilities. */
6130 #define FAC_Z S390_FEAT_ZARCH
6131 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6132 #define FAC_DFP S390_FEAT_DFP
6133 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6134 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
6135 #define FAC_EE S390_FEAT_EXECUTE_EXT
6136 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
6137 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
6138 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6139 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6140 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6141 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
6142 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
6143 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6144 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6145 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
6146 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
6147 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
6148 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
6149 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
6150 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
6151 #define FAC_SFLE S390_FEAT_STFLE
6152 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6153 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6154 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6155 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
6156 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
6157 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
6158 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
6159 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6160 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
6161 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
6162 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6163 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6164 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6165 #define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6166 #define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
6167 #define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
6168 #define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
6169 #define FAC_V S390_FEAT_VECTOR /* vector facility */
6170 #define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6171 #define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6173 static const DisasInsn insn_info
[] = {
6174 #include "insn-data.def"
6178 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6179 case OPC: return &insn_info[insn_ ## NM];
6181 static const DisasInsn
*lookup_opc(uint16_t opc
)
6184 #include "insn-data.def"
6195 /* Extract a field from the insn. The INSN should be left-aligned in
6196 the uint64_t so that we can more easily utilize the big-bit-endian
6197 definitions we extract from the Principals of Operation. */
6199 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
6207 /* Zero extract the field from the insn. */
6208 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
6210 /* Sign-extend, or un-swap the field as necessary. */
6212 case 0: /* unsigned */
6214 case 1: /* signed */
6215 assert(f
->size
<= 32);
6216 m
= 1u << (f
->size
- 1);
6219 case 2: /* dl+dh split, signed 20 bit. */
6220 r
= ((int8_t)r
<< 12) | (r
>> 8);
6222 case 3: /* MSB stored in RXB */
6223 g_assert(f
->size
== 4);
6226 r
|= extract64(insn
, 63 - 36, 1) << 4;
6229 r
|= extract64(insn
, 63 - 37, 1) << 4;
6232 r
|= extract64(insn
, 63 - 38, 1) << 4;
6235 r
|= extract64(insn
, 63 - 39, 1) << 4;
6238 g_assert_not_reached();
6246 * Validate that the "compressed" encoding we selected above is valid.
6247 * I.e. we haven't made two different original fields overlap.
6249 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
6250 o
->presentC
|= 1 << f
->indexC
;
6251 o
->presentO
|= 1 << f
->indexO
;
6253 o
->c
[f
->indexC
] = r
;
6256 /* Lookup the insn at the current PC, extracting the operands into O and
6257 returning the info struct for the insn. Returns NULL for invalid insn. */
6259 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
)
6261 uint64_t insn
, pc
= s
->base
.pc_next
;
6263 const DisasInsn
*info
;
6265 if (unlikely(s
->ex_value
)) {
6266 /* Drop the EX data now, so that it's clear on exception paths. */
6267 TCGv_i64 zero
= tcg_const_i64(0);
6268 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
6269 tcg_temp_free_i64(zero
);
6271 /* Extract the values saved by EXECUTE. */
6272 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
6273 ilen
= s
->ex_value
& 0xf;
6276 insn
= ld_code2(env
, pc
);
6277 op
= (insn
>> 8) & 0xff;
6278 ilen
= get_ilen(op
);
6284 insn
= ld_code4(env
, pc
) << 32;
6287 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
6290 g_assert_not_reached();
6293 s
->pc_tmp
= s
->base
.pc_next
+ ilen
;
6296 /* We can't actually determine the insn format until we've looked up
6297 the full insn opcode. Which we can't do without locating the
6298 secondary opcode. Assume by default that OP2 is at bit 40; for
6299 those smaller insns that don't actually have a secondary opcode
6300 this will correctly result in OP2 = 0. */
6306 case 0xb2: /* S, RRF, RRE, IE */
6307 case 0xb3: /* RRE, RRD, RRF */
6308 case 0xb9: /* RRE, RRF */
6309 case 0xe5: /* SSE, SIL */
6310 op2
= (insn
<< 8) >> 56;
6314 case 0xc0: /* RIL */
6315 case 0xc2: /* RIL */
6316 case 0xc4: /* RIL */
6317 case 0xc6: /* RIL */
6318 case 0xc8: /* SSF */
6319 case 0xcc: /* RIL */
6320 op2
= (insn
<< 12) >> 60;
6322 case 0xc5: /* MII */
6323 case 0xc7: /* SMI */
6324 case 0xd0 ... 0xdf: /* SS */
6330 case 0xee ... 0xf3: /* SS */
6331 case 0xf8 ... 0xfd: /* SS */
6335 op2
= (insn
<< 40) >> 56;
6339 memset(&s
->fields
, 0, sizeof(s
->fields
));
6340 s
->fields
.raw_insn
= insn
;
6342 s
->fields
.op2
= op2
;
6344 /* Lookup the instruction. */
6345 info
= lookup_opc(op
<< 8 | op2
);
6348 /* If we found it, extract the operands. */
6350 DisasFormat fmt
= info
->fmt
;
6353 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
6354 extract_field(&s
->fields
, &format_info
[fmt
].op
[i
], insn
);
/* True if REG is an additional-FP register (not one of FPRs 0/2/4/6). */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

/* True if REG can be the base of a 128-bit FP register pair. */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
6371 static DisasJumpType
translate_one(CPUS390XState
*env
, DisasContext
*s
)
6373 const DisasInsn
*insn
;
6374 DisasJumpType ret
= DISAS_NEXT
;
6376 bool icount
= false;
6378 /* Search for the insn in the table. */
6379 insn
= extract_insn(env
, s
);
6381 /* Emit insn_start now that we know the ILEN. */
6382 tcg_gen_insn_start(s
->base
.pc_next
, s
->cc_op
, s
->ilen
);
6384 /* Not found means unimplemented/illegal opcode. */
6386 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6387 s
->fields
.op
, s
->fields
.op2
);
6388 gen_illegal_opcode(s
);
6389 ret
= DISAS_NORETURN
;
6393 #ifndef CONFIG_USER_ONLY
6394 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6395 TCGv_i64 addr
= tcg_const_i64(s
->base
.pc_next
);
6396 gen_helper_per_ifetch(cpu_env
, addr
);
6397 tcg_temp_free_i64(addr
);
6403 /* privileged instruction */
6404 if ((s
->base
.tb
->flags
& FLAG_MASK_PSTATE
) && (insn
->flags
& IF_PRIV
)) {
6405 gen_program_exception(s
, PGM_PRIVILEGED
);
6406 ret
= DISAS_NORETURN
;
6410 /* if AFP is not enabled, instructions and registers are forbidden */
6411 if (!(s
->base
.tb
->flags
& FLAG_MASK_AFP
)) {
6414 if ((insn
->flags
& IF_AFP1
) && is_afp_reg(get_field(s
, r1
))) {
6417 if ((insn
->flags
& IF_AFP2
) && is_afp_reg(get_field(s
, r2
))) {
6420 if ((insn
->flags
& IF_AFP3
) && is_afp_reg(get_field(s
, r3
))) {
6423 if (insn
->flags
& IF_BFP
) {
6426 if (insn
->flags
& IF_DFP
) {
6429 if (insn
->flags
& IF_VEC
) {
6433 gen_data_exception(dxc
);
6434 ret
= DISAS_NORETURN
;
6439 /* if vector instructions not enabled, executing them is forbidden */
6440 if (insn
->flags
& IF_VEC
) {
6441 if (!((s
->base
.tb
->flags
& FLAG_MASK_VECTOR
))) {
6442 gen_data_exception(0xfe);
6443 ret
= DISAS_NORETURN
;
6448 /* input/output is the special case for icount mode */
6449 if (unlikely(insn
->flags
& IF_IO
)) {
6450 icount
= tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
;
6457 /* Check for insn specification exceptions. */
6459 if ((insn
->spec
& SPEC_r1_even
&& get_field(s
, r1
) & 1) ||
6460 (insn
->spec
& SPEC_r2_even
&& get_field(s
, r2
) & 1) ||
6461 (insn
->spec
& SPEC_r3_even
&& get_field(s
, r3
) & 1) ||
6462 (insn
->spec
& SPEC_r1_f128
&& !is_fp_pair(get_field(s
, r1
))) ||
6463 (insn
->spec
& SPEC_r2_f128
&& !is_fp_pair(get_field(s
, r2
)))) {
6464 gen_program_exception(s
, PGM_SPECIFICATION
);
6465 ret
= DISAS_NORETURN
;
6470 /* Implement the instruction. */
6471 if (insn
->help_in1
) {
6472 insn
->help_in1(s
, &o
);
6474 if (insn
->help_in2
) {
6475 insn
->help_in2(s
, &o
);
6477 if (insn
->help_prep
) {
6478 insn
->help_prep(s
, &o
);
6480 if (insn
->help_op
) {
6481 ret
= insn
->help_op(s
, &o
);
6483 if (ret
!= DISAS_NORETURN
) {
6484 if (insn
->help_wout
) {
6485 insn
->help_wout(s
, &o
);
6487 if (insn
->help_cout
) {
6488 insn
->help_cout(s
, &o
);
6492 /* Free any temporaries created by the helpers. */
6493 if (o
.out
&& !o
.g_out
) {
6494 tcg_temp_free_i64(o
.out
);
6496 if (o
.out2
&& !o
.g_out2
) {
6497 tcg_temp_free_i64(o
.out2
);
6499 if (o
.in1
&& !o
.g_in1
) {
6500 tcg_temp_free_i64(o
.in1
);
6502 if (o
.in2
&& !o
.g_in2
) {
6503 tcg_temp_free_i64(o
.in2
);
6506 tcg_temp_free_i64(o
.addr1
);
6509 /* io should be the last instruction in tb when icount is enabled */
6510 if (unlikely(icount
&& ret
== DISAS_NEXT
)) {
6511 ret
= DISAS_PC_STALE
;
6514 #ifndef CONFIG_USER_ONLY
6515 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6516 /* An exception might be triggered, save PSW if not already done. */
6517 if (ret
== DISAS_NEXT
|| ret
== DISAS_PC_STALE
) {
6518 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
6521 /* Call the helper to check for a possible PER exception. */
6522 gen_helper_per_check_exception(cpu_env
);
6527 /* Advance to the next instruction. */
6528 s
->base
.pc_next
= s
->pc_tmp
;
6532 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6534 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6537 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6538 dc
->base
.pc_first
&= 0x7fffffff;
6539 dc
->base
.pc_next
= dc
->base
.pc_first
;
6542 dc
->cc_op
= CC_OP_DYNAMIC
;
6543 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6544 dc
->do_debug
= dc
->base
.singlestep_enabled
;
6547 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6551 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6555 static void s390x_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
6557 CPUS390XState
*env
= cs
->env_ptr
;
6558 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6560 dc
->base
.is_jmp
= translate_one(env
, dc
);
6561 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
6562 uint64_t page_start
;
6564 page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
6565 if (dc
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
|| dc
->ex_value
) {
6566 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
6571 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6573 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6575 switch (dc
->base
.is_jmp
) {
6577 case DISAS_NORETURN
:
6579 case DISAS_TOO_MANY
:
6580 case DISAS_PC_STALE
:
6581 case DISAS_PC_STALE_NOCHAIN
:
6582 update_psw_addr(dc
);
6584 case DISAS_PC_UPDATED
:
6585 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6586 cc op type is in env */
6589 case DISAS_PC_CC_UPDATED
:
6590 /* Exit the TB, either by raising a debug exception or by return. */
6592 gen_exception(EXCP_DEBUG
);
6593 } else if ((dc
->base
.tb
->flags
& FLAG_MASK_PER
) ||
6594 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6595 tcg_gen_exit_tb(NULL
, 0);
6597 tcg_gen_lookup_and_goto_ptr();
6601 g_assert_not_reached();
6605 static void s390x_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
6607 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6609 if (unlikely(dc
->ex_value
)) {
6610 /* ??? Unfortunately log_target_disas can't use host memory. */
6611 qemu_log("IN: EXECUTE %016" PRIx64
, dc
->ex_value
);
6613 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
6614 log_target_disas(cs
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
6618 static const TranslatorOps s390x_tr_ops
= {
6619 .init_disas_context
= s390x_tr_init_disas_context
,
6620 .tb_start
= s390x_tr_tb_start
,
6621 .insn_start
= s390x_tr_insn_start
,
6622 .translate_insn
= s390x_tr_translate_insn
,
6623 .tb_stop
= s390x_tr_tb_stop
,
6624 .disas_log
= s390x_tr_disas_log
,
6627 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
6631 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
, max_insns
);
6634 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6637 int cc_op
= data
[1];
6639 env
->psw
.addr
= data
[0];
6641 /* Update the CC opcode if it is not already up-to-date. */
6642 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {
6647 env
->int_pgm_ilen
= data
[2];