4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
37 #include "tcg-op-gvec.h"
39 #include "qemu/host-utils.h"
40 #include "exec/cpu_ldst.h"
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
46 #include "exec/translator.h"
48 #include "qemu/atomic128.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext
;
53 typedef struct DisasInsn DisasInsn
;
54 typedef struct DisasFields DisasFields
;
57 DisasContextBase base
;
58 const DisasInsn
*insn
;
62 * During translate_one(), pc_tmp is used to determine the instruction
63 * to be executed after base.pc_next - e.g. next sequential instruction
72 /* Information carried about a condition to be evaluated. */
79 struct { TCGv_i64 a
, b
; } s64
;
80 struct { TCGv_i32 a
, b
; } s32
;
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could / could not fold into an
   inline TCG comparison; dumped for tuning when debugging is enabled. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
89 static void pc_to_link_info(TCGv_i64 out
, DisasContext
*s
, uint64_t pc
)
93 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
94 if (s
->base
.tb
->flags
& FLAG_MASK_64
) {
95 tcg_gen_movi_i64(out
, pc
);
100 assert(!(s
->base
.tb
->flags
& FLAG_MASK_64
));
101 tmp
= tcg_const_i64(pc
);
102 tcg_gen_deposit_i64(out
, out
, tmp
, 0, 32);
103 tcg_temp_free_i64(tmp
);
106 static TCGv_i64 psw_addr
;
107 static TCGv_i64 psw_mask
;
108 static TCGv_i64 gbea
;
110 static TCGv_i32 cc_op
;
111 static TCGv_i64 cc_src
;
112 static TCGv_i64 cc_dst
;
113 static TCGv_i64 cc_vr
;
115 static char cpu_reg_names
[16][4];
116 static TCGv_i64 regs
[16];
118 void s390x_translate_init(void)
122 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
123 offsetof(CPUS390XState
, psw
.addr
),
125 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
126 offsetof(CPUS390XState
, psw
.mask
),
128 gbea
= tcg_global_mem_new_i64(cpu_env
,
129 offsetof(CPUS390XState
, gbea
),
132 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
134 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
136 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
138 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
141 for (i
= 0; i
< 16; i
++) {
142 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
143 regs
[i
] = tcg_global_mem_new(cpu_env
,
144 offsetof(CPUS390XState
, regs
[i
]),
149 static inline int vec_full_reg_offset(uint8_t reg
)
152 return offsetof(CPUS390XState
, vregs
[reg
][0].d
);
155 static inline int vec_reg_offset(uint8_t reg
, uint8_t enr
, TCGMemOp es
)
157 /* Convert element size (es) - e.g. MO_8 - to bytes */
158 const uint8_t bytes
= 1 << es
;
159 int offs
= enr
* bytes
;
162 * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
163 * of the 16 byte vector, on both, little and big endian systems.
165 * Big Endian (target/possible host)
166 * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
167 * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
168 * W: [ 0][ 1] - [ 2][ 3]
171 * Little Endian (possible host)
172 * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
173 * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
174 * W: [ 1][ 0] - [ 3][ 2]
177 * For 16 byte elements, the two 8 byte halves will not form a host
178 * int128 if the host is little endian, since they're in the wrong order.
179 * Some operations (e.g. xor) do not care. For operations like addition,
180 * the two 8 byte elements have to be loaded separately. Let's force all
181 * 16 byte operations to handle it in a special way.
183 g_assert(es
<= MO_64
);
184 #ifndef HOST_WORDS_BIGENDIAN
187 return offs
+ vec_full_reg_offset(reg
);
190 static inline int freg64_offset(uint8_t reg
)
193 return vec_reg_offset(reg
, 0, MO_64
);
196 static inline int freg32_offset(uint8_t reg
)
199 return vec_reg_offset(reg
, 0, MO_32
);
202 static TCGv_i64
load_reg(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_mov_i64(r
, regs
[reg
]);
209 static TCGv_i64
load_freg(int reg
)
211 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_ld_i64(r
, cpu_env
, freg64_offset(reg
));
217 static TCGv_i64
load_freg32_i64(int reg
)
219 TCGv_i64 r
= tcg_temp_new_i64();
221 tcg_gen_ld32u_i64(r
, cpu_env
, freg32_offset(reg
));
225 static void store_reg(int reg
, TCGv_i64 v
)
227 tcg_gen_mov_i64(regs
[reg
], v
);
230 static void store_freg(int reg
, TCGv_i64 v
)
232 tcg_gen_st_i64(v
, cpu_env
, freg64_offset(reg
));
235 static void store_reg32_i64(int reg
, TCGv_i64 v
)
237 /* 32 bit register writes keep the upper half */
238 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
241 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
243 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
246 static void store_freg32_i64(int reg
, TCGv_i64 v
)
248 tcg_gen_st32_i64(v
, cpu_env
, freg32_offset(reg
));
251 static void return_low128(TCGv_i64 dest
)
253 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
256 static void update_psw_addr(DisasContext
*s
)
259 tcg_gen_movi_i64(psw_addr
, s
->base
.pc_next
);
262 static void per_branch(DisasContext
*s
, bool to_next
)
264 #ifndef CONFIG_USER_ONLY
265 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
267 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
268 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->pc_tmp
) : psw_addr
;
269 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
271 tcg_temp_free_i64(next_pc
);
277 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
278 TCGv_i64 arg1
, TCGv_i64 arg2
)
280 #ifndef CONFIG_USER_ONLY
281 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
282 TCGLabel
*lab
= gen_new_label();
283 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
285 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
286 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
290 TCGv_i64 pc
= tcg_const_i64(s
->base
.pc_next
);
291 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
292 tcg_temp_free_i64(pc
);
297 static void per_breaking_event(DisasContext
*s
)
299 tcg_gen_movi_i64(gbea
, s
->base
.pc_next
);
302 static void update_cc_op(DisasContext
*s
)
304 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
305 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
309 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
311 return (uint64_t)cpu_lduw_code(env
, pc
);
314 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
316 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
319 static int get_mem_index(DisasContext
*s
)
321 if (!(s
->base
.tb
->flags
& FLAG_MASK_DAT
)) {
325 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
326 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
327 return MMU_PRIMARY_IDX
;
328 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
329 return MMU_SECONDARY_IDX
;
330 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
338 static void gen_exception(int excp
)
340 TCGv_i32 tmp
= tcg_const_i32(excp
);
341 gen_helper_exception(cpu_env
, tmp
);
342 tcg_temp_free_i32(tmp
);
345 static void gen_program_exception(DisasContext
*s
, int code
)
349 /* Remember what pgm exeption this was. */
350 tmp
= tcg_const_i32(code
);
351 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
352 tcg_temp_free_i32(tmp
);
354 tmp
= tcg_const_i32(s
->ilen
);
355 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
356 tcg_temp_free_i32(tmp
);
364 /* Trigger exception. */
365 gen_exception(EXCP_PGM
);
368 static inline void gen_illegal_opcode(DisasContext
*s
)
370 gen_program_exception(s
, PGM_OPERATION
);
373 static inline void gen_data_exception(uint8_t dxc
)
375 TCGv_i32 tmp
= tcg_const_i32(dxc
);
376 gen_helper_data_exception(cpu_env
, tmp
);
377 tcg_temp_free_i32(tmp
);
380 static inline void gen_trap(DisasContext
*s
)
382 /* Set DXC to 0xff */
383 gen_data_exception(0xff);
386 static void gen_addi_and_wrap_i64(DisasContext
*s
, TCGv_i64 dst
, TCGv_i64 src
,
389 tcg_gen_addi_i64(dst
, src
, imm
);
390 if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
391 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
392 tcg_gen_andi_i64(dst
, dst
, 0x7fffffff);
394 tcg_gen_andi_i64(dst
, dst
, 0x00ffffff);
399 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
401 TCGv_i64 tmp
= tcg_temp_new_i64();
404 * Note that d2 is limited to 20 bits, signed. If we crop negative
405 * displacements early we create larger immedate addends.
408 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
409 gen_addi_and_wrap_i64(s
, tmp
, tmp
, d2
);
411 gen_addi_and_wrap_i64(s
, tmp
, regs
[b2
], d2
);
413 gen_addi_and_wrap_i64(s
, tmp
, regs
[x2
], d2
);
414 } else if (!(s
->base
.tb
->flags
& FLAG_MASK_64
)) {
415 if (s
->base
.tb
->flags
& FLAG_MASK_32
) {
416 tcg_gen_movi_i64(tmp
, d2
& 0x7fffffff);
418 tcg_gen_movi_i64(tmp
, d2
& 0x00ffffff);
421 tcg_gen_movi_i64(tmp
, d2
);
427 static inline bool live_cc_data(DisasContext
*s
)
429 return (s
->cc_op
!= CC_OP_DYNAMIC
430 && s
->cc_op
!= CC_OP_STATIC
434 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
436 if (live_cc_data(s
)) {
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_discard_i64(cc_dst
);
439 tcg_gen_discard_i64(cc_vr
);
441 s
->cc_op
= CC_OP_CONST0
+ val
;
444 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
446 if (live_cc_data(s
)) {
447 tcg_gen_discard_i64(cc_src
);
448 tcg_gen_discard_i64(cc_vr
);
450 tcg_gen_mov_i64(cc_dst
, dst
);
454 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
457 if (live_cc_data(s
)) {
458 tcg_gen_discard_i64(cc_vr
);
460 tcg_gen_mov_i64(cc_src
, src
);
461 tcg_gen_mov_i64(cc_dst
, dst
);
465 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
466 TCGv_i64 dst
, TCGv_i64 vr
)
468 tcg_gen_mov_i64(cc_src
, src
);
469 tcg_gen_mov_i64(cc_dst
, dst
);
470 tcg_gen_mov_i64(cc_vr
, vr
);
474 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
476 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
479 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
481 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
484 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
486 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
489 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
491 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
494 /* CC value is in env->cc_op */
495 static void set_cc_static(DisasContext
*s
)
497 if (live_cc_data(s
)) {
498 tcg_gen_discard_i64(cc_src
);
499 tcg_gen_discard_i64(cc_dst
);
500 tcg_gen_discard_i64(cc_vr
);
502 s
->cc_op
= CC_OP_STATIC
;
505 /* calculates cc into cc_op */
506 static void gen_op_calc_cc(DisasContext
*s
)
508 TCGv_i32 local_cc_op
= NULL
;
509 TCGv_i64 dummy
= NULL
;
513 dummy
= tcg_const_i64(0);
527 local_cc_op
= tcg_const_i32(s
->cc_op
);
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
547 /* env->cc_op already is the cc value */
563 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
568 case CC_OP_LTUGTU_32
:
569 case CC_OP_LTUGTU_64
:
576 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
591 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
594 /* unknown operation - assume 3 arguments and cc_op in env */
595 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
602 tcg_temp_free_i32(local_cc_op
);
605 tcg_temp_free_i64(dummy
);
608 /* We now have cc in cc_op as constant */
612 static bool use_exit_tb(DisasContext
*s
)
614 return s
->base
.singlestep_enabled
||
615 (tb_cflags(s
->base
.tb
) & CF_LAST_IO
) ||
616 (s
->base
.tb
->flags
& FLAG_MASK_PER
);
619 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
621 if (unlikely(use_exit_tb(s
))) {
624 #ifndef CONFIG_USER_ONLY
625 return (dest
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
626 (dest
& TARGET_PAGE_MASK
) == (s
->base
.pc_next
& TARGET_PAGE_MASK
);
632 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
634 #ifdef DEBUG_INLINE_BRANCHES
635 inline_branch_miss
[cc_op
]++;
639 static void account_inline_branch(DisasContext
*s
, int cc_op
)
641 #ifdef DEBUG_INLINE_BRANCHES
642 inline_branch_hit
[cc_op
]++;
646 /* Table of mask values to comparison codes, given a comparison as input.
647 For such, CC=3 should not be possible. */
648 static const TCGCond ltgt_cond
[16] = {
649 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
650 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
651 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
652 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
653 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
654 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
655 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
656 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
659 /* Table of mask values to comparison codes, given a logic op as input.
660 For such, only CC=0 and CC=1 should be possible. */
661 static const TCGCond nz_cond
[16] = {
662 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
663 TCG_COND_NEVER
, TCG_COND_NEVER
,
664 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
665 TCG_COND_NE
, TCG_COND_NE
,
666 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
667 TCG_COND_EQ
, TCG_COND_EQ
,
668 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
669 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
672 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
673 details required to generate a TCG comparison. */
674 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
677 enum cc_op old_cc_op
= s
->cc_op
;
679 if (mask
== 15 || mask
== 0) {
680 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
683 c
->g1
= c
->g2
= true;
688 /* Find the TCG condition for the mask + cc op. */
694 cond
= ltgt_cond
[mask
];
695 if (cond
== TCG_COND_NEVER
) {
698 account_inline_branch(s
, old_cc_op
);
701 case CC_OP_LTUGTU_32
:
702 case CC_OP_LTUGTU_64
:
703 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
704 if (cond
== TCG_COND_NEVER
) {
707 account_inline_branch(s
, old_cc_op
);
711 cond
= nz_cond
[mask
];
712 if (cond
== TCG_COND_NEVER
) {
715 account_inline_branch(s
, old_cc_op
);
730 account_inline_branch(s
, old_cc_op
);
745 account_inline_branch(s
, old_cc_op
);
749 switch (mask
& 0xa) {
750 case 8: /* src == 0 -> no one bit found */
753 case 2: /* src != 0 -> one bit found */
759 account_inline_branch(s
, old_cc_op
);
765 case 8 | 2: /* vr == 0 */
768 case 4 | 1: /* vr != 0 */
771 case 8 | 4: /* no carry -> vr >= src */
774 case 2 | 1: /* carry -> vr < src */
780 account_inline_branch(s
, old_cc_op
);
785 /* Note that CC=0 is impossible; treat it as dont-care. */
787 case 2: /* zero -> op1 == op2 */
790 case 4 | 1: /* !zero -> op1 != op2 */
793 case 4: /* borrow (!carry) -> op1 < op2 */
796 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
802 account_inline_branch(s
, old_cc_op
);
807 /* Calculate cc value. */
812 /* Jump based on CC. We'll load up the real cond below;
813 the assignment here merely avoids a compiler warning. */
814 account_noninline_branch(s
, old_cc_op
);
815 old_cc_op
= CC_OP_STATIC
;
816 cond
= TCG_COND_NEVER
;
820 /* Load up the arguments of the comparison. */
822 c
->g1
= c
->g2
= false;
826 c
->u
.s32
.a
= tcg_temp_new_i32();
827 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
828 c
->u
.s32
.b
= tcg_const_i32(0);
831 case CC_OP_LTUGTU_32
:
834 c
->u
.s32
.a
= tcg_temp_new_i32();
835 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
836 c
->u
.s32
.b
= tcg_temp_new_i32();
837 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
844 c
->u
.s64
.b
= tcg_const_i64(0);
848 case CC_OP_LTUGTU_64
:
852 c
->g1
= c
->g2
= true;
858 c
->u
.s64
.a
= tcg_temp_new_i64();
859 c
->u
.s64
.b
= tcg_const_i64(0);
860 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
865 c
->u
.s32
.a
= tcg_temp_new_i32();
866 c
->u
.s32
.b
= tcg_temp_new_i32();
867 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
868 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
869 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
871 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
878 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
879 c
->u
.s64
.b
= tcg_const_i64(0);
891 case 0x8 | 0x4 | 0x2: /* cc != 3 */
893 c
->u
.s32
.b
= tcg_const_i32(3);
895 case 0x8 | 0x4 | 0x1: /* cc != 2 */
897 c
->u
.s32
.b
= tcg_const_i32(2);
899 case 0x8 | 0x2 | 0x1: /* cc != 1 */
901 c
->u
.s32
.b
= tcg_const_i32(1);
903 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
906 c
->u
.s32
.a
= tcg_temp_new_i32();
907 c
->u
.s32
.b
= tcg_const_i32(0);
908 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
910 case 0x8 | 0x4: /* cc < 2 */
912 c
->u
.s32
.b
= tcg_const_i32(2);
914 case 0x8: /* cc == 0 */
916 c
->u
.s32
.b
= tcg_const_i32(0);
918 case 0x4 | 0x2 | 0x1: /* cc != 0 */
920 c
->u
.s32
.b
= tcg_const_i32(0);
922 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
925 c
->u
.s32
.a
= tcg_temp_new_i32();
926 c
->u
.s32
.b
= tcg_const_i32(0);
927 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
929 case 0x4: /* cc == 1 */
931 c
->u
.s32
.b
= tcg_const_i32(1);
933 case 0x2 | 0x1: /* cc > 1 */
935 c
->u
.s32
.b
= tcg_const_i32(1);
937 case 0x2: /* cc == 2 */
939 c
->u
.s32
.b
= tcg_const_i32(2);
941 case 0x1: /* cc == 3 */
943 c
->u
.s32
.b
= tcg_const_i32(3);
946 /* CC is masked by something else: (8 >> cc) & mask. */
949 c
->u
.s32
.a
= tcg_const_i32(8);
950 c
->u
.s32
.b
= tcg_const_i32(0);
951 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
952 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
963 static void free_compare(DisasCompare
*c
)
967 tcg_temp_free_i64(c
->u
.s64
.a
);
969 tcg_temp_free_i32(c
->u
.s32
.a
);
974 tcg_temp_free_i64(c
->u
.s64
.b
);
976 tcg_temp_free_i32(c
->u
.s32
.b
);
981 /* ====================================================================== */
982 /* Define the insn format enumeration. */
983 #define F0(N) FMT_##N,
984 #define F1(N, X1) F0(N)
985 #define F2(N, X1, X2) F0(N)
986 #define F3(N, X1, X2, X3) F0(N)
987 #define F4(N, X1, X2, X3, X4) F0(N)
988 #define F5(N, X1, X2, X3, X4, X5) F0(N)
989 #define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
992 #include "insn-format.def"
1003 /* Define a structure to hold the decoded fields. We'll store each inside
1004 an array indexed by an enum. In order to conserve memory, we'll arrange
1005 for fields that do not exist at the same time to overlap, thus the "C"
1006 for compact. For checking purposes there is an "O" for original index
1007 as well that will be applied to availability bitmaps. */
1009 enum DisasFieldIndexO
{
1038 enum DisasFieldIndexC
{
1075 struct DisasFields
{
1079 unsigned presentC
:16;
1080 unsigned int presentO
;
1084 /* This is the way fields are to be accessed out of DisasFields. */
1085 #define have_field(S, F) have_field1((S), FLD_O_##F)
1086 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1088 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1090 return (f
->presentO
>> c
) & 1;
1093 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1094 enum DisasFieldIndexC c
)
1096 assert(have_field1(f
, o
));
1100 /* Describe the layout of each field in each format. */
1101 typedef struct DisasField
{
1103 unsigned int size
:8;
1104 unsigned int type
:2;
1105 unsigned int indexC
:6;
1106 enum DisasFieldIndexO indexO
:8;
1109 typedef struct DisasFormatInfo
{
1110 DisasField op
[NUM_C_FIELD
];
1113 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1114 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1115 #define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
1116 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1117 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1118 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1119 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1120 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1121 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1122 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1123 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1124 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1125 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1126 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1127 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1129 #define F0(N) { { } },
1130 #define F1(N, X1) { { X1 } },
1131 #define F2(N, X1, X2) { { X1, X2 } },
1132 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1133 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1134 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1135 #define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
1137 static const DisasFormatInfo format_info
[] = {
1138 #include "insn-format.def"
1158 /* Generally, we'll extract operands into this structures, operate upon
1159 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1160 of routines below for more details. */
1162 bool g_out
, g_out2
, g_in1
, g_in2
;
1163 TCGv_i64 out
, out2
, in1
, in2
;
1167 /* Instructions can place constraints on their operands, raising specification
1168 exceptions if they are violated. To make this easy to automate, each "in1",
1169 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1170 of the following, or 0. To make this easy to document, we'll put the
1171 SPEC_<name> defines next to <name>. */
1173 #define SPEC_r1_even 1
1174 #define SPEC_r2_even 2
1175 #define SPEC_r3_even 4
1176 #define SPEC_r1_f128 8
1177 #define SPEC_r2_f128 16
1179 /* Return values from translate_one, indicating the state of the TB. */
1181 /* We are not using a goto_tb (for whatever reason), but have updated
1182 the PC (for whatever reason), so there's no need to do it again on
1184 #define DISAS_PC_UPDATED DISAS_TARGET_0
1186 /* We have emitted one or more goto_tb. No fixup required. */
1187 #define DISAS_GOTO_TB DISAS_TARGET_1
1189 /* We have updated the PC and CC values. */
1190 #define DISAS_PC_CC_UPDATED DISAS_TARGET_2
1192 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1193 updated the PC for the next instruction to be executed. */
1194 #define DISAS_PC_STALE DISAS_TARGET_3
1196 /* We are exiting the TB to the main loop. */
1197 #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
1200 /* Instruction flags */
1201 #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
1202 #define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
1203 #define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
1204 #define IF_BFP 0x0008 /* binary floating point instruction */
1205 #define IF_DFP 0x0010 /* decimal floating point instruction */
1206 #define IF_PRIV 0x0020 /* privileged instruction */
1207 #define IF_VEC 0x0040 /* vector instruction */
1218 /* Pre-process arguments before HELP_OP. */
1219 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1220 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1221 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1224 * Post-process output after HELP_OP.
1225 * Note that these are not called if HELP_OP returns DISAS_NORETURN.
1227 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1228 void (*help_cout
)(DisasContext
*, DisasOps
*);
1230 /* Implement the operation itself. */
1231 DisasJumpType (*help_op
)(DisasContext
*, DisasOps
*);
1236 /* ====================================================================== */
1237 /* Miscellaneous helpers, used by several operations. */
1239 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1240 DisasOps
*o
, int mask
)
1242 int b2
= get_field(f
, b2
);
1243 int d2
= get_field(f
, d2
);
1246 o
->in2
= tcg_const_i64(d2
& mask
);
1248 o
->in2
= get_address(s
, 0, b2
, d2
);
1249 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1253 static DisasJumpType
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1255 if (dest
== s
->pc_tmp
) {
1256 per_branch(s
, true);
1259 if (use_goto_tb(s
, dest
)) {
1261 per_breaking_event(s
);
1263 tcg_gen_movi_i64(psw_addr
, dest
);
1264 tcg_gen_exit_tb(s
->base
.tb
, 0);
1265 return DISAS_GOTO_TB
;
1267 tcg_gen_movi_i64(psw_addr
, dest
);
1268 per_branch(s
, false);
1269 return DISAS_PC_UPDATED
;
1273 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1274 bool is_imm
, int imm
, TCGv_i64 cdest
)
1277 uint64_t dest
= s
->base
.pc_next
+ 2 * imm
;
1280 /* Take care of the special cases first. */
1281 if (c
->cond
== TCG_COND_NEVER
) {
1286 if (dest
== s
->pc_tmp
) {
1287 /* Branch to next. */
1288 per_branch(s
, true);
1292 if (c
->cond
== TCG_COND_ALWAYS
) {
1293 ret
= help_goto_direct(s
, dest
);
1298 /* E.g. bcr %r0 -> no branch. */
1302 if (c
->cond
== TCG_COND_ALWAYS
) {
1303 tcg_gen_mov_i64(psw_addr
, cdest
);
1304 per_branch(s
, false);
1305 ret
= DISAS_PC_UPDATED
;
1310 if (use_goto_tb(s
, s
->pc_tmp
)) {
1311 if (is_imm
&& use_goto_tb(s
, dest
)) {
1312 /* Both exits can use goto_tb. */
1315 lab
= gen_new_label();
1317 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1319 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1322 /* Branch not taken. */
1324 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1325 tcg_gen_exit_tb(s
->base
.tb
, 0);
1329 per_breaking_event(s
);
1331 tcg_gen_movi_i64(psw_addr
, dest
);
1332 tcg_gen_exit_tb(s
->base
.tb
, 1);
1334 ret
= DISAS_GOTO_TB
;
1336 /* Fallthru can use goto_tb, but taken branch cannot. */
1337 /* Store taken branch destination before the brcond. This
1338 avoids having to allocate a new local temp to hold it.
1339 We'll overwrite this in the not taken case anyway. */
1341 tcg_gen_mov_i64(psw_addr
, cdest
);
1344 lab
= gen_new_label();
1346 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1348 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1351 /* Branch not taken. */
1354 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1355 tcg_gen_exit_tb(s
->base
.tb
, 0);
1359 tcg_gen_movi_i64(psw_addr
, dest
);
1361 per_breaking_event(s
);
1362 ret
= DISAS_PC_UPDATED
;
1365 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1366 Most commonly we're single-stepping or some other condition that
1367 disables all use of goto_tb. Just update the PC and exit. */
1369 TCGv_i64 next
= tcg_const_i64(s
->pc_tmp
);
1371 cdest
= tcg_const_i64(dest
);
1375 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1377 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1379 TCGv_i32 t0
= tcg_temp_new_i32();
1380 TCGv_i64 t1
= tcg_temp_new_i64();
1381 TCGv_i64 z
= tcg_const_i64(0);
1382 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1383 tcg_gen_extu_i32_i64(t1
, t0
);
1384 tcg_temp_free_i32(t0
);
1385 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1386 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1387 tcg_temp_free_i64(t1
);
1388 tcg_temp_free_i64(z
);
1392 tcg_temp_free_i64(cdest
);
1394 tcg_temp_free_i64(next
);
1396 ret
= DISAS_PC_UPDATED
;
1404 /* ====================================================================== */
1405 /* The operations. These perform the bulk of the work for any insn,
1406 usually after the operands have been loaded and output initialized. */
1408 static DisasJumpType
op_abs(DisasContext
*s
, DisasOps
*o
)
1410 tcg_gen_abs_i64(o
->out
, o
->in2
);
1414 static DisasJumpType
op_absf32(DisasContext
*s
, DisasOps
*o
)
1416 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1420 static DisasJumpType
op_absf64(DisasContext
*s
, DisasOps
*o
)
1422 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1426 static DisasJumpType
op_absf128(DisasContext
*s
, DisasOps
*o
)
1428 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1429 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1433 static DisasJumpType
op_add(DisasContext
*s
, DisasOps
*o
)
1435 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1439 static DisasJumpType
op_addc(DisasContext
*s
, DisasOps
*o
)
1444 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1446 /* The carry flag is the msb of CC, therefore the branch mask that would
1447 create that comparison is 3. Feeding the generated comparison to
1448 setcond produces the carry flag that we desire. */
1449 disas_jcc(s
, &cmp
, 3);
1450 carry
= tcg_temp_new_i64();
1452 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1454 TCGv_i32 t
= tcg_temp_new_i32();
1455 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1456 tcg_gen_extu_i32_i64(carry
, t
);
1457 tcg_temp_free_i32(t
);
1461 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1462 tcg_temp_free_i64(carry
);
1466 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1468 o
->in1
= tcg_temp_new_i64();
1470 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1471 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1473 /* Perform the atomic addition in memory. */
1474 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1478 /* Recompute also for atomic case: needed for setting CC. */
1479 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1481 if (!s390_has_feat(S390_FEAT_STFLE_45
)) {
1482 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1487 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1489 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1493 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1495 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1499 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1501 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1502 return_low128(o
->out2
);
1506 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1508 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1512 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1514 int shift
= s
->insn
->data
& 0xff;
1515 int size
= s
->insn
->data
>> 8;
1516 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1519 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1520 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1521 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1523 /* Produce the CC from only the bits manipulated. */
1524 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1525 set_cc_nz_u64(s
, cc_dst
);
1529 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1531 o
->in1
= tcg_temp_new_i64();
1533 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1534 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1536 /* Perform the atomic operation in memory. */
1537 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1541 /* Recompute also for atomic case: needed for setting CC. */
1542 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1544 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1545 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1550 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1552 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1554 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1555 per_branch(s
, false);
1556 return DISAS_PC_UPDATED
;
1562 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1566 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1567 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1571 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1572 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1573 t
= tcg_temp_new_i64();
1574 tcg_gen_shri_i64(t
, psw_mask
, 16);
1575 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1576 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1577 tcg_gen_extu_i32_i64(t
, cc_op
);
1578 tcg_gen_shli_i64(t
, t
, 28);
1579 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1580 tcg_temp_free_i64(t
);
1583 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1585 save_link_info(s
, o
);
1587 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1588 per_branch(s
, false);
1589 return DISAS_PC_UPDATED
;
1595 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1597 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1598 return help_goto_direct(s
, s
->base
.pc_next
+ 2 * get_field(s
->fields
, i2
));
1601 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1603 int m1
= get_field(s
->fields
, m1
);
1604 bool is_imm
= have_field(s
->fields
, i2
);
1605 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1608 /* BCR with R2 = 0 causes no branching */
1609 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1611 /* Perform serialization */
1612 /* FIXME: check for fast-BCR-serialization facility */
1613 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1616 /* Perform serialization */
1617 /* FIXME: perform checkpoint-synchronisation */
1618 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1623 disas_jcc(s
, &c
, m1
);
1624 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1627 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1629 int r1
= get_field(s
->fields
, r1
);
1630 bool is_imm
= have_field(s
->fields
, i2
);
1631 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1635 c
.cond
= TCG_COND_NE
;
1640 t
= tcg_temp_new_i64();
1641 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1642 store_reg32_i64(r1
, t
);
1643 c
.u
.s32
.a
= tcg_temp_new_i32();
1644 c
.u
.s32
.b
= tcg_const_i32(0);
1645 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1646 tcg_temp_free_i64(t
);
1648 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1651 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1653 int r1
= get_field(s
->fields
, r1
);
1654 int imm
= get_field(s
->fields
, i2
);
1658 c
.cond
= TCG_COND_NE
;
1663 t
= tcg_temp_new_i64();
1664 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1665 tcg_gen_subi_i64(t
, t
, 1);
1666 store_reg32h_i64(r1
, t
);
1667 c
.u
.s32
.a
= tcg_temp_new_i32();
1668 c
.u
.s32
.b
= tcg_const_i32(0);
1669 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1670 tcg_temp_free_i64(t
);
1672 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1675 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1677 int r1
= get_field(s
->fields
, r1
);
1678 bool is_imm
= have_field(s
->fields
, i2
);
1679 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1682 c
.cond
= TCG_COND_NE
;
1687 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1688 c
.u
.s64
.a
= regs
[r1
];
1689 c
.u
.s64
.b
= tcg_const_i64(0);
1691 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1694 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1696 int r1
= get_field(s
->fields
, r1
);
1697 int r3
= get_field(s
->fields
, r3
);
1698 bool is_imm
= have_field(s
->fields
, i2
);
1699 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1703 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1708 t
= tcg_temp_new_i64();
1709 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1710 c
.u
.s32
.a
= tcg_temp_new_i32();
1711 c
.u
.s32
.b
= tcg_temp_new_i32();
1712 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1713 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1714 store_reg32_i64(r1
, t
);
1715 tcg_temp_free_i64(t
);
1717 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1720 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1722 int r1
= get_field(s
->fields
, r1
);
1723 int r3
= get_field(s
->fields
, r3
);
1724 bool is_imm
= have_field(s
->fields
, i2
);
1725 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1728 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1731 if (r1
== (r3
| 1)) {
1732 c
.u
.s64
.b
= load_reg(r3
| 1);
1735 c
.u
.s64
.b
= regs
[r3
| 1];
1739 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1740 c
.u
.s64
.a
= regs
[r1
];
1743 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1746 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1748 int imm
, m3
= get_field(s
->fields
, m3
);
1752 c
.cond
= ltgt_cond
[m3
];
1753 if (s
->insn
->data
) {
1754 c
.cond
= tcg_unsigned_cond(c
.cond
);
1756 c
.is_64
= c
.g1
= c
.g2
= true;
1760 is_imm
= have_field(s
->fields
, i4
);
1762 imm
= get_field(s
->fields
, i4
);
1765 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1766 get_field(s
->fields
, d4
));
1769 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1772 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1774 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1779 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1781 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1786 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1788 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1793 static TCGv_i32
fpinst_extract_m34(DisasContext
*s
, bool m3_with_fpe
,
1796 const bool fpe
= s390_has_feat(S390_FEAT_FLOATING_POINT_EXT
);
1797 uint8_t m3
= get_field(s
->fields
, m3
);
1798 uint8_t m4
= get_field(s
->fields
, m4
);
1800 /* m3 field was introduced with FPE */
1801 if (!fpe
&& m3_with_fpe
) {
1804 /* m4 field was introduced with FPE */
1805 if (!fpe
&& m4_with_fpe
) {
1809 /* Check for valid rounding modes. Mode 3 was introduced later. */
1810 if (m3
== 2 || m3
> 7 || (!fpe
&& m3
== 3)) {
1811 gen_program_exception(s
, PGM_SPECIFICATION
);
1815 return tcg_const_i32(deposit32(m3
, 4, 4, m4
));
1818 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1820 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1823 return DISAS_NORETURN
;
1825 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1826 tcg_temp_free_i32(m34
);
1827 gen_set_cc_nz_f32(s
, o
->in2
);
1831 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1833 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1836 return DISAS_NORETURN
;
1838 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1839 tcg_temp_free_i32(m34
);
1840 gen_set_cc_nz_f64(s
, o
->in2
);
1844 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1846 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1849 return DISAS_NORETURN
;
1851 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1852 tcg_temp_free_i32(m34
);
1853 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1857 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1859 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1862 return DISAS_NORETURN
;
1864 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1865 tcg_temp_free_i32(m34
);
1866 gen_set_cc_nz_f32(s
, o
->in2
);
1870 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1872 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1875 return DISAS_NORETURN
;
1877 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1878 tcg_temp_free_i32(m34
);
1879 gen_set_cc_nz_f64(s
, o
->in2
);
1883 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1885 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1888 return DISAS_NORETURN
;
1890 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1891 tcg_temp_free_i32(m34
);
1892 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1896 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1898 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1901 return DISAS_NORETURN
;
1903 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1904 tcg_temp_free_i32(m34
);
1905 gen_set_cc_nz_f32(s
, o
->in2
);
1909 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1911 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1914 return DISAS_NORETURN
;
1916 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1917 tcg_temp_free_i32(m34
);
1918 gen_set_cc_nz_f64(s
, o
->in2
);
1922 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1924 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1927 return DISAS_NORETURN
;
1929 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1930 tcg_temp_free_i32(m34
);
1931 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1935 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1937 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1940 return DISAS_NORETURN
;
1942 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1943 tcg_temp_free_i32(m34
);
1944 gen_set_cc_nz_f32(s
, o
->in2
);
1948 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1950 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1953 return DISAS_NORETURN
;
1955 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1956 tcg_temp_free_i32(m34
);
1957 gen_set_cc_nz_f64(s
, o
->in2
);
1961 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1963 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1966 return DISAS_NORETURN
;
1968 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
1969 tcg_temp_free_i32(m34
);
1970 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1974 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1976 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1979 return DISAS_NORETURN
;
1981 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m34
);
1982 tcg_temp_free_i32(m34
);
1986 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1988 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1991 return DISAS_NORETURN
;
1993 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m34
);
1994 tcg_temp_free_i32(m34
);
1998 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
2000 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2003 return DISAS_NORETURN
;
2005 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m34
);
2006 tcg_temp_free_i32(m34
);
2007 return_low128(o
->out2
);
2011 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
2013 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2016 return DISAS_NORETURN
;
2018 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m34
);
2019 tcg_temp_free_i32(m34
);
2023 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
2025 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2028 return DISAS_NORETURN
;
2030 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2031 tcg_temp_free_i32(m34
);
2035 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
2037 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
2040 return DISAS_NORETURN
;
2042 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m34
);
2043 tcg_temp_free_i32(m34
);
2044 return_low128(o
->out2
);
2048 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
2050 int r2
= get_field(s
->fields
, r2
);
2051 TCGv_i64 len
= tcg_temp_new_i64();
2053 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
2055 return_low128(o
->out
);
2057 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
2058 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
2059 tcg_temp_free_i64(len
);
2064 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
2066 int l
= get_field(s
->fields
, l1
);
2071 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2072 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2075 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2076 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2079 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2080 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2083 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2084 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2087 vl
= tcg_const_i32(l
);
2088 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2089 tcg_temp_free_i32(vl
);
2093 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2097 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
2099 int r1
= get_field(s
->fields
, r1
);
2100 int r2
= get_field(s
->fields
, r2
);
2103 /* r1 and r2 must be even. */
2104 if (r1
& 1 || r2
& 1) {
2105 gen_program_exception(s
, PGM_SPECIFICATION
);
2106 return DISAS_NORETURN
;
2109 t1
= tcg_const_i32(r1
);
2110 t2
= tcg_const_i32(r2
);
2111 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2112 tcg_temp_free_i32(t1
);
2113 tcg_temp_free_i32(t2
);
2118 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2120 int r1
= get_field(s
->fields
, r1
);
2121 int r3
= get_field(s
->fields
, r3
);
2124 /* r1 and r3 must be even. */
2125 if (r1
& 1 || r3
& 1) {
2126 gen_program_exception(s
, PGM_SPECIFICATION
);
2127 return DISAS_NORETURN
;
2130 t1
= tcg_const_i32(r1
);
2131 t3
= tcg_const_i32(r3
);
2132 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2133 tcg_temp_free_i32(t1
);
2134 tcg_temp_free_i32(t3
);
2139 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2141 int r1
= get_field(s
->fields
, r1
);
2142 int r3
= get_field(s
->fields
, r3
);
2145 /* r1 and r3 must be even. */
2146 if (r1
& 1 || r3
& 1) {
2147 gen_program_exception(s
, PGM_SPECIFICATION
);
2148 return DISAS_NORETURN
;
2151 t1
= tcg_const_i32(r1
);
2152 t3
= tcg_const_i32(r3
);
2153 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2154 tcg_temp_free_i32(t1
);
2155 tcg_temp_free_i32(t3
);
2160 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2162 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2163 TCGv_i32 t1
= tcg_temp_new_i32();
2164 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2165 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2167 tcg_temp_free_i32(t1
);
2168 tcg_temp_free_i32(m3
);
2172 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2174 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2176 return_low128(o
->in2
);
2180 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2182 TCGv_i64 t
= tcg_temp_new_i64();
2183 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2184 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2185 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2186 tcg_temp_free_i64(t
);
2190 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2192 int d2
= get_field(s
->fields
, d2
);
2193 int b2
= get_field(s
->fields
, b2
);
2196 /* Note that in1 = R3 (new value) and
2197 in2 = (zero-extended) R1 (expected value). */
2199 addr
= get_address(s
, 0, b2
, d2
);
2200 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2201 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2202 tcg_temp_free_i64(addr
);
2204 /* Are the memory and expected values (un)equal? Note that this setcond
2205 produces the output CC value, thus the NE sense of the test. */
2206 cc
= tcg_temp_new_i64();
2207 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2208 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2209 tcg_temp_free_i64(cc
);
2215 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2217 int r1
= get_field(s
->fields
, r1
);
2218 int r3
= get_field(s
->fields
, r3
);
2219 int d2
= get_field(s
->fields
, d2
);
2220 int b2
= get_field(s
->fields
, b2
);
2221 DisasJumpType ret
= DISAS_NEXT
;
2223 TCGv_i32 t_r1
, t_r3
;
2225 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2226 addr
= get_address(s
, 0, b2
, d2
);
2227 t_r1
= tcg_const_i32(r1
);
2228 t_r3
= tcg_const_i32(r3
);
2229 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
2230 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
2231 } else if (HAVE_CMPXCHG128
) {
2232 gen_helper_cdsg_parallel(cpu_env
, addr
, t_r1
, t_r3
);
2234 gen_helper_exit_atomic(cpu_env
);
2235 ret
= DISAS_NORETURN
;
2237 tcg_temp_free_i64(addr
);
2238 tcg_temp_free_i32(t_r1
);
2239 tcg_temp_free_i32(t_r3
);
2245 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2247 int r3
= get_field(s
->fields
, r3
);
2248 TCGv_i32 t_r3
= tcg_const_i32(r3
);
2250 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2251 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2253 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2255 tcg_temp_free_i32(t_r3
);
2261 #ifndef CONFIG_USER_ONLY
2262 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2264 TCGMemOp mop
= s
->insn
->data
;
2265 TCGv_i64 addr
, old
, cc
;
2266 TCGLabel
*lab
= gen_new_label();
2268 /* Note that in1 = R1 (zero-extended expected value),
2269 out = R1 (original reg), out2 = R1+1 (new value). */
2271 addr
= tcg_temp_new_i64();
2272 old
= tcg_temp_new_i64();
2273 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2274 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2275 get_mem_index(s
), mop
| MO_ALIGN
);
2276 tcg_temp_free_i64(addr
);
2278 /* Are the memory and expected values (un)equal? */
2279 cc
= tcg_temp_new_i64();
2280 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2281 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2283 /* Write back the output now, so that it happens before the
2284 following branch, so that we don't need local temps. */
2285 if ((mop
& MO_SIZE
) == MO_32
) {
2286 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2288 tcg_gen_mov_i64(o
->out
, old
);
2290 tcg_temp_free_i64(old
);
2292 /* If the comparison was equal, and the LSB of R2 was set,
2293 then we need to flush the TLB (for all cpus). */
2294 tcg_gen_xori_i64(cc
, cc
, 1);
2295 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2296 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2297 tcg_temp_free_i64(cc
);
2299 gen_helper_purge(cpu_env
);
2306 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2308 TCGv_i64 t1
= tcg_temp_new_i64();
2309 TCGv_i32 t2
= tcg_temp_new_i32();
2310 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2311 gen_helper_cvd(t1
, t2
);
2312 tcg_temp_free_i32(t2
);
2313 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2314 tcg_temp_free_i64(t1
);
2318 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2320 int m3
= get_field(s
->fields
, m3
);
2321 TCGLabel
*lab
= gen_new_label();
2324 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2325 if (s
->insn
->data
) {
2326 c
= tcg_unsigned_cond(c
);
2328 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2337 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2339 int m3
= get_field(s
->fields
, m3
);
2340 int r1
= get_field(s
->fields
, r1
);
2341 int r2
= get_field(s
->fields
, r2
);
2342 TCGv_i32 tr1
, tr2
, chk
;
2344 /* R1 and R2 must both be even. */
2345 if ((r1
| r2
) & 1) {
2346 gen_program_exception(s
, PGM_SPECIFICATION
);
2347 return DISAS_NORETURN
;
2349 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2353 tr1
= tcg_const_i32(r1
);
2354 tr2
= tcg_const_i32(r2
);
2355 chk
= tcg_const_i32(m3
);
2357 switch (s
->insn
->data
) {
2359 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2362 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2365 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2368 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2371 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2374 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2377 g_assert_not_reached();
2380 tcg_temp_free_i32(tr1
);
2381 tcg_temp_free_i32(tr2
);
2382 tcg_temp_free_i32(chk
);
2387 #ifndef CONFIG_USER_ONLY
2388 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2390 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2391 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2392 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2394 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2396 tcg_temp_free_i32(func_code
);
2397 tcg_temp_free_i32(r3
);
2398 tcg_temp_free_i32(r1
);
2403 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2405 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2406 return_low128(o
->out
);
2410 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2412 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2413 return_low128(o
->out
);
2417 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2419 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2420 return_low128(o
->out
);
2424 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2426 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2427 return_low128(o
->out
);
2431 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2433 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2437 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2439 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2443 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2445 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2446 return_low128(o
->out2
);
2450 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2452 int r2
= get_field(s
->fields
, r2
);
2453 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2457 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2459 /* No cache information provided. */
2460 tcg_gen_movi_i64(o
->out
, -1);
2464 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2466 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2470 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2472 int r1
= get_field(s
->fields
, r1
);
2473 int r2
= get_field(s
->fields
, r2
);
2474 TCGv_i64 t
= tcg_temp_new_i64();
2476 /* Note the "subsequently" in the PoO, which implies a defined result
2477 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2478 tcg_gen_shri_i64(t
, psw_mask
, 32);
2479 store_reg32_i64(r1
, t
);
2481 store_reg32_i64(r2
, psw_mask
);
2484 tcg_temp_free_i64(t
);
2488 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2490 int r1
= get_field(s
->fields
, r1
);
2494 /* Nested EXECUTE is not allowed. */
2495 if (unlikely(s
->ex_value
)) {
2496 gen_program_exception(s
, PGM_EXECUTE
);
2497 return DISAS_NORETURN
;
2504 v1
= tcg_const_i64(0);
2509 ilen
= tcg_const_i32(s
->ilen
);
2510 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2511 tcg_temp_free_i32(ilen
);
2514 tcg_temp_free_i64(v1
);
2517 return DISAS_PC_CC_UPDATED
;
2520 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2522 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2525 return DISAS_NORETURN
;
2527 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m34
);
2528 tcg_temp_free_i32(m34
);
2532 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2534 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2537 return DISAS_NORETURN
;
2539 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m34
);
2540 tcg_temp_free_i32(m34
);
2544 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2546 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2549 return DISAS_NORETURN
;
2551 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2552 return_low128(o
->out2
);
2553 tcg_temp_free_i32(m34
);
2557 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2559 /* We'll use the original input for cc computation, since we get to
2560 compare that against 0, which ought to be better than comparing
2561 the real output against 64. It also lets cc_dst be a convenient
2562 temporary during our computation. */
2563 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2565 /* R1 = IN ? CLZ(IN) : 64. */
2566 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2568 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2569 value by 64, which is undefined. But since the shift is 64 iff the
2570 input is zero, we still get the correct result after and'ing. */
2571 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2572 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2573 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2577 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2579 int m3
= get_field(s
->fields
, m3
);
2580 int pos
, len
, base
= s
->insn
->data
;
2581 TCGv_i64 tmp
= tcg_temp_new_i64();
2586 /* Effectively a 32-bit load. */
2587 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2594 /* Effectively a 16-bit load. */
2595 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2603 /* Effectively an 8-bit load. */
2604 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2609 pos
= base
+ ctz32(m3
) * 8;
2610 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2611 ccm
= ((1ull << len
) - 1) << pos
;
2615 /* This is going to be a sequence of loads and inserts. */
2616 pos
= base
+ 32 - 8;
2620 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2621 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2622 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2625 m3
= (m3
<< 1) & 0xf;
2631 tcg_gen_movi_i64(tmp
, ccm
);
2632 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2633 tcg_temp_free_i64(tmp
);
2637 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2639 int shift
= s
->insn
->data
& 0xff;
2640 int size
= s
->insn
->data
>> 8;
2641 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2645 static DisasJumpType
op_ipm(DisasContext
*s
, DisasOps
*o
)
2650 t1
= tcg_temp_new_i64();
2651 tcg_gen_extract_i64(t1
, psw_mask
, 40, 4);
2652 t2
= tcg_temp_new_i64();
2653 tcg_gen_extu_i32_i64(t2
, cc_op
);
2654 tcg_gen_deposit_i64(t1
, t1
, t2
, 4, 60);
2655 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 24, 8);
2656 tcg_temp_free_i64(t1
);
2657 tcg_temp_free_i64(t2
);
2661 #ifndef CONFIG_USER_ONLY
2662 static DisasJumpType
op_idte(DisasContext
*s
, DisasOps
*o
)
2666 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2667 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2669 m4
= tcg_const_i32(0);
2671 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2672 tcg_temp_free_i32(m4
);
2676 static DisasJumpType
op_ipte(DisasContext
*s
, DisasOps
*o
)
2680 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2681 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2683 m4
= tcg_const_i32(0);
2685 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2686 tcg_temp_free_i32(m4
);
2690 static DisasJumpType
op_iske(DisasContext
*s
, DisasOps
*o
)
2692 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2697 static DisasJumpType
op_msa(DisasContext
*s
, DisasOps
*o
)
2699 int r1
= have_field(s
->fields
, r1
) ? get_field(s
->fields
, r1
) : 0;
2700 int r2
= have_field(s
->fields
, r2
) ? get_field(s
->fields
, r2
) : 0;
2701 int r3
= have_field(s
->fields
, r3
) ? get_field(s
->fields
, r3
) : 0;
2702 TCGv_i32 t_r1
, t_r2
, t_r3
, type
;
2704 switch (s
->insn
->data
) {
2705 case S390_FEAT_TYPE_KMCTR
:
2706 if (r3
& 1 || !r3
) {
2707 gen_program_exception(s
, PGM_SPECIFICATION
);
2708 return DISAS_NORETURN
;
2711 case S390_FEAT_TYPE_PPNO
:
2712 case S390_FEAT_TYPE_KMF
:
2713 case S390_FEAT_TYPE_KMC
:
2714 case S390_FEAT_TYPE_KMO
:
2715 case S390_FEAT_TYPE_KM
:
2716 if (r1
& 1 || !r1
) {
2717 gen_program_exception(s
, PGM_SPECIFICATION
);
2718 return DISAS_NORETURN
;
2721 case S390_FEAT_TYPE_KMAC
:
2722 case S390_FEAT_TYPE_KIMD
:
2723 case S390_FEAT_TYPE_KLMD
:
2724 if (r2
& 1 || !r2
) {
2725 gen_program_exception(s
, PGM_SPECIFICATION
);
2726 return DISAS_NORETURN
;
2729 case S390_FEAT_TYPE_PCKMO
:
2730 case S390_FEAT_TYPE_PCC
:
2733 g_assert_not_reached();
2736 t_r1
= tcg_const_i32(r1
);
2737 t_r2
= tcg_const_i32(r2
);
2738 t_r3
= tcg_const_i32(r3
);
2739 type
= tcg_const_i32(s
->insn
->data
);
2740 gen_helper_msa(cc_op
, cpu_env
, t_r1
, t_r2
, t_r3
, type
);
2742 tcg_temp_free_i32(t_r1
);
2743 tcg_temp_free_i32(t_r2
);
2744 tcg_temp_free_i32(t_r3
);
2745 tcg_temp_free_i32(type
);
2749 static DisasJumpType
op_keb(DisasContext
*s
, DisasOps
*o
)
2751 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2756 static DisasJumpType
op_kdb(DisasContext
*s
, DisasOps
*o
)
2758 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2763 static DisasJumpType
op_kxb(DisasContext
*s
, DisasOps
*o
)
2765 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2770 static DisasJumpType
op_laa(DisasContext
*s
, DisasOps
*o
)
2772 /* The real output is indeed the original value in memory;
2773 recompute the addition for the computation of CC. */
2774 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2775 s
->insn
->data
| MO_ALIGN
);
2776 /* However, we need to recompute the addition for setting CC. */
2777 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2781 static DisasJumpType
op_lan(DisasContext
*s
, DisasOps
*o
)
2783 /* The real output is indeed the original value in memory;
2784 recompute the addition for the computation of CC. */
2785 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2786 s
->insn
->data
| MO_ALIGN
);
2787 /* However, we need to recompute the operation for setting CC. */
2788 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2792 static DisasJumpType
op_lao(DisasContext
*s
, DisasOps
*o
)
2794 /* The real output is indeed the original value in memory;
2795 recompute the addition for the computation of CC. */
2796 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2797 s
->insn
->data
| MO_ALIGN
);
2798 /* However, we need to recompute the operation for setting CC. */
2799 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2803 static DisasJumpType
op_lax(DisasContext
*s
, DisasOps
*o
)
2805 /* The real output is indeed the original value in memory;
2806 recompute the addition for the computation of CC. */
2807 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2808 s
->insn
->data
| MO_ALIGN
);
2809 /* However, we need to recompute the operation for setting CC. */
2810 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2814 static DisasJumpType
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2816 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2820 static DisasJumpType
op_ledb(DisasContext
*s
, DisasOps
*o
)
2822 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2825 return DISAS_NORETURN
;
2827 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
, m34
);
2828 tcg_temp_free_i32(m34
);
2832 static DisasJumpType
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2834 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2837 return DISAS_NORETURN
;
2839 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2840 tcg_temp_free_i32(m34
);
2844 static DisasJumpType
op_lexb(DisasContext
*s
, DisasOps
*o
)
2846 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
2849 return DISAS_NORETURN
;
2851 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m34
);
2852 tcg_temp_free_i32(m34
);
2856 static DisasJumpType
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2858 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2859 return_low128(o
->out2
);
2863 static DisasJumpType
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2865 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2866 return_low128(o
->out2
);
2870 static DisasJumpType
op_lde(DisasContext
*s
, DisasOps
*o
)
2872 tcg_gen_shli_i64(o
->out
, o
->in2
, 32);
2876 static DisasJumpType
op_llgt(DisasContext
*s
, DisasOps
*o
)
2878 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2882 static DisasJumpType
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2884 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2888 static DisasJumpType
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2890 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2894 static DisasJumpType
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2896 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2900 static DisasJumpType
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2902 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2906 static DisasJumpType
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2908 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2912 static DisasJumpType
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2914 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2918 static DisasJumpType
op_ld64(DisasContext
*s
, DisasOps
*o
)
2920 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2924 static DisasJumpType
op_lat(DisasContext
*s
, DisasOps
*o
)
2926 TCGLabel
*lab
= gen_new_label();
2927 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2928 /* The value is stored even in case of trap. */
2929 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2935 static DisasJumpType
op_lgat(DisasContext
*s
, DisasOps
*o
)
2937 TCGLabel
*lab
= gen_new_label();
2938 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2939 /* The value is stored even in case of trap. */
2940 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2946 static DisasJumpType
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2948 TCGLabel
*lab
= gen_new_label();
2949 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2950 /* The value is stored even in case of trap. */
2951 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2957 static DisasJumpType
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2959 TCGLabel
*lab
= gen_new_label();
2960 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2961 /* The value is stored even in case of trap. */
2962 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2968 static DisasJumpType
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2970 TCGLabel
*lab
= gen_new_label();
2971 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2972 /* The value is stored even in case of trap. */
2973 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2979 static DisasJumpType
op_loc(DisasContext
*s
, DisasOps
*o
)
2983 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2986 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2990 TCGv_i32 t32
= tcg_temp_new_i32();
2993 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2996 t
= tcg_temp_new_i64();
2997 tcg_gen_extu_i32_i64(t
, t32
);
2998 tcg_temp_free_i32(t32
);
3000 z
= tcg_const_i64(0);
3001 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
3002 tcg_temp_free_i64(t
);
3003 tcg_temp_free_i64(z
);
3009 #ifndef CONFIG_USER_ONLY
3010 static DisasJumpType
op_lctl(DisasContext
*s
, DisasOps
*o
)
3012 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3013 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3014 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
3015 tcg_temp_free_i32(r1
);
3016 tcg_temp_free_i32(r3
);
3017 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3018 return DISAS_PC_STALE_NOCHAIN
;
3021 static DisasJumpType
op_lctlg(DisasContext
*s
, DisasOps
*o
)
3023 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3024 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3025 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
3026 tcg_temp_free_i32(r1
);
3027 tcg_temp_free_i32(r3
);
3028 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3029 return DISAS_PC_STALE_NOCHAIN
;
3032 static DisasJumpType
op_lra(DisasContext
*s
, DisasOps
*o
)
3034 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
3039 static DisasJumpType
op_lpp(DisasContext
*s
, DisasOps
*o
)
3041 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
3045 static DisasJumpType
op_lpsw(DisasContext
*s
, DisasOps
*o
)
3049 per_breaking_event(s
);
3051 t1
= tcg_temp_new_i64();
3052 t2
= tcg_temp_new_i64();
3053 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3054 MO_TEUL
| MO_ALIGN_8
);
3055 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
3056 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
3057 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3058 tcg_gen_shli_i64(t1
, t1
, 32);
3059 gen_helper_load_psw(cpu_env
, t1
, t2
);
3060 tcg_temp_free_i64(t1
);
3061 tcg_temp_free_i64(t2
);
3062 return DISAS_NORETURN
;
3065 static DisasJumpType
op_lpswe(DisasContext
*s
, DisasOps
*o
)
3069 per_breaking_event(s
);
3071 t1
= tcg_temp_new_i64();
3072 t2
= tcg_temp_new_i64();
3073 tcg_gen_qemu_ld_i64(t1
, o
->in2
, get_mem_index(s
),
3074 MO_TEQ
| MO_ALIGN_8
);
3075 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3076 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
3077 gen_helper_load_psw(cpu_env
, t1
, t2
);
3078 tcg_temp_free_i64(t1
);
3079 tcg_temp_free_i64(t2
);
3080 return DISAS_NORETURN
;
3084 static DisasJumpType
op_lam(DisasContext
*s
, DisasOps
*o
)
3086 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3087 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3088 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
3089 tcg_temp_free_i32(r1
);
3090 tcg_temp_free_i32(r3
);
3094 static DisasJumpType
op_lm32(DisasContext
*s
, DisasOps
*o
)
3096 int r1
= get_field(s
->fields
, r1
);
3097 int r3
= get_field(s
->fields
, r3
);
3100 /* Only one register to read. */
3101 t1
= tcg_temp_new_i64();
3102 if (unlikely(r1
== r3
)) {
3103 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3104 store_reg32_i64(r1
, t1
);
3109 /* First load the values of the first and last registers to trigger
3110 possible page faults. */
3111 t2
= tcg_temp_new_i64();
3112 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3113 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3114 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3115 store_reg32_i64(r1
, t1
);
3116 store_reg32_i64(r3
, t2
);
3118 /* Only two registers to read. */
3119 if (((r1
+ 1) & 15) == r3
) {
3125 /* Then load the remaining registers. Page fault can't occur. */
3127 tcg_gen_movi_i64(t2
, 4);
3130 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3131 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3132 store_reg32_i64(r1
, t1
);
3140 static DisasJumpType
op_lmh(DisasContext
*s
, DisasOps
*o
)
3142 int r1
= get_field(s
->fields
, r1
);
3143 int r3
= get_field(s
->fields
, r3
);
3146 /* Only one register to read. */
3147 t1
= tcg_temp_new_i64();
3148 if (unlikely(r1
== r3
)) {
3149 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3150 store_reg32h_i64(r1
, t1
);
3155 /* First load the values of the first and last registers to trigger
3156 possible page faults. */
3157 t2
= tcg_temp_new_i64();
3158 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3159 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
3160 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
3161 store_reg32h_i64(r1
, t1
);
3162 store_reg32h_i64(r3
, t2
);
3164 /* Only two registers to read. */
3165 if (((r1
+ 1) & 15) == r3
) {
3171 /* Then load the remaining registers. Page fault can't occur. */
3173 tcg_gen_movi_i64(t2
, 4);
3176 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
3177 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3178 store_reg32h_i64(r1
, t1
);
3186 static DisasJumpType
op_lm64(DisasContext
*s
, DisasOps
*o
)
3188 int r1
= get_field(s
->fields
, r1
);
3189 int r3
= get_field(s
->fields
, r3
);
3192 /* Only one register to read. */
3193 if (unlikely(r1
== r3
)) {
3194 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3198 /* First load the values of the first and last registers to trigger
3199 possible page faults. */
3200 t1
= tcg_temp_new_i64();
3201 t2
= tcg_temp_new_i64();
3202 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
3203 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
3204 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
3205 tcg_gen_mov_i64(regs
[r1
], t1
);
3208 /* Only two registers to read. */
3209 if (((r1
+ 1) & 15) == r3
) {
3214 /* Then load the remaining registers. Page fault can't occur. */
3216 tcg_gen_movi_i64(t1
, 8);
3219 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
3220 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3227 static DisasJumpType
op_lpd(DisasContext
*s
, DisasOps
*o
)
3230 TCGMemOp mop
= s
->insn
->data
;
3232 /* In a parallel context, stop the world and single step. */
3233 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
3236 gen_exception(EXCP_ATOMIC
);
3237 return DISAS_NORETURN
;
3240 /* In a serial context, perform the two loads ... */
3241 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
3242 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3243 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
3244 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
3245 tcg_temp_free_i64(a1
);
3246 tcg_temp_free_i64(a2
);
3248 /* ... and indicate that we performed them while interlocked. */
3249 gen_op_movi_cc(s
, 0);
3253 static DisasJumpType
op_lpq(DisasContext
*s
, DisasOps
*o
)
3255 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3256 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
3257 } else if (HAVE_ATOMIC128
) {
3258 gen_helper_lpq_parallel(o
->out
, cpu_env
, o
->in2
);
3260 gen_helper_exit_atomic(cpu_env
);
3261 return DISAS_NORETURN
;
3263 return_low128(o
->out2
);
3267 #ifndef CONFIG_USER_ONLY
3268 static DisasJumpType
op_lura(DisasContext
*s
, DisasOps
*o
)
3270 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
3274 static DisasJumpType
op_lurag(DisasContext
*s
, DisasOps
*o
)
3276 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
3281 static DisasJumpType
op_lzrb(DisasContext
*s
, DisasOps
*o
)
3283 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
3287 static DisasJumpType
op_lcbb(DisasContext
*s
, DisasOps
*o
)
3289 const int64_t block_size
= (1ull << (get_field(s
->fields
, m3
) + 6));
3291 if (get_field(s
->fields
, m3
) > 6) {
3292 gen_program_exception(s
, PGM_SPECIFICATION
);
3293 return DISAS_NORETURN
;
3296 tcg_gen_ori_i64(o
->addr1
, o
->addr1
, -block_size
);
3297 tcg_gen_neg_i64(o
->addr1
, o
->addr1
);
3298 tcg_gen_movi_i64(o
->out
, 16);
3299 tcg_gen_umin_i64(o
->out
, o
->out
, o
->addr1
);
3300 gen_op_update1_cc_i64(s
, CC_OP_LCBB
, o
->out
);
3304 static DisasJumpType
op_mov2(DisasContext
*s
, DisasOps
*o
)
3307 o
->g_out
= o
->g_in2
;
3313 static DisasJumpType
op_mov2e(DisasContext
*s
, DisasOps
*o
)
3315 int b2
= get_field(s
->fields
, b2
);
3316 TCGv ar1
= tcg_temp_new_i64();
3319 o
->g_out
= o
->g_in2
;
3323 switch (s
->base
.tb
->flags
& FLAG_MASK_ASC
) {
3324 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
3325 tcg_gen_movi_i64(ar1
, 0);
3327 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
3328 tcg_gen_movi_i64(ar1
, 1);
3330 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
3332 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
3334 tcg_gen_movi_i64(ar1
, 0);
3337 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
3338 tcg_gen_movi_i64(ar1
, 2);
3342 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
3343 tcg_temp_free_i64(ar1
);
3348 static DisasJumpType
op_movx(DisasContext
*s
, DisasOps
*o
)
3352 o
->g_out
= o
->g_in1
;
3353 o
->g_out2
= o
->g_in2
;
3356 o
->g_in1
= o
->g_in2
= false;
3360 static DisasJumpType
op_mvc(DisasContext
*s
, DisasOps
*o
)
3362 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3363 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3364 tcg_temp_free_i32(l
);
3368 static DisasJumpType
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3370 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3371 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3372 tcg_temp_free_i32(l
);
3376 static DisasJumpType
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3378 int r1
= get_field(s
->fields
, r1
);
3379 int r2
= get_field(s
->fields
, r2
);
3382 /* r1 and r2 must be even. */
3383 if (r1
& 1 || r2
& 1) {
3384 gen_program_exception(s
, PGM_SPECIFICATION
);
3385 return DISAS_NORETURN
;
3388 t1
= tcg_const_i32(r1
);
3389 t2
= tcg_const_i32(r2
);
3390 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3391 tcg_temp_free_i32(t1
);
3392 tcg_temp_free_i32(t2
);
3397 static DisasJumpType
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3399 int r1
= get_field(s
->fields
, r1
);
3400 int r3
= get_field(s
->fields
, r3
);
3403 /* r1 and r3 must be even. */
3404 if (r1
& 1 || r3
& 1) {
3405 gen_program_exception(s
, PGM_SPECIFICATION
);
3406 return DISAS_NORETURN
;
3409 t1
= tcg_const_i32(r1
);
3410 t3
= tcg_const_i32(r3
);
3411 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3412 tcg_temp_free_i32(t1
);
3413 tcg_temp_free_i32(t3
);
3418 static DisasJumpType
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3420 int r1
= get_field(s
->fields
, r1
);
3421 int r3
= get_field(s
->fields
, r3
);
3424 /* r1 and r3 must be even. */
3425 if (r1
& 1 || r3
& 1) {
3426 gen_program_exception(s
, PGM_SPECIFICATION
);
3427 return DISAS_NORETURN
;
3430 t1
= tcg_const_i32(r1
);
3431 t3
= tcg_const_i32(r3
);
3432 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3433 tcg_temp_free_i32(t1
);
3434 tcg_temp_free_i32(t3
);
3439 static DisasJumpType
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3441 int r3
= get_field(s
->fields
, r3
);
3442 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3447 #ifndef CONFIG_USER_ONLY
3448 static DisasJumpType
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3450 int r1
= get_field(s
->fields
, l1
);
3451 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3456 static DisasJumpType
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3458 int r1
= get_field(s
->fields
, l1
);
3459 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3465 static DisasJumpType
op_mvn(DisasContext
*s
, DisasOps
*o
)
3467 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3468 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3469 tcg_temp_free_i32(l
);
3473 static DisasJumpType
op_mvo(DisasContext
*s
, DisasOps
*o
)
3475 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3476 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3477 tcg_temp_free_i32(l
);
3481 static DisasJumpType
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3483 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3488 static DisasJumpType
op_mvst(DisasContext
*s
, DisasOps
*o
)
3490 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3492 return_low128(o
->in2
);
3496 static DisasJumpType
op_mvz(DisasContext
*s
, DisasOps
*o
)
3498 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3499 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3500 tcg_temp_free_i32(l
);
3504 static DisasJumpType
op_mul(DisasContext
*s
, DisasOps
*o
)
3506 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3510 static DisasJumpType
op_mul128(DisasContext
*s
, DisasOps
*o
)
3512 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3516 static DisasJumpType
op_meeb(DisasContext
*s
, DisasOps
*o
)
3518 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3522 static DisasJumpType
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3524 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3528 static DisasJumpType
op_mdb(DisasContext
*s
, DisasOps
*o
)
3530 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3534 static DisasJumpType
op_mxb(DisasContext
*s
, DisasOps
*o
)
3536 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3537 return_low128(o
->out2
);
3541 static DisasJumpType
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3543 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3544 return_low128(o
->out2
);
3548 static DisasJumpType
op_maeb(DisasContext
*s
, DisasOps
*o
)
3550 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3551 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3552 tcg_temp_free_i64(r3
);
3556 static DisasJumpType
op_madb(DisasContext
*s
, DisasOps
*o
)
3558 TCGv_i64 r3
= load_freg(get_field(s
->fields
, r3
));
3559 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3560 tcg_temp_free_i64(r3
);
3564 static DisasJumpType
op_mseb(DisasContext
*s
, DisasOps
*o
)
3566 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3567 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3568 tcg_temp_free_i64(r3
);
3572 static DisasJumpType
op_msdb(DisasContext
*s
, DisasOps
*o
)
3574 TCGv_i64 r3
= load_freg(get_field(s
->fields
, r3
));
3575 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3576 tcg_temp_free_i64(r3
);
3580 static DisasJumpType
op_nabs(DisasContext
*s
, DisasOps
*o
)
3583 z
= tcg_const_i64(0);
3584 n
= tcg_temp_new_i64();
3585 tcg_gen_neg_i64(n
, o
->in2
);
3586 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3587 tcg_temp_free_i64(n
);
3588 tcg_temp_free_i64(z
);
3592 static DisasJumpType
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3594 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3598 static DisasJumpType
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3600 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3604 static DisasJumpType
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3606 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3607 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3611 static DisasJumpType
op_nc(DisasContext
*s
, DisasOps
*o
)
3613 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3614 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3615 tcg_temp_free_i32(l
);
3620 static DisasJumpType
op_neg(DisasContext
*s
, DisasOps
*o
)
3622 tcg_gen_neg_i64(o
->out
, o
->in2
);
3626 static DisasJumpType
op_negf32(DisasContext
*s
, DisasOps
*o
)
3628 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3632 static DisasJumpType
op_negf64(DisasContext
*s
, DisasOps
*o
)
3634 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3638 static DisasJumpType
op_negf128(DisasContext
*s
, DisasOps
*o
)
3640 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3641 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3645 static DisasJumpType
op_oc(DisasContext
*s
, DisasOps
*o
)
3647 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3648 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3649 tcg_temp_free_i32(l
);
3654 static DisasJumpType
op_or(DisasContext
*s
, DisasOps
*o
)
3656 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3660 static DisasJumpType
op_ori(DisasContext
*s
, DisasOps
*o
)
3662 int shift
= s
->insn
->data
& 0xff;
3663 int size
= s
->insn
->data
>> 8;
3664 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3667 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3668 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3670 /* Produce the CC from only the bits manipulated. */
3671 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3672 set_cc_nz_u64(s
, cc_dst
);
3676 static DisasJumpType
op_oi(DisasContext
*s
, DisasOps
*o
)
3678 o
->in1
= tcg_temp_new_i64();
3680 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3681 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3683 /* Perform the atomic operation in memory. */
3684 tcg_gen_atomic_fetch_or_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
3688 /* Recompute also for atomic case: needed for setting CC. */
3689 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3691 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
3692 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
3697 static DisasJumpType
op_pack(DisasContext
*s
, DisasOps
*o
)
3699 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3700 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3701 tcg_temp_free_i32(l
);
3705 static DisasJumpType
op_pka(DisasContext
*s
, DisasOps
*o
)
3707 int l2
= get_field(s
->fields
, l2
) + 1;
3710 /* The length must not exceed 32 bytes. */
3712 gen_program_exception(s
, PGM_SPECIFICATION
);
3713 return DISAS_NORETURN
;
3715 l
= tcg_const_i32(l2
);
3716 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3717 tcg_temp_free_i32(l
);
3721 static DisasJumpType
op_pku(DisasContext
*s
, DisasOps
*o
)
3723 int l2
= get_field(s
->fields
, l2
) + 1;
3726 /* The length must be even and should not exceed 64 bytes. */
3727 if ((l2
& 1) || (l2
> 64)) {
3728 gen_program_exception(s
, PGM_SPECIFICATION
);
3729 return DISAS_NORETURN
;
3731 l
= tcg_const_i32(l2
);
3732 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3733 tcg_temp_free_i32(l
);
3737 static DisasJumpType
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3739 gen_helper_popcnt(o
->out
, o
->in2
);
3743 #ifndef CONFIG_USER_ONLY
3744 static DisasJumpType
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3746 gen_helper_ptlb(cpu_env
);
3751 static DisasJumpType
op_risbg(DisasContext
*s
, DisasOps
*o
)
3753 int i3
= get_field(s
->fields
, i3
);
3754 int i4
= get_field(s
->fields
, i4
);
3755 int i5
= get_field(s
->fields
, i5
);
3756 int do_zero
= i4
& 0x80;
3757 uint64_t mask
, imask
, pmask
;
3760 /* Adjust the arguments for the specific insn. */
3761 switch (s
->fields
->op2
) {
3762 case 0x55: /* risbg */
3763 case 0x59: /* risbgn */
3768 case 0x5d: /* risbhg */
3771 pmask
= 0xffffffff00000000ull
;
3773 case 0x51: /* risblg */
3776 pmask
= 0x00000000ffffffffull
;
3779 g_assert_not_reached();
3782 /* MASK is the set of bits to be inserted from R2.
3783 Take care for I3/I4 wraparound. */
3786 mask
^= pmask
>> i4
>> 1;
3788 mask
|= ~(pmask
>> i4
>> 1);
3792 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3793 insns, we need to keep the other half of the register. */
3794 imask
= ~mask
| ~pmask
;
3802 if (s
->fields
->op2
== 0x5d) {
3806 /* In some cases we can implement this with extract. */
3807 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3808 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3812 /* In some cases we can implement this with deposit. */
3813 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3814 /* Note that we rotate the bits to be inserted to the lsb, not to
3815 the position as described in the PoO. */
3816 rot
= (rot
- pos
) & 63;
3821 /* Rotate the input as necessary. */
3822 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3824 /* Insert the selected bits into the output. */
3827 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3829 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3831 } else if (imask
== 0) {
3832 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3834 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3835 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3836 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3841 static DisasJumpType
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3843 int i3
= get_field(s
->fields
, i3
);
3844 int i4
= get_field(s
->fields
, i4
);
3845 int i5
= get_field(s
->fields
, i5
);
3848 /* If this is a test-only form, arrange to discard the result. */
3850 o
->out
= tcg_temp_new_i64();
3858 /* MASK is the set of bits to be operated on from R2.
3859 Take care for I3/I4 wraparound. */
3862 mask
^= ~0ull >> i4
>> 1;
3864 mask
|= ~(~0ull >> i4
>> 1);
3867 /* Rotate the input as necessary. */
3868 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3871 switch (s
->fields
->op2
) {
3872 case 0x55: /* AND */
3873 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3874 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3877 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3878 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3880 case 0x57: /* XOR */
3881 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3882 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3889 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3890 set_cc_nz_u64(s
, cc_dst
);
3894 static DisasJumpType
op_rev16(DisasContext
*s
, DisasOps
*o
)
3896 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3900 static DisasJumpType
op_rev32(DisasContext
*s
, DisasOps
*o
)
3902 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3906 static DisasJumpType
op_rev64(DisasContext
*s
, DisasOps
*o
)
3908 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3912 static DisasJumpType
op_rll32(DisasContext
*s
, DisasOps
*o
)
3914 TCGv_i32 t1
= tcg_temp_new_i32();
3915 TCGv_i32 t2
= tcg_temp_new_i32();
3916 TCGv_i32 to
= tcg_temp_new_i32();
3917 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3918 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3919 tcg_gen_rotl_i32(to
, t1
, t2
);
3920 tcg_gen_extu_i32_i64(o
->out
, to
);
3921 tcg_temp_free_i32(t1
);
3922 tcg_temp_free_i32(t2
);
3923 tcg_temp_free_i32(to
);
3927 static DisasJumpType
op_rll64(DisasContext
*s
, DisasOps
*o
)
3929 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3933 #ifndef CONFIG_USER_ONLY
3934 static DisasJumpType
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3936 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3941 static DisasJumpType
op_sacf(DisasContext
*s
, DisasOps
*o
)
3943 gen_helper_sacf(cpu_env
, o
->in2
);
3944 /* Addressing mode has changed, so end the block. */
3945 return DISAS_PC_STALE
;
3949 static DisasJumpType
op_sam(DisasContext
*s
, DisasOps
*o
)
3951 int sam
= s
->insn
->data
;
3967 /* Bizarre but true, we check the address of the current insn for the
3968 specification exception, not the next to be executed. Thus the PoO
3969 documents that Bad Things Happen two bytes before the end. */
3970 if (s
->base
.pc_next
& ~mask
) {
3971 gen_program_exception(s
, PGM_SPECIFICATION
);
3972 return DISAS_NORETURN
;
3976 tsam
= tcg_const_i64(sam
);
3977 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3978 tcg_temp_free_i64(tsam
);
3980 /* Always exit the TB, since we (may have) changed execution mode. */
3981 return DISAS_PC_STALE
;
3984 static DisasJumpType
op_sar(DisasContext
*s
, DisasOps
*o
)
3986 int r1
= get_field(s
->fields
, r1
);
3987 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3991 static DisasJumpType
op_seb(DisasContext
*s
, DisasOps
*o
)
3993 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3997 static DisasJumpType
op_sdb(DisasContext
*s
, DisasOps
*o
)
3999 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4003 static DisasJumpType
op_sxb(DisasContext
*s
, DisasOps
*o
)
4005 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
4006 return_low128(o
->out2
);
4010 static DisasJumpType
op_sqeb(DisasContext
*s
, DisasOps
*o
)
4012 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
4016 static DisasJumpType
op_sqdb(DisasContext
*s
, DisasOps
*o
)
4018 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
4022 static DisasJumpType
op_sqxb(DisasContext
*s
, DisasOps
*o
)
4024 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
4025 return_low128(o
->out2
);
4029 #ifndef CONFIG_USER_ONLY
4030 static DisasJumpType
op_servc(DisasContext
*s
, DisasOps
*o
)
4032 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
4037 static DisasJumpType
op_sigp(DisasContext
*s
, DisasOps
*o
)
4039 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4040 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4041 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, r3
);
4043 tcg_temp_free_i32(r1
);
4044 tcg_temp_free_i32(r3
);
4049 static DisasJumpType
op_soc(DisasContext
*s
, DisasOps
*o
)
4056 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
4058 /* We want to store when the condition is fulfilled, so branch
4059 out when it's not */
4060 c
.cond
= tcg_invert_cond(c
.cond
);
4062 lab
= gen_new_label();
4064 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
4066 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
4070 r1
= get_field(s
->fields
, r1
);
4071 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
4072 switch (s
->insn
->data
) {
4074 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
4077 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
4079 case 2: /* STOCFH */
4080 h
= tcg_temp_new_i64();
4081 tcg_gen_shri_i64(h
, regs
[r1
], 32);
4082 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
4083 tcg_temp_free_i64(h
);
4086 g_assert_not_reached();
4088 tcg_temp_free_i64(a
);
4094 static DisasJumpType
op_sla(DisasContext
*s
, DisasOps
*o
)
4096 uint64_t sign
= 1ull << s
->insn
->data
;
4097 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
4098 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
4099 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4100 /* The arithmetic left shift is curious in that it does not affect
4101 the sign bit. Copy that over from the source unchanged. */
4102 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
4103 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
4104 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
4108 static DisasJumpType
op_sll(DisasContext
*s
, DisasOps
*o
)
4110 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
4114 static DisasJumpType
op_sra(DisasContext
*s
, DisasOps
*o
)
4116 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
4120 static DisasJumpType
op_srl(DisasContext
*s
, DisasOps
*o
)
4122 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
4126 static DisasJumpType
op_sfpc(DisasContext
*s
, DisasOps
*o
)
4128 gen_helper_sfpc(cpu_env
, o
->in2
);
4132 static DisasJumpType
op_sfas(DisasContext
*s
, DisasOps
*o
)
4134 gen_helper_sfas(cpu_env
, o
->in2
);
4138 static DisasJumpType
op_srnm(DisasContext
*s
, DisasOps
*o
)
4140 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4141 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x3ull
);
4142 gen_helper_srnm(cpu_env
, o
->addr1
);
4146 static DisasJumpType
op_srnmb(DisasContext
*s
, DisasOps
*o
)
4148 /* Bits 0-55 are are ignored. */
4149 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0xffull
);
4150 gen_helper_srnm(cpu_env
, o
->addr1
);
4154 static DisasJumpType
op_srnmt(DisasContext
*s
, DisasOps
*o
)
4156 TCGv_i64 tmp
= tcg_temp_new_i64();
4158 /* Bits other than 61-63 are ignored. */
4159 tcg_gen_andi_i64(o
->addr1
, o
->addr1
, 0x7ull
);
4161 /* No need to call a helper, we don't implement dfp */
4162 tcg_gen_ld32u_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4163 tcg_gen_deposit_i64(tmp
, tmp
, o
->addr1
, 4, 3);
4164 tcg_gen_st32_i64(tmp
, cpu_env
, offsetof(CPUS390XState
, fpc
));
4166 tcg_temp_free_i64(tmp
);
4170 static DisasJumpType
op_spm(DisasContext
*s
, DisasOps
*o
)
4172 tcg_gen_extrl_i64_i32(cc_op
, o
->in1
);
4173 tcg_gen_extract_i32(cc_op
, cc_op
, 28, 2);
4176 tcg_gen_shri_i64(o
->in1
, o
->in1
, 24);
4177 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in1
, PSW_SHIFT_MASK_PM
, 4);
4181 static DisasJumpType
op_ectg(DisasContext
*s
, DisasOps
*o
)
4183 int b1
= get_field(s
->fields
, b1
);
4184 int d1
= get_field(s
->fields
, d1
);
4185 int b2
= get_field(s
->fields
, b2
);
4186 int d2
= get_field(s
->fields
, d2
);
4187 int r3
= get_field(s
->fields
, r3
);
4188 TCGv_i64 tmp
= tcg_temp_new_i64();
4190 /* fetch all operands first */
4191 o
->in1
= tcg_temp_new_i64();
4192 tcg_gen_addi_i64(o
->in1
, regs
[b1
], d1
);
4193 o
->in2
= tcg_temp_new_i64();
4194 tcg_gen_addi_i64(o
->in2
, regs
[b2
], d2
);
4195 o
->addr1
= get_address(s
, 0, r3
, 0);
4197 /* load the third operand into r3 before modifying anything */
4198 tcg_gen_qemu_ld64(regs
[r3
], o
->addr1
, get_mem_index(s
));
4200 /* subtract CPU timer from first operand and store in GR0 */
4201 gen_helper_stpt(tmp
, cpu_env
);
4202 tcg_gen_sub_i64(regs
[0], o
->in1
, tmp
);
4204 /* store second operand in GR1 */
4205 tcg_gen_mov_i64(regs
[1], o
->in2
);
4207 tcg_temp_free_i64(tmp
);
4211 #ifndef CONFIG_USER_ONLY
4212 static DisasJumpType
op_spka(DisasContext
*s
, DisasOps
*o
)
4214 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
4215 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
4219 static DisasJumpType
op_sske(DisasContext
*s
, DisasOps
*o
)
4221 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
4225 static DisasJumpType
op_ssm(DisasContext
*s
, DisasOps
*o
)
4227 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
4228 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4229 return DISAS_PC_STALE_NOCHAIN
;
4232 static DisasJumpType
op_stap(DisasContext
*s
, DisasOps
*o
)
4234 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, core_id
));
4239 static DisasJumpType
op_stck(DisasContext
*s
, DisasOps
*o
)
4241 gen_helper_stck(o
->out
, cpu_env
);
4242 /* ??? We don't implement clock states. */
4243 gen_op_movi_cc(s
, 0);
4247 static DisasJumpType
op_stcke(DisasContext
*s
, DisasOps
*o
)
4249 TCGv_i64 c1
= tcg_temp_new_i64();
4250 TCGv_i64 c2
= tcg_temp_new_i64();
4251 TCGv_i64 todpr
= tcg_temp_new_i64();
4252 gen_helper_stck(c1
, cpu_env
);
4253 /* 16 bit value store in an uint32_t (only valid bits set) */
4254 tcg_gen_ld32u_i64(todpr
, cpu_env
, offsetof(CPUS390XState
, todpr
));
4255 /* Shift the 64-bit value into its place as a zero-extended
4256 104-bit value. Note that "bit positions 64-103 are always
4257 non-zero so that they compare differently to STCK"; we set
4258 the least significant bit to 1. */
4259 tcg_gen_shli_i64(c2
, c1
, 56);
4260 tcg_gen_shri_i64(c1
, c1
, 8);
4261 tcg_gen_ori_i64(c2
, c2
, 0x10000);
4262 tcg_gen_or_i64(c2
, c2
, todpr
);
4263 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
4264 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
4265 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
4266 tcg_temp_free_i64(c1
);
4267 tcg_temp_free_i64(c2
);
4268 tcg_temp_free_i64(todpr
);
4269 /* ??? We don't implement clock states. */
4270 gen_op_movi_cc(s
, 0);
4274 #ifndef CONFIG_USER_ONLY
4275 static DisasJumpType
op_sck(DisasContext
*s
, DisasOps
*o
)
4277 tcg_gen_qemu_ld_i64(o
->in1
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
4278 gen_helper_sck(cc_op
, cpu_env
, o
->in1
);
4283 static DisasJumpType
op_sckc(DisasContext
*s
, DisasOps
*o
)
4285 gen_helper_sckc(cpu_env
, o
->in2
);
4289 static DisasJumpType
op_sckpf(DisasContext
*s
, DisasOps
*o
)
4291 gen_helper_sckpf(cpu_env
, regs
[0]);
4295 static DisasJumpType
op_stckc(DisasContext
*s
, DisasOps
*o
)
4297 gen_helper_stckc(o
->out
, cpu_env
);
4301 static DisasJumpType
op_stctg(DisasContext
*s
, DisasOps
*o
)
4303 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4304 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4305 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
4306 tcg_temp_free_i32(r1
);
4307 tcg_temp_free_i32(r3
);
4311 static DisasJumpType
op_stctl(DisasContext
*s
, DisasOps
*o
)
4313 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4314 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4315 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
4316 tcg_temp_free_i32(r1
);
4317 tcg_temp_free_i32(r3
);
4321 static DisasJumpType
op_stidp(DisasContext
*s
, DisasOps
*o
)
4323 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
4327 static DisasJumpType
op_spt(DisasContext
*s
, DisasOps
*o
)
4329 gen_helper_spt(cpu_env
, o
->in2
);
4333 static DisasJumpType
op_stfl(DisasContext
*s
, DisasOps
*o
)
4335 gen_helper_stfl(cpu_env
);
4339 static DisasJumpType
op_stpt(DisasContext
*s
, DisasOps
*o
)
4341 gen_helper_stpt(o
->out
, cpu_env
);
4345 static DisasJumpType
op_stsi(DisasContext
*s
, DisasOps
*o
)
4347 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
4352 static DisasJumpType
op_spx(DisasContext
*s
, DisasOps
*o
)
4354 gen_helper_spx(cpu_env
, o
->in2
);
4358 static DisasJumpType
op_xsch(DisasContext
*s
, DisasOps
*o
)
4360 gen_helper_xsch(cpu_env
, regs
[1]);
4365 static DisasJumpType
op_csch(DisasContext
*s
, DisasOps
*o
)
4367 gen_helper_csch(cpu_env
, regs
[1]);
4372 static DisasJumpType
op_hsch(DisasContext
*s
, DisasOps
*o
)
4374 gen_helper_hsch(cpu_env
, regs
[1]);
4379 static DisasJumpType
op_msch(DisasContext
*s
, DisasOps
*o
)
4381 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
4386 static DisasJumpType
op_rchp(DisasContext
*s
, DisasOps
*o
)
4388 gen_helper_rchp(cpu_env
, regs
[1]);
4393 static DisasJumpType
op_rsch(DisasContext
*s
, DisasOps
*o
)
4395 gen_helper_rsch(cpu_env
, regs
[1]);
4400 static DisasJumpType
op_sal(DisasContext
*s
, DisasOps
*o
)
4402 gen_helper_sal(cpu_env
, regs
[1]);
4406 static DisasJumpType
op_schm(DisasContext
*s
, DisasOps
*o
)
4408 gen_helper_schm(cpu_env
, regs
[1], regs
[2], o
->in2
);
4412 static DisasJumpType
op_siga(DisasContext
*s
, DisasOps
*o
)
4414 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4415 gen_op_movi_cc(s
, 3);
4419 static DisasJumpType
op_stcps(DisasContext
*s
, DisasOps
*o
)
4421 /* The instruction is suppressed if not provided. */
4425 static DisasJumpType
op_ssch(DisasContext
*s
, DisasOps
*o
)
4427 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4432 static DisasJumpType
op_stsch(DisasContext
*s
, DisasOps
*o
)
4434 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4439 static DisasJumpType
op_stcrw(DisasContext
*s
, DisasOps
*o
)
4441 gen_helper_stcrw(cpu_env
, o
->in2
);
4446 static DisasJumpType
op_tpi(DisasContext
*s
, DisasOps
*o
)
4448 gen_helper_tpi(cc_op
, cpu_env
, o
->addr1
);
4453 static DisasJumpType
op_tsch(DisasContext
*s
, DisasOps
*o
)
4455 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4460 static DisasJumpType
op_chsc(DisasContext
*s
, DisasOps
*o
)
4462 gen_helper_chsc(cpu_env
, o
->in2
);
4467 static DisasJumpType
op_stpx(DisasContext
*s
, DisasOps
*o
)
4469 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4470 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4474 static DisasJumpType
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4476 uint64_t i2
= get_field(s
->fields
, i2
);
4479 /* It is important to do what the instruction name says: STORE THEN.
4480 If we let the output hook perform the store then if we fault and
4481 restart, we'll have the wrong SYSTEM MASK in place. */
4482 t
= tcg_temp_new_i64();
4483 tcg_gen_shri_i64(t
, psw_mask
, 56);
4484 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4485 tcg_temp_free_i64(t
);
4487 if (s
->fields
->op
== 0xac) {
4488 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4489 (i2
<< 56) | 0x00ffffffffffffffull
);
4491 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4494 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4495 return DISAS_PC_STALE_NOCHAIN
;
4498 static DisasJumpType
op_stura(DisasContext
*s
, DisasOps
*o
)
4500 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4504 static DisasJumpType
op_sturg(DisasContext
*s
, DisasOps
*o
)
4506 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4511 static DisasJumpType
op_stfle(DisasContext
*s
, DisasOps
*o
)
4513 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4518 static DisasJumpType
op_st8(DisasContext
*s
, DisasOps
*o
)
4520 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4524 static DisasJumpType
op_st16(DisasContext
*s
, DisasOps
*o
)
4526 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4530 static DisasJumpType
op_st32(DisasContext
*s
, DisasOps
*o
)
4532 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4536 static DisasJumpType
op_st64(DisasContext
*s
, DisasOps
*o
)
4538 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4542 static DisasJumpType
op_stam(DisasContext
*s
, DisasOps
*o
)
4544 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4545 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4546 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4547 tcg_temp_free_i32(r1
);
4548 tcg_temp_free_i32(r3
);
4552 static DisasJumpType
op_stcm(DisasContext
*s
, DisasOps
*o
)
4554 int m3
= get_field(s
->fields
, m3
);
4555 int pos
, base
= s
->insn
->data
;
4556 TCGv_i64 tmp
= tcg_temp_new_i64();
4558 pos
= base
+ ctz32(m3
) * 8;
4561 /* Effectively a 32-bit store. */
4562 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4563 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4569 /* Effectively a 16-bit store. */
4570 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4571 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4578 /* Effectively an 8-bit store. */
4579 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4580 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4584 /* This is going to be a sequence of shifts and stores. */
4585 pos
= base
+ 32 - 8;
4588 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4589 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4590 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4592 m3
= (m3
<< 1) & 0xf;
4597 tcg_temp_free_i64(tmp
);
4601 static DisasJumpType
op_stm(DisasContext
*s
, DisasOps
*o
)
4603 int r1
= get_field(s
->fields
, r1
);
4604 int r3
= get_field(s
->fields
, r3
);
4605 int size
= s
->insn
->data
;
4606 TCGv_i64 tsize
= tcg_const_i64(size
);
4610 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4612 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4617 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4621 tcg_temp_free_i64(tsize
);
4625 static DisasJumpType
op_stmh(DisasContext
*s
, DisasOps
*o
)
4627 int r1
= get_field(s
->fields
, r1
);
4628 int r3
= get_field(s
->fields
, r3
);
4629 TCGv_i64 t
= tcg_temp_new_i64();
4630 TCGv_i64 t4
= tcg_const_i64(4);
4631 TCGv_i64 t32
= tcg_const_i64(32);
4634 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4635 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4639 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4643 tcg_temp_free_i64(t
);
4644 tcg_temp_free_i64(t4
);
4645 tcg_temp_free_i64(t32
);
4649 static DisasJumpType
op_stpq(DisasContext
*s
, DisasOps
*o
)
4651 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
4652 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4653 } else if (HAVE_ATOMIC128
) {
4654 gen_helper_stpq_parallel(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4656 gen_helper_exit_atomic(cpu_env
);
4657 return DISAS_NORETURN
;
4662 static DisasJumpType
op_srst(DisasContext
*s
, DisasOps
*o
)
4664 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4665 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4667 gen_helper_srst(cpu_env
, r1
, r2
);
4669 tcg_temp_free_i32(r1
);
4670 tcg_temp_free_i32(r2
);
4675 static DisasJumpType
op_srstu(DisasContext
*s
, DisasOps
*o
)
4677 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4678 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4680 gen_helper_srstu(cpu_env
, r1
, r2
);
4682 tcg_temp_free_i32(r1
);
4683 tcg_temp_free_i32(r2
);
4688 static DisasJumpType
op_sub(DisasContext
*s
, DisasOps
*o
)
4690 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4694 static DisasJumpType
op_subb(DisasContext
*s
, DisasOps
*o
)
4699 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4701 /* The !borrow flag is the msb of CC. Since we want the inverse of
4702 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4703 disas_jcc(s
, &cmp
, 8 | 4);
4704 borrow
= tcg_temp_new_i64();
4706 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4708 TCGv_i32 t
= tcg_temp_new_i32();
4709 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4710 tcg_gen_extu_i32_i64(borrow
, t
);
4711 tcg_temp_free_i32(t
);
4715 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4716 tcg_temp_free_i64(borrow
);
4720 static DisasJumpType
op_svc(DisasContext
*s
, DisasOps
*o
)
4727 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4728 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4729 tcg_temp_free_i32(t
);
4731 t
= tcg_const_i32(s
->ilen
);
4732 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4733 tcg_temp_free_i32(t
);
4735 gen_exception(EXCP_SVC
);
4736 return DISAS_NORETURN
;
4739 static DisasJumpType
op_tam(DisasContext
*s
, DisasOps
*o
)
4743 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4744 cc
|= (s
->base
.tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4745 gen_op_movi_cc(s
, cc
);
4749 static DisasJumpType
op_tceb(DisasContext
*s
, DisasOps
*o
)
4751 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4756 static DisasJumpType
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4758 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4763 static DisasJumpType
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4765 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4770 #ifndef CONFIG_USER_ONLY
4772 static DisasJumpType
op_testblock(DisasContext
*s
, DisasOps
*o
)
4774 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4779 static DisasJumpType
op_tprot(DisasContext
*s
, DisasOps
*o
)
4781 gen_helper_tprot(cc_op
, cpu_env
, o
->addr1
, o
->in2
);
4788 static DisasJumpType
op_tp(DisasContext
*s
, DisasOps
*o
)
4790 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4791 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4792 tcg_temp_free_i32(l1
);
4797 static DisasJumpType
op_tr(DisasContext
*s
, DisasOps
*o
)
4799 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4800 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4801 tcg_temp_free_i32(l
);
4806 static DisasJumpType
op_tre(DisasContext
*s
, DisasOps
*o
)
4808 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4809 return_low128(o
->out2
);
4814 static DisasJumpType
op_trt(DisasContext
*s
, DisasOps
*o
)
4816 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4817 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4818 tcg_temp_free_i32(l
);
4823 static DisasJumpType
op_trtr(DisasContext
*s
, DisasOps
*o
)
4825 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4826 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4827 tcg_temp_free_i32(l
);
4832 static DisasJumpType
op_trXX(DisasContext
*s
, DisasOps
*o
)
4834 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4835 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4836 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4837 TCGv_i32 tst
= tcg_temp_new_i32();
4838 int m3
= get_field(s
->fields
, m3
);
4840 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4844 tcg_gen_movi_i32(tst
, -1);
4846 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4847 if (s
->insn
->opc
& 3) {
4848 tcg_gen_ext8u_i32(tst
, tst
);
4850 tcg_gen_ext16u_i32(tst
, tst
);
4853 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4855 tcg_temp_free_i32(r1
);
4856 tcg_temp_free_i32(r2
);
4857 tcg_temp_free_i32(sizes
);
4858 tcg_temp_free_i32(tst
);
4863 static DisasJumpType
op_ts(DisasContext
*s
, DisasOps
*o
)
4865 TCGv_i32 t1
= tcg_const_i32(0xff);
4866 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4867 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4868 tcg_temp_free_i32(t1
);
4873 static DisasJumpType
op_unpk(DisasContext
*s
, DisasOps
*o
)
4875 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4876 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4877 tcg_temp_free_i32(l
);
4881 static DisasJumpType
op_unpka(DisasContext
*s
, DisasOps
*o
)
4883 int l1
= get_field(s
->fields
, l1
) + 1;
4886 /* The length must not exceed 32 bytes. */
4888 gen_program_exception(s
, PGM_SPECIFICATION
);
4889 return DISAS_NORETURN
;
4891 l
= tcg_const_i32(l1
);
4892 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4893 tcg_temp_free_i32(l
);
4898 static DisasJumpType
op_unpku(DisasContext
*s
, DisasOps
*o
)
4900 int l1
= get_field(s
->fields
, l1
) + 1;
4903 /* The length must be even and should not exceed 64 bytes. */
4904 if ((l1
& 1) || (l1
> 64)) {
4905 gen_program_exception(s
, PGM_SPECIFICATION
);
4906 return DISAS_NORETURN
;
4908 l
= tcg_const_i32(l1
);
4909 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4910 tcg_temp_free_i32(l
);
4916 static DisasJumpType
op_xc(DisasContext
*s
, DisasOps
*o
)
4918 int d1
= get_field(s
->fields
, d1
);
4919 int d2
= get_field(s
->fields
, d2
);
4920 int b1
= get_field(s
->fields
, b1
);
4921 int b2
= get_field(s
->fields
, b2
);
4922 int l
= get_field(s
->fields
, l1
);
4925 o
->addr1
= get_address(s
, 0, b1
, d1
);
4927 /* If the addresses are identical, this is a store/memset of zero. */
4928 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4929 o
->in2
= tcg_const_i64(0);
4933 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4936 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4940 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4943 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4947 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4950 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4954 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4956 gen_op_movi_cc(s
, 0);
4960 /* But in general we'll defer to a helper. */
4961 o
->in2
= get_address(s
, 0, b2
, d2
);
4962 t32
= tcg_const_i32(l
);
4963 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4964 tcg_temp_free_i32(t32
);
4969 static DisasJumpType
op_xor(DisasContext
*s
, DisasOps
*o
)
4971 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4975 static DisasJumpType
op_xori(DisasContext
*s
, DisasOps
*o
)
4977 int shift
= s
->insn
->data
& 0xff;
4978 int size
= s
->insn
->data
>> 8;
4979 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4982 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4983 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4985 /* Produce the CC from only the bits manipulated. */
4986 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4987 set_cc_nz_u64(s
, cc_dst
);
4991 static DisasJumpType
op_xi(DisasContext
*s
, DisasOps
*o
)
4993 o
->in1
= tcg_temp_new_i64();
4995 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
4996 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
4998 /* Perform the atomic operation in memory. */
4999 tcg_gen_atomic_fetch_xor_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
5003 /* Recompute also for atomic case: needed for setting CC. */
5004 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
5006 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
5007 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
5012 static DisasJumpType
op_zero(DisasContext
*s
, DisasOps
*o
)
5014 o
->out
= tcg_const_i64(0);
5018 static DisasJumpType
op_zero2(DisasContext
*s
, DisasOps
*o
)
5020 o
->out
= tcg_const_i64(0);
5026 #ifndef CONFIG_USER_ONLY
5027 static DisasJumpType
op_clp(DisasContext
*s
, DisasOps
*o
)
5029 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
5031 gen_helper_clp(cpu_env
, r2
);
5032 tcg_temp_free_i32(r2
);
5037 static DisasJumpType
op_pcilg(DisasContext
*s
, DisasOps
*o
)
5039 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5040 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
5042 gen_helper_pcilg(cpu_env
, r1
, r2
);
5043 tcg_temp_free_i32(r1
);
5044 tcg_temp_free_i32(r2
);
5049 static DisasJumpType
op_pcistg(DisasContext
*s
, DisasOps
*o
)
5051 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5052 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
5054 gen_helper_pcistg(cpu_env
, r1
, r2
);
5055 tcg_temp_free_i32(r1
);
5056 tcg_temp_free_i32(r2
);
5061 static DisasJumpType
op_stpcifc(DisasContext
*s
, DisasOps
*o
)
5063 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5064 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
5066 gen_helper_stpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5067 tcg_temp_free_i32(ar
);
5068 tcg_temp_free_i32(r1
);
5073 static DisasJumpType
op_sic(DisasContext
*s
, DisasOps
*o
)
5075 gen_helper_sic(cpu_env
, o
->in1
, o
->in2
);
5079 static DisasJumpType
op_rpcit(DisasContext
*s
, DisasOps
*o
)
5081 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5082 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
5084 gen_helper_rpcit(cpu_env
, r1
, r2
);
5085 tcg_temp_free_i32(r1
);
5086 tcg_temp_free_i32(r2
);
5091 static DisasJumpType
op_pcistb(DisasContext
*s
, DisasOps
*o
)
5093 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5094 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
5095 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
5097 gen_helper_pcistb(cpu_env
, r1
, r3
, o
->addr1
, ar
);
5098 tcg_temp_free_i32(ar
);
5099 tcg_temp_free_i32(r1
);
5100 tcg_temp_free_i32(r3
);
5105 static DisasJumpType
op_mpcifc(DisasContext
*s
, DisasOps
*o
)
5107 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
5108 TCGv_i32 ar
= tcg_const_i32(get_field(s
->fields
, b2
));
5110 gen_helper_mpcifc(cpu_env
, r1
, o
->addr1
, ar
);
5111 tcg_temp_free_i32(ar
);
5112 tcg_temp_free_i32(r1
);
5118 #include "translate_vx.inc.c"
5120 /* ====================================================================== */
5121 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5122 the original inputs), update the various cc data structures in order to
5123 be able to compute the new condition code. */
5125 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
5127 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
5130 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
5132 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
5135 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
5137 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
5140 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
5142 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
5145 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
5147 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
5150 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
5152 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
5155 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
5157 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
5160 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
5162 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
5165 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
5167 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
5170 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
5172 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
5175 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
5177 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
5180 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
5182 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
5185 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
5187 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
5190 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
5192 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
5195 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
5197 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
5200 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
5202 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
5205 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
5207 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
5210 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
5212 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
5215 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
5217 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
5220 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
5222 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5223 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
5226 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
5228 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
5231 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
5233 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
5236 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
5238 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
5241 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
5243 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
5246 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
5248 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
5251 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5253 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
5256 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5258 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
5261 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
5263 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
5266 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
5268 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
5271 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5273 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5276 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5278 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5281 /* ====================================================================== */
5282 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5283 with the TCG register to which we will write. Used in combination with
5284 the "wout" generators, in some cases we need a new temporary, and in
5285 some cases we can write to a TCG global. */
5287 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5289 o
->out
= tcg_temp_new_i64();
5291 #define SPEC_prep_new 0
5293 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5295 o
->out
= tcg_temp_new_i64();
5296 o
->out2
= tcg_temp_new_i64();
5298 #define SPEC_prep_new_P 0
5300 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5302 o
->out
= regs
[get_field(f
, r1
)];
5305 #define SPEC_prep_r1 0
5307 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5309 int r1
= get_field(f
, r1
);
5311 o
->out2
= regs
[r1
+ 1];
5312 o
->g_out
= o
->g_out2
= true;
5314 #define SPEC_prep_r1_P SPEC_r1_even
5316 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5317 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5319 o
->out
= load_freg(get_field(f
, r1
));
5320 o
->out2
= load_freg(get_field(f
, r1
) + 2);
5322 #define SPEC_prep_x1 SPEC_r1_f128
5324 /* ====================================================================== */
5325 /* The "Write OUTput" generators. These generally perform some non-trivial
5326 copy of data to TCG globals, or to main memory. The trivial cases are
5327 generally handled by having a "prep" generator install the TCG global
5328 as the destination of the operation. */
5330 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5332 store_reg(get_field(f
, r1
), o
->out
);
5334 #define SPEC_wout_r1 0
5336 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5338 int r1
= get_field(f
, r1
);
5339 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
5341 #define SPEC_wout_r1_8 0
5343 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5345 int r1
= get_field(f
, r1
);
5346 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
5348 #define SPEC_wout_r1_16 0
5350 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5352 store_reg32_i64(get_field(f
, r1
), o
->out
);
5354 #define SPEC_wout_r1_32 0
5356 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5358 store_reg32h_i64(get_field(f
, r1
), o
->out
);
5360 #define SPEC_wout_r1_32h 0
5362 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5364 int r1
= get_field(f
, r1
);
5365 store_reg32_i64(r1
, o
->out
);
5366 store_reg32_i64(r1
+ 1, o
->out2
);
5368 #define SPEC_wout_r1_P32 SPEC_r1_even
5370 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5372 int r1
= get_field(f
, r1
);
5373 store_reg32_i64(r1
+ 1, o
->out
);
5374 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
5375 store_reg32_i64(r1
, o
->out
);
5377 #define SPEC_wout_r1_D32 SPEC_r1_even
5379 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5381 int r3
= get_field(f
, r3
);
5382 store_reg32_i64(r3
, o
->out
);
5383 store_reg32_i64(r3
+ 1, o
->out2
);
5385 #define SPEC_wout_r3_P32 SPEC_r3_even
5387 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5389 int r3
= get_field(f
, r3
);
5390 store_reg(r3
, o
->out
);
5391 store_reg(r3
+ 1, o
->out2
);
5393 #define SPEC_wout_r3_P64 SPEC_r3_even
5395 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5397 store_freg32_i64(get_field(f
, r1
), o
->out
);
5399 #define SPEC_wout_e1 0
5401 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5403 store_freg(get_field(f
, r1
), o
->out
);
5405 #define SPEC_wout_f1 0
5407 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5409 int f1
= get_field(s
->fields
, r1
);
5410 store_freg(f1
, o
->out
);
5411 store_freg(f1
+ 2, o
->out2
);
5413 #define SPEC_wout_x1 SPEC_r1_f128
5415 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5417 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5418 store_reg32_i64(get_field(f
, r1
), o
->out
);
5421 #define SPEC_wout_cond_r1r2_32 0
5423 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5425 if (get_field(f
, r1
) != get_field(f
, r2
)) {
5426 store_freg32_i64(get_field(f
, r1
), o
->out
);
5429 #define SPEC_wout_cond_e1e2 0
5431 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5433 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
5435 #define SPEC_wout_m1_8 0
5437 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5439 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
5441 #define SPEC_wout_m1_16 0
5443 #ifndef CONFIG_USER_ONLY
5444 static void wout_m1_16a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5446 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUW
| MO_ALIGN
);
5448 #define SPEC_wout_m1_16a 0
5451 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5453 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
5455 #define SPEC_wout_m1_32 0
5457 #ifndef CONFIG_USER_ONLY
5458 static void wout_m1_32a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5460 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5462 #define SPEC_wout_m1_32a 0
5465 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5467 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
5469 #define SPEC_wout_m1_64 0
5471 #ifndef CONFIG_USER_ONLY
5472 static void wout_m1_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5474 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5476 #define SPEC_wout_m1_64a 0
5479 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5481 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
5483 #define SPEC_wout_m2_32 0
5485 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5487 store_reg(get_field(f
, r1
), o
->in2
);
5489 #define SPEC_wout_in2_r1 0
5491 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5493 store_reg32_i64(get_field(f
, r1
), o
->in2
);
5495 #define SPEC_wout_in2_r1_32 0
5497 /* ====================================================================== */
5498 /* The "INput 1" generators. These load the first operand to an insn. */
5500 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5502 o
->in1
= load_reg(get_field(f
, r1
));
5504 #define SPEC_in1_r1 0
5506 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5508 o
->in1
= regs
[get_field(f
, r1
)];
5511 #define SPEC_in1_r1_o 0
5513 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5515 o
->in1
= tcg_temp_new_i64();
5516 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5518 #define SPEC_in1_r1_32s 0
5520 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5522 o
->in1
= tcg_temp_new_i64();
5523 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
5525 #define SPEC_in1_r1_32u 0
5527 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5529 o
->in1
= tcg_temp_new_i64();
5530 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
5532 #define SPEC_in1_r1_sr32 0
5534 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5536 o
->in1
= load_reg(get_field(f
, r1
) + 1);
5538 #define SPEC_in1_r1p1 SPEC_r1_even
5540 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5542 o
->in1
= tcg_temp_new_i64();
5543 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5545 #define SPEC_in1_r1p1_32s SPEC_r1_even
5547 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5549 o
->in1
= tcg_temp_new_i64();
5550 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
5552 #define SPEC_in1_r1p1_32u SPEC_r1_even
5554 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5556 int r1
= get_field(f
, r1
);
5557 o
->in1
= tcg_temp_new_i64();
5558 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
5560 #define SPEC_in1_r1_D32 SPEC_r1_even
5562 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5564 o
->in1
= load_reg(get_field(f
, r2
));
5566 #define SPEC_in1_r2 0
5568 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5570 o
->in1
= tcg_temp_new_i64();
5571 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5573 #define SPEC_in1_r2_sr32 0
5575 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5577 o
->in1
= load_reg(get_field(f
, r3
));
5579 #define SPEC_in1_r3 0
5581 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5583 o
->in1
= regs
[get_field(f
, r3
)];
5586 #define SPEC_in1_r3_o 0
5588 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5590 o
->in1
= tcg_temp_new_i64();
5591 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5593 #define SPEC_in1_r3_32s 0
5595 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5597 o
->in1
= tcg_temp_new_i64();
5598 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5600 #define SPEC_in1_r3_32u 0
5602 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5604 int r3
= get_field(f
, r3
);
5605 o
->in1
= tcg_temp_new_i64();
5606 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5608 #define SPEC_in1_r3_D32 SPEC_r3_even
5610 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5612 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5614 #define SPEC_in1_e1 0
5616 static void in1_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5618 o
->in1
= load_freg(get_field(f
, r1
));
5620 #define SPEC_in1_f1 0
5622 /* Load the high double word of an extended (128-bit) format FP number */
5623 static void in1_x2h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5625 o
->in1
= load_freg(get_field(f
, r2
));
5627 #define SPEC_in1_x2h SPEC_r2_f128
5629 static void in1_f3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5631 o
->in1
= load_freg(get_field(f
, r3
));
5633 #define SPEC_in1_f3 0
5635 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5637 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5639 #define SPEC_in1_la1 0
5641 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5643 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5644 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5646 #define SPEC_in1_la2 0
5648 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5651 o
->in1
= tcg_temp_new_i64();
5652 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5654 #define SPEC_in1_m1_8u 0
5656 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5659 o
->in1
= tcg_temp_new_i64();
5660 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5662 #define SPEC_in1_m1_16s 0
5664 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5667 o
->in1
= tcg_temp_new_i64();
5668 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5670 #define SPEC_in1_m1_16u 0
5672 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5675 o
->in1
= tcg_temp_new_i64();
5676 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5678 #define SPEC_in1_m1_32s 0
5680 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5683 o
->in1
= tcg_temp_new_i64();
5684 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5686 #define SPEC_in1_m1_32u 0
5688 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5691 o
->in1
= tcg_temp_new_i64();
5692 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5694 #define SPEC_in1_m1_64 0
5696 /* ====================================================================== */
5697 /* The "INput 2" generators. These load the second operand to an insn. */
5699 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5701 o
->in2
= regs
[get_field(f
, r1
)];
5704 #define SPEC_in2_r1_o 0
5706 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5708 o
->in2
= tcg_temp_new_i64();
5709 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5711 #define SPEC_in2_r1_16u 0
5713 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5715 o
->in2
= tcg_temp_new_i64();
5716 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5718 #define SPEC_in2_r1_32u 0
5720 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5722 int r1
= get_field(f
, r1
);
5723 o
->in2
= tcg_temp_new_i64();
5724 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5726 #define SPEC_in2_r1_D32 SPEC_r1_even
5728 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5730 o
->in2
= load_reg(get_field(f
, r2
));
5732 #define SPEC_in2_r2 0
5734 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5736 o
->in2
= regs
[get_field(f
, r2
)];
5739 #define SPEC_in2_r2_o 0
5741 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5743 int r2
= get_field(f
, r2
);
5745 o
->in2
= load_reg(r2
);
5748 #define SPEC_in2_r2_nz 0
5750 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5752 o
->in2
= tcg_temp_new_i64();
5753 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5755 #define SPEC_in2_r2_8s 0
5757 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5759 o
->in2
= tcg_temp_new_i64();
5760 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5762 #define SPEC_in2_r2_8u 0
5764 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5766 o
->in2
= tcg_temp_new_i64();
5767 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5769 #define SPEC_in2_r2_16s 0
5771 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5773 o
->in2
= tcg_temp_new_i64();
5774 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5776 #define SPEC_in2_r2_16u 0
5778 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5780 o
->in2
= load_reg(get_field(f
, r3
));
5782 #define SPEC_in2_r3 0
5784 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5786 o
->in2
= tcg_temp_new_i64();
5787 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5789 #define SPEC_in2_r3_sr32 0
5791 static void in2_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5793 o
->in2
= tcg_temp_new_i64();
5794 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r3
)]);
5796 #define SPEC_in2_r3_32u 0
5798 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5800 o
->in2
= tcg_temp_new_i64();
5801 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5803 #define SPEC_in2_r2_32s 0
5805 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5807 o
->in2
= tcg_temp_new_i64();
5808 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5810 #define SPEC_in2_r2_32u 0
5812 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5814 o
->in2
= tcg_temp_new_i64();
5815 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5817 #define SPEC_in2_r2_sr32 0
5819 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5821 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5823 #define SPEC_in2_e2 0
5825 static void in2_f2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5827 o
->in2
= load_freg(get_field(f
, r2
));
5829 #define SPEC_in2_f2 0
5831 /* Load the low double word of an extended (128-bit) format FP number */
5832 static void in2_x2l(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5834 o
->in2
= load_freg(get_field(f
, r2
) + 2);
5836 #define SPEC_in2_x2l SPEC_r2_f128
5838 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5840 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5842 #define SPEC_in2_ra2 0
5844 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5846 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5847 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5849 #define SPEC_in2_a2 0
5851 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5853 o
->in2
= tcg_const_i64(s
->base
.pc_next
+ (int64_t)get_field(f
, i2
) * 2);
5855 #define SPEC_in2_ri2 0
5857 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5859 help_l2_shift(s
, f
, o
, 31);
5861 #define SPEC_in2_sh32 0
5863 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5865 help_l2_shift(s
, f
, o
, 63);
5867 #define SPEC_in2_sh64 0
5869 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5872 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5874 #define SPEC_in2_m2_8u 0
5876 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5879 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5881 #define SPEC_in2_m2_16s 0
5883 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5886 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5888 #define SPEC_in2_m2_16u 0
5890 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5893 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5895 #define SPEC_in2_m2_32s 0
5897 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5900 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5902 #define SPEC_in2_m2_32u 0
5904 #ifndef CONFIG_USER_ONLY
5905 static void in2_m2_32ua(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5908 tcg_gen_qemu_ld_tl(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEUL
| MO_ALIGN
);
5910 #define SPEC_in2_m2_32ua 0
5913 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5916 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5918 #define SPEC_in2_m2_64 0
5920 #ifndef CONFIG_USER_ONLY
5921 static void in2_m2_64a(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5924 tcg_gen_qemu_ld_i64(o
->in2
, o
->in2
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
5926 #define SPEC_in2_m2_64a 0
5929 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5932 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5934 #define SPEC_in2_mri2_16u 0
5936 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5939 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5941 #define SPEC_in2_mri2_32s 0
5943 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5946 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5948 #define SPEC_in2_mri2_32u 0
5950 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5953 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5955 #define SPEC_in2_mri2_64 0
5957 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5959 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5961 #define SPEC_in2_i2 0
5963 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5965 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5967 #define SPEC_in2_i2_8u 0
5969 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5971 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5973 #define SPEC_in2_i2_16u 0
5975 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5977 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5979 #define SPEC_in2_i2_32u 0
5981 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5983 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5984 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5986 #define SPEC_in2_i2_16u_shl 0
5988 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5990 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5991 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5993 #define SPEC_in2_i2_32u_shl 0
5995 #ifndef CONFIG_USER_ONLY
5996 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5998 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
6000 #define SPEC_in2_insn 0
6003 /* ====================================================================== */
6005 /* Find opc within the table of insns. This is formulated as a switch
6006 statement so that (1) we get compile-time notice of cut-paste errors
6007 for duplicated opcodes, and (2) the compiler generates the binary
6008 search tree, rather than us having to post-process the table. */
6010 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6011 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6013 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6014 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6016 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6017 E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6019 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6021 enum DisasInsnEnum
{
6022 #include "insn-data.def"
6026 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
6031 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
6033 .help_in1 = in1_##I1, \
6034 .help_in2 = in2_##I2, \
6035 .help_prep = prep_##P, \
6036 .help_wout = wout_##W, \
6037 .help_cout = cout_##CC, \
6038 .help_op = op_##OP, \
/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
/* Give smaller names to the various facilities.  */
/* NOTE: the triple-P in SUPPPORT below matches the (misspelled) enum name
   declared in the s390x feature list; do not "correct" it here.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6096 static const DisasInsn insn_info
[] = {
6097 #include "insn-data.def"
6101 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6102 case OPC: return &insn_info[insn_ ## NM];
6104 static const DisasInsn
*lookup_opc(uint16_t opc
)
6107 #include "insn-data.def"
6118 /* Extract a field from the insn. The INSN should be left-aligned in
6119 the uint64_t so that we can more easily utilize the big-bit-endian
6120 definitions we extract from the Principals of Operation. */
6122 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
6130 /* Zero extract the field from the insn. */
6131 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
6133 /* Sign-extend, or un-swap the field as necessary. */
6135 case 0: /* unsigned */
6137 case 1: /* signed */
6138 assert(f
->size
<= 32);
6139 m
= 1u << (f
->size
- 1);
6142 case 2: /* dl+dh split, signed 20 bit. */
6143 r
= ((int8_t)r
<< 12) | (r
>> 8);
6145 case 3: /* MSB stored in RXB */
6146 g_assert(f
->size
== 4);
6149 r
|= extract64(insn
, 63 - 36, 1) << 4;
6152 r
|= extract64(insn
, 63 - 37, 1) << 4;
6155 r
|= extract64(insn
, 63 - 38, 1) << 4;
6158 r
|= extract64(insn
, 63 - 39, 1) << 4;
6161 g_assert_not_reached();
6168 /* Validate that the "compressed" encoding we selected above is valid.
6169 I.e. we havn't make two different original fields overlap. */
6170 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
6171 o
->presentC
|= 1 << f
->indexC
;
6172 o
->presentO
|= 1 << f
->indexO
;
6174 o
->c
[f
->indexC
] = r
;
6177 /* Lookup the insn at the current PC, extracting the operands into O and
6178 returning the info struct for the insn. Returns NULL for invalid insn. */
6180 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
6183 uint64_t insn
, pc
= s
->base
.pc_next
;
6185 const DisasInsn
*info
;
6187 if (unlikely(s
->ex_value
)) {
6188 /* Drop the EX data now, so that it's clear on exception paths. */
6189 TCGv_i64 zero
= tcg_const_i64(0);
6190 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
6191 tcg_temp_free_i64(zero
);
6193 /* Extract the values saved by EXECUTE. */
6194 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
6195 ilen
= s
->ex_value
& 0xf;
6198 insn
= ld_code2(env
, pc
);
6199 op
= (insn
>> 8) & 0xff;
6200 ilen
= get_ilen(op
);
6206 insn
= ld_code4(env
, pc
) << 32;
6209 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
6212 g_assert_not_reached();
6215 s
->pc_tmp
= s
->base
.pc_next
+ ilen
;
6218 /* We can't actually determine the insn format until we've looked up
6219 the full insn opcode. Which we can't do without locating the
6220 secondary opcode. Assume by default that OP2 is at bit 40; for
6221 those smaller insns that don't actually have a secondary opcode
6222 this will correctly result in OP2 = 0. */
6228 case 0xb2: /* S, RRF, RRE, IE */
6229 case 0xb3: /* RRE, RRD, RRF */
6230 case 0xb9: /* RRE, RRF */
6231 case 0xe5: /* SSE, SIL */
6232 op2
= (insn
<< 8) >> 56;
6236 case 0xc0: /* RIL */
6237 case 0xc2: /* RIL */
6238 case 0xc4: /* RIL */
6239 case 0xc6: /* RIL */
6240 case 0xc8: /* SSF */
6241 case 0xcc: /* RIL */
6242 op2
= (insn
<< 12) >> 60;
6244 case 0xc5: /* MII */
6245 case 0xc7: /* SMI */
6246 case 0xd0 ... 0xdf: /* SS */
6252 case 0xee ... 0xf3: /* SS */
6253 case 0xf8 ... 0xfd: /* SS */
6257 op2
= (insn
<< 40) >> 56;
6261 memset(f
, 0, sizeof(*f
));
6266 /* Lookup the instruction. */
6267 info
= lookup_opc(op
<< 8 | op2
);
6269 /* If we found it, extract the operands. */
6271 DisasFormat fmt
= info
->fmt
;
6274 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
6275 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/*
 * Return true if REG is an additional-floating-point (AFP) register,
 * i.e. anything other than FP registers 0, 2, 4 and 6.
 */
static bool is_afp_reg(int reg)
{
    /* Odd-numbered registers and everything above 6 are AFP-only.  */
    return (reg & 1) != 0 || reg > 6;
}
/* Return true if REG names a valid 128-bit FP register pair.  */
static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return (reg & 0x2) == 0;
}
6292 static DisasJumpType
translate_one(CPUS390XState
*env
, DisasContext
*s
)
6294 const DisasInsn
*insn
;
6295 DisasJumpType ret
= DISAS_NEXT
;
6299 /* Search for the insn in the table. */
6300 insn
= extract_insn(env
, s
, &f
);
6302 /* Not found means unimplemented/illegal opcode. */
6304 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
6306 gen_illegal_opcode(s
);
6307 return DISAS_NORETURN
;
6310 #ifndef CONFIG_USER_ONLY
6311 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6312 TCGv_i64 addr
= tcg_const_i64(s
->base
.pc_next
);
6313 gen_helper_per_ifetch(cpu_env
, addr
);
6314 tcg_temp_free_i64(addr
);
6320 /* privileged instruction */
6321 if ((s
->base
.tb
->flags
& FLAG_MASK_PSTATE
) && (insn
->flags
& IF_PRIV
)) {
6322 gen_program_exception(s
, PGM_PRIVILEGED
);
6323 return DISAS_NORETURN
;
6326 /* if AFP is not enabled, instructions and registers are forbidden */
6327 if (!(s
->base
.tb
->flags
& FLAG_MASK_AFP
)) {
6330 if ((insn
->flags
& IF_AFP1
) && is_afp_reg(get_field(&f
, r1
))) {
6333 if ((insn
->flags
& IF_AFP2
) && is_afp_reg(get_field(&f
, r2
))) {
6336 if ((insn
->flags
& IF_AFP3
) && is_afp_reg(get_field(&f
, r3
))) {
6339 if (insn
->flags
& IF_BFP
) {
6342 if (insn
->flags
& IF_DFP
) {
6345 if (insn
->flags
& IF_VEC
) {
6349 gen_data_exception(dxc
);
6350 return DISAS_NORETURN
;
6354 /* if vector instructions not enabled, executing them is forbidden */
6355 if (insn
->flags
& IF_VEC
) {
6356 if (!((s
->base
.tb
->flags
& FLAG_MASK_VECTOR
))) {
6357 gen_data_exception(0xfe);
6358 return DISAS_NORETURN
;
6363 /* Check for insn specification exceptions. */
6365 if ((insn
->spec
& SPEC_r1_even
&& get_field(&f
, r1
) & 1) ||
6366 (insn
->spec
& SPEC_r2_even
&& get_field(&f
, r2
) & 1) ||
6367 (insn
->spec
& SPEC_r3_even
&& get_field(&f
, r3
) & 1) ||
6368 (insn
->spec
& SPEC_r1_f128
&& !is_fp_pair(get_field(&f
, r1
))) ||
6369 (insn
->spec
& SPEC_r2_f128
&& !is_fp_pair(get_field(&f
, r2
)))) {
6370 gen_program_exception(s
, PGM_SPECIFICATION
);
6371 return DISAS_NORETURN
;
6375 /* Set up the strutures we use to communicate with the helpers. */
6379 /* Implement the instruction. */
6380 if (insn
->help_in1
) {
6381 insn
->help_in1(s
, &f
, &o
);
6383 if (insn
->help_in2
) {
6384 insn
->help_in2(s
, &f
, &o
);
6386 if (insn
->help_prep
) {
6387 insn
->help_prep(s
, &f
, &o
);
6389 if (insn
->help_op
) {
6390 ret
= insn
->help_op(s
, &o
);
6392 if (ret
!= DISAS_NORETURN
) {
6393 if (insn
->help_wout
) {
6394 insn
->help_wout(s
, &f
, &o
);
6396 if (insn
->help_cout
) {
6397 insn
->help_cout(s
, &o
);
6401 /* Free any temporaries created by the helpers. */
6402 if (o
.out
&& !o
.g_out
) {
6403 tcg_temp_free_i64(o
.out
);
6405 if (o
.out2
&& !o
.g_out2
) {
6406 tcg_temp_free_i64(o
.out2
);
6408 if (o
.in1
&& !o
.g_in1
) {
6409 tcg_temp_free_i64(o
.in1
);
6411 if (o
.in2
&& !o
.g_in2
) {
6412 tcg_temp_free_i64(o
.in2
);
6415 tcg_temp_free_i64(o
.addr1
);
6418 #ifndef CONFIG_USER_ONLY
6419 if (s
->base
.tb
->flags
& FLAG_MASK_PER
) {
6420 /* An exception might be triggered, save PSW if not already done. */
6421 if (ret
== DISAS_NEXT
|| ret
== DISAS_PC_STALE
) {
6422 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
6425 /* Call the helper to check for a possible PER exception. */
6426 gen_helper_per_check_exception(cpu_env
);
6430 /* Advance to the next instruction. */
6431 s
->base
.pc_next
= s
->pc_tmp
;
6435 static void s390x_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
6437 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6440 if (!(dc
->base
.tb
->flags
& FLAG_MASK_64
)) {
6441 dc
->base
.pc_first
&= 0x7fffffff;
6442 dc
->base
.pc_next
= dc
->base
.pc_first
;
6445 dc
->cc_op
= CC_OP_DYNAMIC
;
6446 dc
->ex_value
= dc
->base
.tb
->cs_base
;
6447 dc
->do_debug
= dc
->base
.singlestep_enabled
;
6450 static void s390x_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
6454 static void s390x_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
6456 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6458 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
);
6461 static bool s390x_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
6462 const CPUBreakpoint
*bp
)
6464 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6466 dc
->base
.is_jmp
= DISAS_PC_STALE
;
6467 dc
->do_debug
= true;
6468 /* The address covered by the breakpoint must be included in
6469 [tb->pc, tb->pc + tb->size) in order to for it to be
6470 properly cleared -- thus we increment the PC here so that
6471 the logic setting tb->size does the right thing. */
6472 dc
->base
.pc_next
+= 2;
6476 static void s390x_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
6478 CPUS390XState
*env
= cs
->env_ptr
;
6479 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6481 dc
->base
.is_jmp
= translate_one(env
, dc
);
6482 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
6483 uint64_t page_start
;
6485 page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
6486 if (dc
->base
.pc_next
- page_start
>= TARGET_PAGE_SIZE
|| dc
->ex_value
) {
6487 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
6492 static void s390x_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
6494 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6496 switch (dc
->base
.is_jmp
) {
6498 case DISAS_NORETURN
:
6500 case DISAS_TOO_MANY
:
6501 case DISAS_PC_STALE
:
6502 case DISAS_PC_STALE_NOCHAIN
:
6503 update_psw_addr(dc
);
6505 case DISAS_PC_UPDATED
:
6506 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6507 cc op type is in env */
6510 case DISAS_PC_CC_UPDATED
:
6511 /* Exit the TB, either by raising a debug exception or by return. */
6513 gen_exception(EXCP_DEBUG
);
6514 } else if (use_exit_tb(dc
) ||
6515 dc
->base
.is_jmp
== DISAS_PC_STALE_NOCHAIN
) {
6516 tcg_gen_exit_tb(NULL
, 0);
6518 tcg_gen_lookup_and_goto_ptr();
6522 g_assert_not_reached();
6526 static void s390x_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
6528 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
6530 if (unlikely(dc
->ex_value
)) {
6531 /* ??? Unfortunately log_target_disas can't use host memory. */
6532 qemu_log("IN: EXECUTE %016" PRIx64
, dc
->ex_value
);
6534 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
6535 log_target_disas(cs
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
6539 static const TranslatorOps s390x_tr_ops
= {
6540 .init_disas_context
= s390x_tr_init_disas_context
,
6541 .tb_start
= s390x_tr_tb_start
,
6542 .insn_start
= s390x_tr_insn_start
,
6543 .breakpoint_check
= s390x_tr_breakpoint_check
,
6544 .translate_insn
= s390x_tr_translate_insn
,
6545 .tb_stop
= s390x_tr_tb_stop
,
6546 .disas_log
= s390x_tr_disas_log
,
6549 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
6553 translator_loop(&s390x_tr_ops
, &dc
.base
, cs
, tb
, max_insns
);
6556 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
6559 int cc_op
= data
[1];
6560 env
->psw
.addr
= data
[0];
6561 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {