/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    /* ... FLD_O_* enumerators elided in this excerpt ... */
};

enum DisasFieldIndexC {
    /* ... FLD_C_* enumerators elided in this excerpt ... */
};

struct DisasFields {
    /* ... raw instruction and opcode bookkeeping elided in this excerpt ... */
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
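
/*
 * Illustrative note (not part of the original file): callers such as op_bas()
 * below pass s->pc_tmp here to form the link value.  In 64-bit mode the full
 * return address is stored; in 31-bit mode the address is OR'ed with
 * 0x80000000 and deposited into bits 0-31 of the output register, leaving
 * bits 32-63 untouched; 24-bit mode deposits the address without that bit.
 */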
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
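
/*
 * Illustrative example (not part of the original file): on a little-endian
 * host, vec_reg_offset(1, 2, MO_32) gives bytes = 4 and offs = 2 * 4 = 8;
 * the endianness fixup then yields 8 ^ (8 - 4) = 12, so word element 2 of
 * v1 lives at byte offset 12 of the host storage, matching the "W" row of
 * the little-endian table above.
 */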
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
    }
#endif
}
static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* Update the psw and save off cc before raising the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
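
/*
 * Illustrative example (not part of the original file): for an RX-style
 * operand D2(X2,B2) such as 0x7ff(%r2,%r3), get_address(s, 2, 3, 0x7ff)
 * emits tmp = r3 + r2 followed by gen_addi_and_wrap_i64(s, tmp, tmp, 0x7ff),
 * which masks the sum to 24 or 31 bits unless the CPU runs with 64-bit
 * addressing (FLAG_MASK_64).
 */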
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    /* ... cases that call the helper below also materialise the op number ... */
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    /* ... constant, static and dynamic cases need neither ... */
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    /* ... one-operand CC_OP_* cases elided in this excerpt ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    /* ... other two-operand CC_OP_* cases elided in this excerpt ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    /* ... three-operand CC_OP_* cases elided in this excerpt ... */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
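
/*
 * Illustrative example (not part of the original file): a BRC with mask 8
 * ("branch on equal") following a signed comparison (CC_OP_LTGT_32/64)
 * selects ltgt_cond[8] == TCG_COND_EQ, so disas_jcc() below can emit a
 * single TCG compare-and-branch instead of materialising the CC value.
 */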
710 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
711 details required to generate a TCG comparison. */
712 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
715 enum cc_op old_cc_op
= s
->cc_op
;
717 if (mask
== 15 || mask
== 0) {
718 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
725 /* Find the TCG condition for the mask + cc op. */
731 cond
= ltgt_cond
[mask
];
732 if (cond
== TCG_COND_NEVER
) {
735 account_inline_branch(s
, old_cc_op
);
738 case CC_OP_LTUGTU_32
:
739 case CC_OP_LTUGTU_64
:
740 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
741 if (cond
== TCG_COND_NEVER
) {
744 account_inline_branch(s
, old_cc_op
);
748 cond
= nz_cond
[mask
];
749 if (cond
== TCG_COND_NEVER
) {
752 account_inline_branch(s
, old_cc_op
);
767 account_inline_branch(s
, old_cc_op
);
782 account_inline_branch(s
, old_cc_op
);
786 switch (mask
& 0xa) {
787 case 8: /* src == 0 -> no one bit found */
790 case 2: /* src != 0 -> one bit found */
796 account_inline_branch(s
, old_cc_op
);
802 case 8 | 2: /* result == 0 */
805 case 4 | 1: /* result != 0 */
808 case 8 | 4: /* !carry (borrow) */
809 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_EQ
: TCG_COND_NE
;
811 case 2 | 1: /* carry (!borrow) */
812 cond
= old_cc_op
== CC_OP_ADDU
? TCG_COND_NE
: TCG_COND_EQ
;
817 account_inline_branch(s
, old_cc_op
);
822 /* Calculate cc value. */
827 /* Jump based on CC. We'll load up the real cond below;
828 the assignment here merely avoids a compiler warning. */
829 account_noninline_branch(s
, old_cc_op
);
830 old_cc_op
= CC_OP_STATIC
;
831 cond
= TCG_COND_NEVER
;
835 /* Load up the arguments of the comparison. */
840 c
->u
.s32
.a
= tcg_temp_new_i32();
841 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
842 c
->u
.s32
.b
= tcg_constant_i32(0);
845 case CC_OP_LTUGTU_32
:
847 c
->u
.s32
.a
= tcg_temp_new_i32();
848 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
849 c
->u
.s32
.b
= tcg_temp_new_i32();
850 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
857 c
->u
.s64
.b
= tcg_constant_i64(0);
860 case CC_OP_LTUGTU_64
:
868 c
->u
.s64
.a
= tcg_temp_new_i64();
869 c
->u
.s64
.b
= tcg_constant_i64(0);
870 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
876 c
->u
.s64
.b
= tcg_constant_i64(0);
879 case 4 | 1: /* result */
883 case 2 | 1: /* carry */
887 g_assert_not_reached();
895 case 0x8 | 0x4 | 0x2: /* cc != 3 */
897 c
->u
.s32
.b
= tcg_constant_i32(3);
899 case 0x8 | 0x4 | 0x1: /* cc != 2 */
901 c
->u
.s32
.b
= tcg_constant_i32(2);
903 case 0x8 | 0x2 | 0x1: /* cc != 1 */
905 c
->u
.s32
.b
= tcg_constant_i32(1);
907 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
909 c
->u
.s32
.a
= tcg_temp_new_i32();
910 c
->u
.s32
.b
= tcg_constant_i32(0);
911 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
913 case 0x8 | 0x4: /* cc < 2 */
915 c
->u
.s32
.b
= tcg_constant_i32(2);
917 case 0x8: /* cc == 0 */
919 c
->u
.s32
.b
= tcg_constant_i32(0);
921 case 0x4 | 0x2 | 0x1: /* cc != 0 */
923 c
->u
.s32
.b
= tcg_constant_i32(0);
925 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
927 c
->u
.s32
.a
= tcg_temp_new_i32();
928 c
->u
.s32
.b
= tcg_constant_i32(0);
929 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
931 case 0x4: /* cc == 1 */
933 c
->u
.s32
.b
= tcg_constant_i32(1);
935 case 0x2 | 0x1: /* cc > 1 */
937 c
->u
.s32
.b
= tcg_constant_i32(1);
939 case 0x2: /* cc == 2 */
941 c
->u
.s32
.b
= tcg_constant_i32(2);
943 case 0x1: /* cc == 3 */
945 c
->u
.s32
.b
= tcg_constant_i32(3);
948 /* CC is masked by something else: (8 >> cc) & mask. */
950 c
->u
.s32
.a
= tcg_temp_new_i32();
951 c
->u
.s32
.b
= tcg_constant_i32(0);
952 tcg_gen_shr_i32(c
->u
.s32
.a
, tcg_constant_i32(8), cc_op
);
953 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
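
/*
 * Illustrative usage sketch (not part of the original file): an operation
 * handler reads its decoded operands through these wrappers, e.g.
 *
 *     int r1 = get_field(s, r1);
 *     if (have_field(s, m3)) {
 *         ...
 *     }
 *
 * where the field name expands to the FLD_O_xxx / FLD_C_xxx indices above.
 */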
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                         { { } },
#define F1(N, X1)                     { { X1 } },
#define F2(N, X1, X2)                 { { X1, X2 } },
#define F3(N, X1, X2, X3)             { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)         { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5)     { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
struct DisasInsn {
    /* ... opcode, name, format, facility and flag members elided here ... */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* ... remaining members (e.g. the per-insn "data" payload) elided ... */
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
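
/*
 * Illustrative note (not part of the original file): when use_goto_tb()
 * accepts the destination, the generated code chains directly to the target
 * translation block via tcg_gen_goto_tb()/tcg_gen_exit_tb(), avoiding a trip
 * through the main execution loop; otherwise psw_addr is updated and
 * DISAS_PC_UPDATED makes the caller look the next TB up dynamically.
 */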
1152 static DisasJumpType
help_branch(DisasContext
*s
, DisasCompare
*c
,
1153 bool is_imm
, int imm
, TCGv_i64 cdest
)
1156 uint64_t dest
= s
->base
.pc_next
+ (int64_t)imm
* 2;
1159 /* Take care of the special cases first. */
1160 if (c
->cond
== TCG_COND_NEVER
) {
1165 if (dest
== s
->pc_tmp
) {
1166 /* Branch to next. */
1167 per_branch(s
, true);
1171 if (c
->cond
== TCG_COND_ALWAYS
) {
1172 ret
= help_goto_direct(s
, dest
);
1177 /* E.g. bcr %r0 -> no branch. */
1181 if (c
->cond
== TCG_COND_ALWAYS
) {
1182 tcg_gen_mov_i64(psw_addr
, cdest
);
1183 per_branch(s
, false);
1184 ret
= DISAS_PC_UPDATED
;
1189 if (use_goto_tb(s
, s
->pc_tmp
)) {
1190 if (is_imm
&& use_goto_tb(s
, dest
)) {
1191 /* Both exits can use goto_tb. */
1194 lab
= gen_new_label();
1196 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1198 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1201 /* Branch not taken. */
1203 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1204 tcg_gen_exit_tb(s
->base
.tb
, 0);
1208 per_breaking_event(s
);
1210 tcg_gen_movi_i64(psw_addr
, dest
);
1211 tcg_gen_exit_tb(s
->base
.tb
, 1);
1213 ret
= DISAS_NORETURN
;
1215 /* Fallthru can use goto_tb, but taken branch cannot. */
1216 /* Store taken branch destination before the brcond. This
1217 avoids having to allocate a new local temp to hold it.
1218 We'll overwrite this in the not taken case anyway. */
1220 tcg_gen_mov_i64(psw_addr
, cdest
);
1223 lab
= gen_new_label();
1225 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1227 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1230 /* Branch not taken. */
1233 tcg_gen_movi_i64(psw_addr
, s
->pc_tmp
);
1234 tcg_gen_exit_tb(s
->base
.tb
, 0);
1238 tcg_gen_movi_i64(psw_addr
, dest
);
1240 per_breaking_event(s
);
1241 ret
= DISAS_PC_UPDATED
;
1244 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1245 Most commonly we're single-stepping or some other condition that
1246 disables all use of goto_tb. Just update the PC and exit. */
1248 TCGv_i64 next
= tcg_constant_i64(s
->pc_tmp
);
1250 cdest
= tcg_constant_i64(dest
);
1254 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1256 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1258 TCGv_i32 t0
= tcg_temp_new_i32();
1259 TCGv_i64 t1
= tcg_temp_new_i64();
1260 TCGv_i64 z
= tcg_constant_i64(0);
1261 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1262 tcg_gen_extu_i32_i64(t1
, t0
);
1263 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1264 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1267 ret
= DISAS_PC_UPDATED
;
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1316 /* Compute carry into cc_src. */
1317 static void compute_carry(DisasContext
*s
)
1321 /* The carry value is already in cc_src (1,0). */
1324 tcg_gen_addi_i64(cc_src
, cc_src
, 1);
1330 /* The carry flag is the msb of CC; compute into cc_src. */
1331 tcg_gen_extu_i32_i64(cc_src
, cc_op
);
1332 tcg_gen_shri_i64(cc_src
, cc_src
, 1);
1337 static DisasJumpType
op_addc32(DisasContext
*s
, DisasOps
*o
)
1340 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1341 tcg_gen_add_i64(o
->out
, o
->out
, cc_src
);
1345 static DisasJumpType
op_addc64(DisasContext
*s
, DisasOps
*o
)
1349 TCGv_i64 zero
= tcg_constant_i64(0);
1350 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, zero
, cc_src
, zero
);
1351 tcg_gen_add2_i64(o
->out
, cc_src
, o
->out
, cc_src
, o
->in2
, zero
);
1356 static DisasJumpType
op_asi(DisasContext
*s
, DisasOps
*o
)
1358 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1360 o
->in1
= tcg_temp_new_i64();
1362 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1364 /* Perform the atomic addition in memory. */
1365 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1369 /* Recompute also for atomic case: needed for setting CC. */
1370 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1373 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1378 static DisasJumpType
op_asiu64(DisasContext
*s
, DisasOps
*o
)
1380 bool non_atomic
= !s390_has_feat(S390_FEAT_STFLE_45
);
1382 o
->in1
= tcg_temp_new_i64();
1384 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1386 /* Perform the atomic addition in memory. */
1387 tcg_gen_atomic_fetch_add_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1391 /* Recompute also for atomic case: needed for setting CC. */
1392 tcg_gen_movi_i64(cc_src
, 0);
1393 tcg_gen_add2_i64(o
->out
, cc_src
, o
->in1
, cc_src
, o
->in2
, cc_src
);
1396 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1401 static DisasJumpType
op_aeb(DisasContext
*s
, DisasOps
*o
)
1403 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1407 static DisasJumpType
op_adb(DisasContext
*s
, DisasOps
*o
)
1409 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1413 static DisasJumpType
op_axb(DisasContext
*s
, DisasOps
*o
)
1415 gen_helper_axb(o
->out_128
, cpu_env
, o
->in1_128
, o
->in2_128
);
1419 static DisasJumpType
op_and(DisasContext
*s
, DisasOps
*o
)
1421 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1425 static DisasJumpType
op_andi(DisasContext
*s
, DisasOps
*o
)
1427 int shift
= s
->insn
->data
& 0xff;
1428 int size
= s
->insn
->data
>> 8;
1429 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1430 TCGv_i64 t
= tcg_temp_new_i64();
1432 tcg_gen_shli_i64(t
, o
->in2
, shift
);
1433 tcg_gen_ori_i64(t
, t
, ~mask
);
1434 tcg_gen_and_i64(o
->out
, o
->in1
, t
);
1436 /* Produce the CC from only the bits manipulated. */
1437 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1438 set_cc_nz_u64(s
, cc_dst
);
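
/*
 * Illustrative example (not part of the original file): the decode table
 * packs the immediate's position into insn->data as (size << 8) | shift.
 * For an insn that operates on the low halfword, data = (16 << 8) | 0,
 * so shift = 0, size = 16 and mask = 0x000000000000ffffull; only those
 * bits feed the CC computation above.
 */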
1442 static DisasJumpType
op_andc(DisasContext
*s
, DisasOps
*o
)
1444 tcg_gen_andc_i64(o
->out
, o
->in1
, o
->in2
);
1448 static DisasJumpType
op_orc(DisasContext
*s
, DisasOps
*o
)
1450 tcg_gen_orc_i64(o
->out
, o
->in1
, o
->in2
);
1454 static DisasJumpType
op_nand(DisasContext
*s
, DisasOps
*o
)
1456 tcg_gen_nand_i64(o
->out
, o
->in1
, o
->in2
);
1460 static DisasJumpType
op_nor(DisasContext
*s
, DisasOps
*o
)
1462 tcg_gen_nor_i64(o
->out
, o
->in1
, o
->in2
);
1466 static DisasJumpType
op_nxor(DisasContext
*s
, DisasOps
*o
)
1468 tcg_gen_eqv_i64(o
->out
, o
->in1
, o
->in2
);
1472 static DisasJumpType
op_ni(DisasContext
*s
, DisasOps
*o
)
1474 o
->in1
= tcg_temp_new_i64();
1476 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1477 tcg_gen_qemu_ld_tl(o
->in1
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1479 /* Perform the atomic operation in memory. */
1480 tcg_gen_atomic_fetch_and_i64(o
->in1
, o
->addr1
, o
->in2
, get_mem_index(s
),
1484 /* Recompute also for atomic case: needed for setting CC. */
1485 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1487 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2
)) {
1488 tcg_gen_qemu_st_tl(o
->out
, o
->addr1
, get_mem_index(s
), s
->insn
->data
);
1493 static DisasJumpType
op_bas(DisasContext
*s
, DisasOps
*o
)
1495 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1497 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1498 per_branch(s
, false);
1499 return DISAS_PC_UPDATED
;
1505 static void save_link_info(DisasContext
*s
, DisasOps
*o
)
1509 if (s
->base
.tb
->flags
& (FLAG_MASK_32
| FLAG_MASK_64
)) {
1510 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1514 tcg_gen_andi_i64(o
->out
, o
->out
, 0xffffffff00000000ull
);
1515 tcg_gen_ori_i64(o
->out
, o
->out
, ((s
->ilen
/ 2) << 30) | s
->pc_tmp
);
1516 t
= tcg_temp_new_i64();
1517 tcg_gen_shri_i64(t
, psw_mask
, 16);
1518 tcg_gen_andi_i64(t
, t
, 0x0f000000);
1519 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1520 tcg_gen_extu_i32_i64(t
, cc_op
);
1521 tcg_gen_shli_i64(t
, t
, 28);
1522 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1525 static DisasJumpType
op_bal(DisasContext
*s
, DisasOps
*o
)
1527 save_link_info(s
, o
);
1529 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1530 per_branch(s
, false);
1531 return DISAS_PC_UPDATED
;
1538 * Disassemble the target of a branch. The results are returned in a form
1539 * suitable for passing into help_branch():
1541 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1542 * branches, whose DisasContext *S contains the relative immediate field RI,
1543 * are considered fixed. All the other branches are considered computed.
1544 * - int IMM is the value of RI.
1545 * - TCGv_i64 CDEST is the address of the computed target.
1547 #define disas_jdest(s, ri, is_imm, imm, cdest) do { \
1548 if (have_field(s, ri)) { \
1549 if (unlikely(s->ex_value)) { \
1550 cdest = tcg_temp_new_i64(); \
1551 tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
1552 tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \
1560 imm = is_imm ? get_field(s, ri) : 0; \
1563 static DisasJumpType
op_basi(DisasContext
*s
, DisasOps
*o
)
1569 pc_to_link_info(o
->out
, s
, s
->pc_tmp
);
1571 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1572 disas_jcc(s
, &c
, 0xf);
1573 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1576 static DisasJumpType
op_bc(DisasContext
*s
, DisasOps
*o
)
1578 int m1
= get_field(s
, m1
);
1583 /* BCR with R2 = 0 causes no branching */
1584 if (have_field(s
, r2
) && get_field(s
, r2
) == 0) {
1586 /* Perform serialization */
1587 /* FIXME: check for fast-BCR-serialization facility */
1588 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1591 /* Perform serialization */
1592 /* FIXME: perform checkpoint-synchronisation */
1593 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1598 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1599 disas_jcc(s
, &c
, m1
);
1600 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1603 static DisasJumpType
op_bct32(DisasContext
*s
, DisasOps
*o
)
1605 int r1
= get_field(s
, r1
);
1611 c
.cond
= TCG_COND_NE
;
1614 t
= tcg_temp_new_i64();
1615 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1616 store_reg32_i64(r1
, t
);
1617 c
.u
.s32
.a
= tcg_temp_new_i32();
1618 c
.u
.s32
.b
= tcg_constant_i32(0);
1619 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1621 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1622 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1625 static DisasJumpType
op_bcth(DisasContext
*s
, DisasOps
*o
)
1627 int r1
= get_field(s
, r1
);
1628 int imm
= get_field(s
, i2
);
1632 c
.cond
= TCG_COND_NE
;
1635 t
= tcg_temp_new_i64();
1636 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1637 tcg_gen_subi_i64(t
, t
, 1);
1638 store_reg32h_i64(r1
, t
);
1639 c
.u
.s32
.a
= tcg_temp_new_i32();
1640 c
.u
.s32
.b
= tcg_constant_i32(0);
1641 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1643 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1646 static DisasJumpType
op_bct64(DisasContext
*s
, DisasOps
*o
)
1648 int r1
= get_field(s
, r1
);
1653 c
.cond
= TCG_COND_NE
;
1656 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1657 c
.u
.s64
.a
= regs
[r1
];
1658 c
.u
.s64
.b
= tcg_constant_i64(0);
1660 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1661 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1664 static DisasJumpType
op_bx32(DisasContext
*s
, DisasOps
*o
)
1666 int r1
= get_field(s
, r1
);
1667 int r3
= get_field(s
, r3
);
1673 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1676 t
= tcg_temp_new_i64();
1677 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1678 c
.u
.s32
.a
= tcg_temp_new_i32();
1679 c
.u
.s32
.b
= tcg_temp_new_i32();
1680 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1681 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1682 store_reg32_i64(r1
, t
);
1684 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1685 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1688 static DisasJumpType
op_bx64(DisasContext
*s
, DisasOps
*o
)
1690 int r1
= get_field(s
, r1
);
1691 int r3
= get_field(s
, r3
);
1696 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1699 if (r1
== (r3
| 1)) {
1700 c
.u
.s64
.b
= load_reg(r3
| 1);
1702 c
.u
.s64
.b
= regs
[r3
| 1];
1705 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1706 c
.u
.s64
.a
= regs
[r1
];
1708 disas_jdest(s
, i2
, is_imm
, imm
, o
->in2
);
1709 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1712 static DisasJumpType
op_cj(DisasContext
*s
, DisasOps
*o
)
1714 int imm
, m3
= get_field(s
, m3
);
1718 c
.cond
= ltgt_cond
[m3
];
1719 if (s
->insn
->data
) {
1720 c
.cond
= tcg_unsigned_cond(c
.cond
);
1727 disas_jdest(s
, i4
, is_imm
, imm
, o
->out
);
1728 if (!is_imm
&& !o
->out
) {
1730 o
->out
= get_address(s
, 0, get_field(s
, b4
),
1734 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1737 static DisasJumpType
op_ceb(DisasContext
*s
, DisasOps
*o
)
1739 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1744 static DisasJumpType
op_cdb(DisasContext
*s
, DisasOps
*o
)
1746 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1751 static DisasJumpType
op_cxb(DisasContext
*s
, DisasOps
*o
)
1753 gen_helper_cxb(cc_op
, cpu_env
, o
->in1_128
, o
->in2_128
);
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
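
/*
 * Illustrative example (not part of the original file): deposit32(m3, 4, 4, m4)
 * packs both modifier fields into one constant, m3 in bits 0-3 and m4 in
 * bits 4-7.  With m3 = 1 and m4 = 0 the helper receives 0x01; with m3 = 5
 * and m4 = 2 it receives 0x25.
 */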
1783 static DisasJumpType
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1785 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1788 return DISAS_NORETURN
;
1790 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1795 static DisasJumpType
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1797 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1800 return DISAS_NORETURN
;
1802 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1807 static DisasJumpType
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1809 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1812 return DISAS_NORETURN
;
1814 gen_helper_cfxb(o
->out
, cpu_env
, o
->in2_128
, m34
);
1819 static DisasJumpType
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1821 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1824 return DISAS_NORETURN
;
1826 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1831 static DisasJumpType
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1833 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1836 return DISAS_NORETURN
;
1838 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1843 static DisasJumpType
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1845 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
1848 return DISAS_NORETURN
;
1850 gen_helper_cgxb(o
->out
, cpu_env
, o
->in2_128
, m34
);
1855 static DisasJumpType
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1857 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1860 return DISAS_NORETURN
;
1862 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m34
);
1867 static DisasJumpType
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1869 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1872 return DISAS_NORETURN
;
1874 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m34
);
1879 static DisasJumpType
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1881 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1884 return DISAS_NORETURN
;
1886 gen_helper_clfxb(o
->out
, cpu_env
, o
->in2_128
, m34
);
1891 static DisasJumpType
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1893 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1896 return DISAS_NORETURN
;
1898 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m34
);
1903 static DisasJumpType
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1905 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1908 return DISAS_NORETURN
;
1910 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m34
);
1915 static DisasJumpType
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1917 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1920 return DISAS_NORETURN
;
1922 gen_helper_clgxb(o
->out
, cpu_env
, o
->in2_128
, m34
);
1927 static DisasJumpType
op_cegb(DisasContext
*s
, DisasOps
*o
)
1929 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1932 return DISAS_NORETURN
;
1934 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m34
);
1938 static DisasJumpType
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1940 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1943 return DISAS_NORETURN
;
1945 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m34
);
1949 static DisasJumpType
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1951 TCGv_i32 m34
= fpinst_extract_m34(s
, true, true);
1954 return DISAS_NORETURN
;
1956 gen_helper_cxgb(o
->out_128
, cpu_env
, o
->in2
, m34
);
1960 static DisasJumpType
op_celgb(DisasContext
*s
, DisasOps
*o
)
1962 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1965 return DISAS_NORETURN
;
1967 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m34
);
1971 static DisasJumpType
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1973 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1976 return DISAS_NORETURN
;
1978 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m34
);
1982 static DisasJumpType
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1984 TCGv_i32 m34
= fpinst_extract_m34(s
, false, false);
1987 return DISAS_NORETURN
;
1989 gen_helper_cxlgb(o
->out_128
, cpu_env
, o
->in2
, m34
);
1993 static DisasJumpType
op_cksm(DisasContext
*s
, DisasOps
*o
)
1995 int r2
= get_field(s
, r2
);
1996 TCGv_i128 pair
= tcg_temp_new_i128();
1997 TCGv_i64 len
= tcg_temp_new_i64();
1999 gen_helper_cksm(pair
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
2001 tcg_gen_extr_i128_i64(o
->out
, len
, pair
);
2003 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
2004 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
2009 static DisasJumpType
op_clc(DisasContext
*s
, DisasOps
*o
)
2011 int l
= get_field(s
, l1
);
2020 mop
= ctz32(l
+ 1) | MO_TE
;
2021 tcg_gen_qemu_ld_tl(cc_src
, o
->addr1
, get_mem_index(s
), mop
);
2022 tcg_gen_qemu_ld_tl(cc_dst
, o
->in2
, get_mem_index(s
), mop
);
2023 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2026 vl
= tcg_constant_i32(l
);
2027 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2033 static DisasJumpType
op_clcl(DisasContext
*s
, DisasOps
*o
)
2035 int r1
= get_field(s
, r1
);
2036 int r2
= get_field(s
, r2
);
2039 /* r1 and r2 must be even. */
2040 if (r1
& 1 || r2
& 1) {
2041 gen_program_exception(s
, PGM_SPECIFICATION
);
2042 return DISAS_NORETURN
;
2045 t1
= tcg_constant_i32(r1
);
2046 t2
= tcg_constant_i32(r2
);
2047 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
2052 static DisasJumpType
op_clcle(DisasContext
*s
, DisasOps
*o
)
2054 int r1
= get_field(s
, r1
);
2055 int r3
= get_field(s
, r3
);
2058 /* r1 and r3 must be even. */
2059 if (r1
& 1 || r3
& 1) {
2060 gen_program_exception(s
, PGM_SPECIFICATION
);
2061 return DISAS_NORETURN
;
2064 t1
= tcg_constant_i32(r1
);
2065 t3
= tcg_constant_i32(r3
);
2066 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2071 static DisasJumpType
op_clclu(DisasContext
*s
, DisasOps
*o
)
2073 int r1
= get_field(s
, r1
);
2074 int r3
= get_field(s
, r3
);
2077 /* r1 and r3 must be even. */
2078 if (r1
& 1 || r3
& 1) {
2079 gen_program_exception(s
, PGM_SPECIFICATION
);
2080 return DISAS_NORETURN
;
2083 t1
= tcg_constant_i32(r1
);
2084 t3
= tcg_constant_i32(r3
);
2085 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
2090 static DisasJumpType
op_clm(DisasContext
*s
, DisasOps
*o
)
2092 TCGv_i32 m3
= tcg_constant_i32(get_field(s
, m3
));
2093 TCGv_i32 t1
= tcg_temp_new_i32();
2095 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
2096 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2101 static DisasJumpType
op_clst(DisasContext
*s
, DisasOps
*o
)
2103 TCGv_i128 pair
= tcg_temp_new_i128();
2105 gen_helper_clst(pair
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2106 tcg_gen_extr_i128_i64(o
->in2
, o
->in1
, pair
);
2112 static DisasJumpType
op_cps(DisasContext
*s
, DisasOps
*o
)
2114 TCGv_i64 t
= tcg_temp_new_i64();
2115 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
2116 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
2117 tcg_gen_or_i64(o
->out
, o
->out
, t
);
2121 static DisasJumpType
op_cs(DisasContext
*s
, DisasOps
*o
)
2123 int d2
= get_field(s
, d2
);
2124 int b2
= get_field(s
, b2
);
2127 /* Note that in1 = R3 (new value) and
2128 in2 = (zero-extended) R1 (expected value). */
2130 addr
= get_address(s
, 0, b2
, d2
);
2131 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
2132 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
2134 /* Are the memory and expected values (un)equal? Note that this setcond
2135 produces the output CC value, thus the NE sense of the test. */
2136 cc
= tcg_temp_new_i64();
2137 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
2138 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2144 static DisasJumpType
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2146 int r1
= get_field(s
, r1
);
2148 o
->out_128
= tcg_temp_new_i128();
2149 tcg_gen_concat_i64_i128(o
->out_128
, regs
[r1
+ 1], regs
[r1
]);
2151 /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value. */
2152 tcg_gen_atomic_cmpxchg_i128(o
->out_128
, o
->addr1
, o
->out_128
, o
->in2_128
,
2153 get_mem_index(s
), MO_BE
| MO_128
| MO_ALIGN
);
2156 * Extract result into cc_dst:cc_src, compare vs the expected value
2157 * in the as yet unmodified input registers, then update CC_OP.
2159 tcg_gen_extr_i128_i64(cc_src
, cc_dst
, o
->out_128
);
2160 tcg_gen_xor_i64(cc_dst
, cc_dst
, regs
[r1
]);
2161 tcg_gen_xor_i64(cc_src
, cc_src
, regs
[r1
+ 1]);
2162 tcg_gen_or_i64(cc_dst
, cc_dst
, cc_src
);
2163 set_cc_nz_u64(s
, cc_dst
);
2168 static DisasJumpType
op_csst(DisasContext
*s
, DisasOps
*o
)
2170 int r3
= get_field(s
, r3
);
2171 TCGv_i32 t_r3
= tcg_constant_i32(r3
);
2173 if (tb_cflags(s
->base
.tb
) & CF_PARALLEL
) {
2174 gen_helper_csst_parallel(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2176 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->addr1
, o
->in2
);
2183 #ifndef CONFIG_USER_ONLY
2184 static DisasJumpType
op_csp(DisasContext
*s
, DisasOps
*o
)
2186 MemOp mop
= s
->insn
->data
;
2187 TCGv_i64 addr
, old
, cc
;
2188 TCGLabel
*lab
= gen_new_label();
2190 /* Note that in1 = R1 (zero-extended expected value),
2191 out = R1 (original reg), out2 = R1+1 (new value). */
2193 addr
= tcg_temp_new_i64();
2194 old
= tcg_temp_new_i64();
2195 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2196 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2197 get_mem_index(s
), mop
| MO_ALIGN
);
2199 /* Are the memory and expected values (un)equal? */
2200 cc
= tcg_temp_new_i64();
2201 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2202 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2204 /* Write back the output now, so that it happens before the
2205 following branch, so that we don't need local temps. */
2206 if ((mop
& MO_SIZE
) == MO_32
) {
2207 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2209 tcg_gen_mov_i64(o
->out
, old
);
2212 /* If the comparison was equal, and the LSB of R2 was set,
2213 then we need to flush the TLB (for all cpus). */
2214 tcg_gen_xori_i64(cc
, cc
, 1);
2215 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2216 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2218 gen_helper_purge(cpu_env
);
2225 static DisasJumpType
op_cvd(DisasContext
*s
, DisasOps
*o
)
2227 TCGv_i64 t1
= tcg_temp_new_i64();
2228 TCGv_i32 t2
= tcg_temp_new_i32();
2229 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2230 gen_helper_cvd(t1
, t2
);
2231 tcg_gen_qemu_st_i64(t1
, o
->in2
, get_mem_index(s
), MO_TEUQ
);
2235 static DisasJumpType
op_ct(DisasContext
*s
, DisasOps
*o
)
2237 int m3
= get_field(s
, m3
);
2238 TCGLabel
*lab
= gen_new_label();
2241 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2242 if (s
->insn
->data
) {
2243 c
= tcg_unsigned_cond(c
);
2245 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2254 static DisasJumpType
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2256 int m3
= get_field(s
, m3
);
2257 int r1
= get_field(s
, r1
);
2258 int r2
= get_field(s
, r2
);
2259 TCGv_i32 tr1
, tr2
, chk
;
2261 /* R1 and R2 must both be even. */
2262 if ((r1
| r2
) & 1) {
2263 gen_program_exception(s
, PGM_SPECIFICATION
);
2264 return DISAS_NORETURN
;
2266 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2270 tr1
= tcg_constant_i32(r1
);
2271 tr2
= tcg_constant_i32(r2
);
2272 chk
= tcg_constant_i32(m3
);
2274 switch (s
->insn
->data
) {
2276 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2279 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2282 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2285 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2288 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2291 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2294 g_assert_not_reached();
2301 #ifndef CONFIG_USER_ONLY
2302 static DisasJumpType
op_diag(DisasContext
*s
, DisasOps
*o
)
2304 TCGv_i32 r1
= tcg_constant_i32(get_field(s
, r1
));
2305 TCGv_i32 r3
= tcg_constant_i32(get_field(s
, r3
));
2306 TCGv_i32 func_code
= tcg_constant_i32(get_field(s
, i2
));
2308 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2313 static DisasJumpType
op_divs32(DisasContext
*s
, DisasOps
*o
)
2315 gen_helper_divs32(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2316 tcg_gen_extr32_i64(o
->out2
, o
->out
, o
->out
);
2320 static DisasJumpType
op_divu32(DisasContext
*s
, DisasOps
*o
)
2322 gen_helper_divu32(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2323 tcg_gen_extr32_i64(o
->out2
, o
->out
, o
->out
);
2327 static DisasJumpType
op_divs64(DisasContext
*s
, DisasOps
*o
)
2329 TCGv_i128 t
= tcg_temp_new_i128();
2331 gen_helper_divs64(t
, cpu_env
, o
->in1
, o
->in2
);
2332 tcg_gen_extr_i128_i64(o
->out2
, o
->out
, t
);
2336 static DisasJumpType
op_divu64(DisasContext
*s
, DisasOps
*o
)
2338 TCGv_i128 t
= tcg_temp_new_i128();
2340 gen_helper_divu64(t
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2341 tcg_gen_extr_i128_i64(o
->out2
, o
->out
, t
);
2345 static DisasJumpType
op_deb(DisasContext
*s
, DisasOps
*o
)
2347 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2351 static DisasJumpType
op_ddb(DisasContext
*s
, DisasOps
*o
)
2353 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2357 static DisasJumpType
op_dxb(DisasContext
*s
, DisasOps
*o
)
2359 gen_helper_dxb(o
->out_128
, cpu_env
, o
->in1_128
, o
->in2_128
);
2363 static DisasJumpType
op_ear(DisasContext
*s
, DisasOps
*o
)
2365 int r2
= get_field(s
, r2
);
2366 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2370 static DisasJumpType
op_ecag(DisasContext
*s
, DisasOps
*o
)
2372 /* No cache information provided. */
2373 tcg_gen_movi_i64(o
->out
, -1);
2377 static DisasJumpType
op_efpc(DisasContext
*s
, DisasOps
*o
)
2379 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2383 static DisasJumpType
op_epsw(DisasContext
*s
, DisasOps
*o
)
2385 int r1
= get_field(s
, r1
);
2386 int r2
= get_field(s
, r2
);
2387 TCGv_i64 t
= tcg_temp_new_i64();
2389 /* Note the "subsequently" in the PoO, which implies a defined result
2390 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2391 tcg_gen_shri_i64(t
, psw_mask
, 32);
2392 store_reg32_i64(r1
, t
);
2394 store_reg32_i64(r2
, psw_mask
);
2399 static DisasJumpType
op_ex(DisasContext
*s
, DisasOps
*o
)
2401 int r1
= get_field(s
, r1
);
2405 /* Nested EXECUTE is not allowed. */
2406 if (unlikely(s
->ex_value
)) {
2407 gen_program_exception(s
, PGM_EXECUTE
);
2408 return DISAS_NORETURN
;
2415 v1
= tcg_constant_i64(0);
2420 ilen
= tcg_constant_i32(s
->ilen
);
2421 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2423 return DISAS_PC_CC_UPDATED
;
2426 static DisasJumpType
op_fieb(DisasContext
*s
, DisasOps
*o
)
2428 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2431 return DISAS_NORETURN
;
2433 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m34
);
2437 static DisasJumpType
op_fidb(DisasContext
*s
, DisasOps
*o
)
2439 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2442 return DISAS_NORETURN
;
2444 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m34
);
2448 static DisasJumpType
op_fixb(DisasContext
*s
, DisasOps
*o
)
2450 TCGv_i32 m34
= fpinst_extract_m34(s
, false, true);
2453 return DISAS_NORETURN
;
2455 gen_helper_fixb(o
->out_128
, cpu_env
, o
->in2_128
, m34
);
2459 static DisasJumpType
op_flogr(DisasContext
*s
, DisasOps
*o
)
2461 /* We'll use the original input for cc computation, since we get to
2462 compare that against 0, which ought to be better than comparing
2463 the real output against 64. It also lets cc_dst be a convenient
2464 temporary during our computation. */
2465 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2467 /* R1 = IN ? CLZ(IN) : 64. */
2468 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2470 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2471 value by 64, which is undefined. But since the shift is 64 iff the
2472 input is zero, we still get the correct result after and'ing. */
2473 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2474 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2475 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2479 static DisasJumpType
op_icm(DisasContext
*s
, DisasOps
*o
)
2481 int m3
= get_field(s
, m3
);
2482 int pos
, len
, base
= s
->insn
->data
;
2483 TCGv_i64 tmp
= tcg_temp_new_i64();
2488 /* Effectively a 32-bit load. */
2489 tcg_gen_qemu_ld_i64(tmp
, o
->in2
, get_mem_index(s
), MO_TEUL
);
2496 /* Effectively a 16-bit load. */
2497 tcg_gen_qemu_ld_i64(tmp
, o
->in2
, get_mem_index(s
), MO_TEUW
);
2505 /* Effectively an 8-bit load. */
2506 tcg_gen_qemu_ld_i64(tmp
, o
->in2
, get_mem_index(s
), MO_UB
);
2511 pos
= base
+ ctz32(m3
) * 8;
2512 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2513 ccm
= ((1ull << len
) - 1) << pos
;
2517 /* This is going to be a sequence of loads and inserts. */
2518 pos
= base
+ 32 - 8;
2522 tcg_gen_qemu_ld_i64(tmp
, o
->in2
, get_mem_index(s
), MO_UB
);
2523 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2524 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2525 ccm
|= 0xffull
<< pos
;
2527 m3
= (m3
<< 1) & 0xf;
2533 tcg_gen_movi_i64(tmp
, ccm
);
2534 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2538 static DisasJumpType
op_insi(DisasContext
*s
, DisasOps
*o
)
2540 int shift
= s
->insn
->data
& 0xff;
2541 int size
= s
->insn
->data
>> 8;
2542 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
        m4 = tcg_constant_i32(0);
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_constant_i32(get_field(s, m4));
        m4 = tcg_constant_i32(0);
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
    gen_helper_iske(o->out, cpu_env, o->in2);

static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        g_assert_not_reached();

    t_r1 = tcg_constant_i32(r1);
    t_r2 = tcg_constant_i32(r2);
    t_r3 = tcg_constant_i32(r3);
    type = tcg_constant_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);

static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
    gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);

static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
    gen_helper_ldeb(o->out, cpu_env, o->in2);

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
        return DISAS_NORETURN;
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
        return DISAS_NORETURN;
    gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
        return DISAS_NORETURN;
    gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
    gen_helper_lxdb(o->out_128, cpu_env, o->in2);

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
    gen_helper_lxeb(o->out_128, cpu_env, o->in2);

static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
    tcg_gen_shli_i64(o->out, o->in2, 32);

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s), MO_TESL | s->insn->data);

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s), MO_TEUL | s->insn->data);

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ | s->insn->data);

static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);

static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
        disas_jcc(s, &c, get_field(s, m4));
    tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        z = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
    gen_helper_lra(o->out, cpu_env, o->in2);

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
    TCGv_i64 mask, addr;

    per_breaking_event(s);

    /*
     * Convert the short PSW into the normal PSW, similar to what
     * s390_cpu_load_normal() does.
     */
    mask = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
    tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
    tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
    gen_helper_load_psw(cpu_env, mask, addr);
    return DISAS_NORETURN;

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
    per_breaking_event(s);
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_helper_load_psw(cpu_env, t1, t2);
    return DISAS_NORETURN;

static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);

static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32_i64(r1, t1);

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {

    /* Then load the remaining registers. Page fault can't occur. */
    tcg_gen_movi_i64(t2, 4);
    tcg_gen_add_i64(o->in2, o->in2, t2);
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    store_reg32_i64(r1, t1);

static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
        store_reg32h_i64(r1, t1);

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {

    /* Then load the remaining registers. Page fault can't occur. */
    tcg_gen_movi_i64(t2, 4);
    tcg_gen_add_i64(o->in2, o->in2, t2);
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
    store_reg32h_i64(r1, t1);

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
    tcg_gen_mov_i64(regs[r1], t1);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {

    /* Then load the remaining registers. Page fault can't occur. */
    tcg_gen_movi_i64(t1, 8);
    tcg_gen_add_i64(o->in2, o->in2, t1);
    tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);

static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
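
/*
 * LPQ requires a 16-byte atomic load.  Outside of a parallel context the
 * plain helper is sufficient; under CF_PARALLEL we need HAVE_ATOMIC128, and
 * otherwise fall back to gen_helper_exit_atomic(), which reruns the
 * instruction with all other CPUs stopped.
 */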
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    return_low128(o->out2);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, -256);
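
/*
 * LCBB (LOAD COUNT TO BLOCK BOUNDARY): the block size is 64 << m3 bytes;
 * the result is min(16, bytes from the operand address to the next block
 * boundary), computed below by negating the address modulo the block size,
 * and the CC is derived from that count via CC_OP_LCBB.
 */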
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);

static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

#if !defined(CONFIG_USER_ONLY)
    gen_helper_monitor_call(cpu_env, o->addr1, tcg_constant_i32(monitor_class));
    /* Defaults to a NOP. */

static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        tcg_gen_movi_i64(ar1, 0);
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)

static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

    t1 = tcg_constant_i32(r1);
    t2 = tcg_constant_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

    t1 = tcg_constant_i32(r1);
    t3 = tcg_constant_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, l1);
    int r3 = get_field(s, r3);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
    TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_mvst(cc_op, cpu_env, t1, t2);

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
    tcg_gen_mul_i64(o->out, o->in1, o->in2);

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);

static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
    gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
    gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);

static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
    TCGv_i64 z = tcg_constant_i64(0);
    TCGv_i64 n = tcg_temp_new_i64();

    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
    tcg_gen_neg_i64(o->out, o->in2);

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
    tcg_gen_or_i64(o->out, o->in1, o->in2);

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);

static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);

static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
    int l2 = get_field(s, l2) + 1;

    /* The length must not exceed 32 bytes. */
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    l = tcg_constant_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
    int l2 = get_field(s, l2) + 1;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    l = tcg_constant_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
        gen_helper_popcnt(o->out, o->in2);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
    gen_helper_ptlb(cpu_env);
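
/*
 * RISBG and friends (rotate then insert selected bits): i3/i4 delimit the
 * bit range to insert (bit 0x80 of i4 requests that the remaining bits be
 * zeroed rather than preserved), i5 is the rotate amount, and the high/low
 * variants restrict the operation to one doubleword half via pmask.  The
 * code below tries extract/deposit fast paths before the generic
 * rotate-and-mask sequence.
 */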
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
    case 0x5d: /* risbhg */
        pmask = 0xffffffff00000000ull;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        g_assert_not_reached();

    /* MASK is the set of bits to be inserted from R2. */
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));

    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
        tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
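
/*
 * RxSBG (rotate then AND/OR/XOR selected bits): the boolean operation is
 * chosen by the opcode (AND for 0x54, XOR for 0x57, OR otherwise), the
 * second operand is rotated left by i5 first, and the CC is set from only
 * the selected bits; the test-only form discards the result entirely.
 */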
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);

    /* If this is a test-only form, arrange to discard the result. */
        tcg_debug_assert(o->out != NULL);
        o->out = tcg_temp_new_i64();
        tcg_gen_mov_i64(o->out, orig_out);

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
        mask ^= ~0ull >> i4 >> 1;
        mask |= ~(~0ull >> i4 >> 1);

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);

    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);

static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
    tcg_gen_bswap64_i64(o->out, o->in2);

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
    gen_helper_rrbe(cc_op, cpu_env, o->in2);

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_TOO_MANY;

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
    int sam = s->insn->data;

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;

    tsam = tcg_constant_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return DISAS_TOO_MANY;

static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
    gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
    gen_helper_sqeb(o->out, cpu_env, o->in2);

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
    gen_helper_sqdb(o->out, cpu_env, o->in2);

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
    gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);

static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
        tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
        g_assert_not_reached();

static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);

static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
    tcg_gen_shl_i64(o->out, o->in1, o->in2);

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
    tcg_gen_sar_i64(o->out, o->in1, o->in2);

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
    tcg_gen_shr_i64(o->out, o->in1, o->in2);

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
    gen_helper_sfpc(cpu_env, o->in2);

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
    gen_helper_sfas(cpu_env, o->in2);

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);

static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);

static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);

static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
    gen_helper_sske(cpu_env, o->in1, o->in2);
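
/*
 * Helper used by the PSW-mask-modifying instructions below: raise a
 * specification exception at run time if any PSW_MASK_RESERVED bit ended up
 * set in psw_mask, otherwise fall through to the "ok" label.
 */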
static void gen_check_psw_mask(DisasContext *s)
    TCGv_i64 reserved = tcg_temp_new_i64();
    TCGLabel *ok = gen_new_label();

    tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
    tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
    gen_program_exception(s, PGM_SPECIFICATION);

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);

static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
    gen_helper_sck(cc_op, cpu_env, o->in2);

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
    gen_helper_sckc(cpu_env, o->in2);

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
    gen_helper_sckpf(cpu_env, regs[0]);

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
    gen_helper_stckc(o->out, cpu_env);

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
    gen_helper_spt(cpu_env, o->in2);

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
    gen_helper_stfl(cpu_env);

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
    gen_helper_stpt(o->out, cpu_env);

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
    gen_helper_spx(cpu_env, o->in2);

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
    gen_helper_xsch(cpu_env, regs[1]);

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
    gen_helper_csch(cpu_env, regs[1]);

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
    gen_helper_hsch(cpu_env, regs[1]);

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
    gen_helper_msch(cpu_env, regs[1], o->in2);

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
    gen_helper_rchp(cpu_env, regs[1]);

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
    gen_helper_rsch(cpu_env, regs[1]);

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
    gen_helper_sal(cpu_env, regs[1]);

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
    /* The instruction is suppressed if not provided. */

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
    gen_helper_ssch(cpu_env, regs[1], o->in2);

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
    gen_helper_stsch(cpu_env, regs[1], o->in2);

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
    gen_helper_stcrw(cpu_env, o->in2);

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
    gen_helper_tpi(cc_op, cpu_env, o->addr1);

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
    gen_helper_tsch(cpu_env, regs[1], o->in2);

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
    gen_helper_chsc(cpu_env, o->in2);

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
    uint64_t i2 = get_field(s, i2);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

    if (s->fields.op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask, (i2 << 56) | 0x00ffffffffffffffull);
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);

    gen_check_psw_mask(s);

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        gen_helper_per_store_real(cpu_env);

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
    gen_helper_stfle(cc_op, cpu_env, o->in2);

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s), MO_TEUL | s->insn->data);

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUQ | s->insn->data);

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
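
/*
 * STCM (STORE CHARACTERS UNDER MASK) mirrors op_icm above: a contiguous m3
 * mask becomes a single 32/16/8-bit store of the corresponding bytes of r1,
 * while a sparse mask is handled one byte at a time with a shift before
 * each store.
 */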
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);

        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);

        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);

        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
            tcg_gen_shri_i64(tmp, o->in1, pos);
            tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
            tcg_gen_addi_i64(o->in2, o->in2, 1);
        m3 = (m3 << 1) & 0xf;
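
/*
 * STM/STMG: store the registers from r1 up to r3 to consecutive locations,
 * each insn->data (4 or 8) bytes wide, advancing o->in2 by that element
 * size between stores.
 */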
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_constant_i64(size);

    tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s), size == 8 ? MO_TEUQ : MO_TEUL);
    tcg_gen_add_i64(o->in2, o->in2, tsize);

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_constant_i64(4);
    TCGv_i64 t32 = tcg_constant_i64(32);

    tcg_gen_shl_i64(t, regs[r1], t32);
    tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
    tcg_gen_add_i64(o->in2, o->in2, t4);

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_srst(cpu_env, r1, r2);

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_srstu(cpu_env, r1, r2);

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
    tcg_gen_sub_i64(o->out, o->in1, o->in2);

static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
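
/*
 * The SUBTRACT LOGICAL WITH BORROW ops below need the incoming borrow as
 * 0 or -1 in cc_src: when the previous CC came from a logical subtract it
 * is already in that form, otherwise the carry bit (the msb of the CC) is
 * extracted and converted to a borrow by subtracting 1.
 */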
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
        /* The borrow value is already in cc_src (0,-1). */
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);

static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);

static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
    t = tcg_constant_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    t = tcg_constant_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
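
/*
 * TAM (TEST ADDRESSING MODE): the CC encodes the current addressing mode
 * taken from the TB flags; 64-bit mode sets both bits (CC 3), 31-bit mode
 * sets only the low bit (CC 1), and 24-bit mode leaves CC 0.
 */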
static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
    gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
    gen_helper_testblock(cc_op, cpu_env, o->in2);

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
    TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
    TCGv_i128 pair = tcg_temp_new_i128();
    gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
    tcg_gen_extr_i128_i64(o->out2, o->out, pair);

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tcg_gen_movi_i32(tst, -1);
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
            tcg_gen_ext16u_i32(tst, tst);

    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
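
/*
 * TS (TEST AND SET): atomically replace the addressed byte with 0xff and
 * set the CC to the former leftmost bit of that byte.
 */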
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
    TCGv_i32 t1 = tcg_constant_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);

static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
    TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
    int l1 = get_field(s, l1) + 1;

    /* The length must not exceed 32 bytes. */
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    l = tcg_constant_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
    int l1 = get_field(s, l1) + 1;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    l = tcg_constant_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);

static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_constant_i64(0);
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
            tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
            tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
            tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
        gen_op_movi_cc(s, 0);

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_constant_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);

static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
    o->out = tcg_constant_i64(0);

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
    o->out = tcg_constant_i64(0);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_clp(cpu_env, r2);

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_pcilg(cpu_env, r1, r2);

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_pcistg(cpu_env, r1, r2);

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
    gen_helper_sic(cpu_env, o->in1, o->in2);

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
    gen_helper_rpcit(cpu_env, r1, r2);

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);

#include "translate_vx.c.inc"

/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code. */

static void cout_abs32(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);

static void cout_abs64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);

static void cout_adds32(DisasContext *s, DisasOps *o)
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);

static void cout_adds64(DisasContext *s, DisasOps *o)
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
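
/*
 * For the 32-bit logical add the operands were zero-extended, so after the
 * 64-bit addition the carry-out sits in bit 32 of the result: shift it into
 * cc_src and keep the low 32 bits in cc_dst for the CC_OP_ADDU computation.
 */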
static void cout_addu32(DisasContext *s, DisasOps *o)
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);

static void cout_addu64(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);

static void cout_cmps32(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);

static void cout_cmps64(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);

static void cout_cmpu32(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);

static void cout_cmpu64(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);

static void cout_f32(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);

static void cout_f64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);

static void cout_f128(DisasContext *s, DisasOps *o)
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);

static void cout_nabs32(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);

static void cout_nabs64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);

static void cout_neg32(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);

static void cout_neg64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);

static void cout_nz32(DisasContext *s, DisasOps *o)
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);

static void cout_nz64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);

static void cout_s32(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);

static void cout_s64(DisasContext *s, DisasOps *o)
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);

static void cout_subs32(DisasContext *s, DisasOps *o)
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);

static void cout_subs64(DisasContext *s, DisasOps *o)
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5128 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
5130 tcg_gen_sari_i64(cc_src
, o
->out
, 32);
5131 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
5132 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, cc_dst
);
5135 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
5137 gen_op_update2_cc_i64(s
, CC_OP_SUBU
, cc_src
, o
->out
);
5140 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
5142 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
5145 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
5147 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
5150 static void cout_muls32(DisasContext
*s
, DisasOps
*o
)
5152 gen_op_update1_cc_i64(s
, CC_OP_MULS_32
, o
->out
);
5155 static void cout_muls64(DisasContext
*s
, DisasOps
*o
)
5157 /* out contains "high" part, out2 contains "low" part of 128 bit result */
5158 gen_op_update2_cc_i64(s
, CC_OP_MULS_64
, o
->out
, o
->out2
);
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
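/* E.g. prep_new below allocates a fresh temporary for the result, whereas
   prep_r1 aliases the output directly to the r1 guest register global, so
   the operation writes the register in place. */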
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_new_x(DisasContext *s, DisasOps *o)
{
    o->out_128 = tcg_temp_new_i128();
}
#define SPEC_prep_new_x 0

static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
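/* Each generator is paired with a SPEC_wout_* value; non-zero values such
   as SPEC_r1_even encode operand constraints that translate_one() checks
   before emitting the insn, raising a specification exception if violated. */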
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
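/* E.g. in1_r1 copies the r1 register into a fresh value via load_reg, while
   in1_r1_o aliases the register global directly without a copy. */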
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
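/* The m2 variants below first compute the effective address via in2_a2()
   and then reuse o->in2 to hold the value loaded from that address. */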
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
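/*
 * Illustrative (schematic) example: an insn-data.h.inc entry along the lines
 * of C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32) expands here
 * first into an enumerator (insn_AR), then into a DisasInsn initializer that
 * wires up in1_r1, in2_r2, prep_new, wout_r1_32 and cout_adds32, and finally
 * into a "case 0x1a00:" arm of lookup_opc().
 */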
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below. */
#define in1_0   NULL
#define in2_0   NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0    NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */
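/*
 * Worked example (illustrative): for a 4-bit field starting at big-endian
 * bit 8, the extraction below computes r = (insn << 8) >> (64 - 4), i.e.
 * it isolates bits 8..11 of the left-aligned instruction word.
 */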
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
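/*
 * Note: the instruction length (2, 4 or 6 bytes) follows from the two
 * most-significant bits of the first opcode byte, as implemented by
 * get_ilen(), so the remaining halfwords can be fetched before the
 * instruction format itself is known.
 */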
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN. */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

out:
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory. */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];