4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log the raw instruction word when verbose disassembly debugging is on;
   compiles to nothing otherwise (LOG_DISAS is conditionally defined).  */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
491 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
494 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
497 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
500 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
502 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
505 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
507 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
510 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp
= tcg_const_i32(v2
);
514 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
515 tcg_temp_free_i32(tmp
);
518 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
520 TCGv_i32 tmp
= tcg_const_i32(v2
);
521 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
522 tcg_temp_free_i32(tmp
);
525 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
527 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
530 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
532 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
535 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
537 TCGv_i64 tmp
= tcg_const_i64(v2
);
539 tcg_temp_free_i64(tmp
);
542 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
544 TCGv_i64 tmp
= tcg_const_i64(v2
);
546 tcg_temp_free_i64(tmp
);
549 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
551 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
554 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
556 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
559 /* CC value is in env->cc_op */
560 static inline void set_cc_static(DisasContext
*s
)
562 tcg_gen_discard_i64(cc_src
);
563 tcg_gen_discard_i64(cc_dst
);
564 tcg_gen_discard_i64(cc_vr
);
565 s
->cc_op
= CC_OP_STATIC
;
568 static inline void gen_op_set_cc_op(DisasContext
*s
)
570 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
571 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
575 static inline void gen_update_cc_op(DisasContext
*s
)
580 /* calculates cc into cc_op */
581 static void gen_op_calc_cc(DisasContext
*s
)
583 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
584 TCGv_i64 dummy
= tcg_const_i64(0);
591 /* s->cc_op is the cc value */
592 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
595 /* env->cc_op already is the cc value */
609 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
614 case CC_OP_LTUGTU_32
:
615 case CC_OP_LTUGTU_64
:
622 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
637 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
640 /* unknown operation - assume 3 arguments and cc_op in env */
641 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
647 tcg_temp_free_i32(local_cc_op
);
648 tcg_temp_free_i64(dummy
);
650 /* We now have cc in cc_op as constant */
654 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
658 *r1
= (insn
>> 4) & 0xf;
662 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
663 int *x2
, int *b2
, int *d2
)
667 *r1
= (insn
>> 20) & 0xf;
668 *x2
= (insn
>> 16) & 0xf;
669 *b2
= (insn
>> 12) & 0xf;
672 return get_address(s
, *x2
, *b2
, *d2
);
675 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
680 *r1
= (insn
>> 20) & 0xf;
682 *r3
= (insn
>> 16) & 0xf;
683 *b2
= (insn
>> 12) & 0xf;
687 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
692 *i2
= (insn
>> 16) & 0xff;
693 *b1
= (insn
>> 12) & 0xf;
696 return get_address(s
, 0, *b1
, *d1
);
699 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
701 /* NOTE: we handle the case where the TB spans two pages here */
702 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
703 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
704 && !s
->singlestep_enabled
705 && !(s
->tb
->cflags
& CF_LAST_IO
));
708 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
712 if (use_goto_tb(s
, pc
)) {
713 tcg_gen_goto_tb(tb_num
);
714 tcg_gen_movi_i64(psw_addr
, pc
);
715 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
717 /* jump to another page: currently not optimized */
718 tcg_gen_movi_i64(psw_addr
, pc
);
723 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
725 #ifdef DEBUG_INLINE_BRANCHES
726 inline_branch_miss
[cc_op
]++;
730 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
732 #ifdef DEBUG_INLINE_BRANCHES
733 inline_branch_hit
[cc_op
]++;
737 /* Table of mask values to comparison codes, given a comparison as input.
738 For a true comparison CC=3 will never be set, but we treat this
739 conservatively for possible use when CC=3 indicates overflow. */
740 static const TCGCond ltgt_cond
[16] = {
741 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
742 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
743 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
744 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
745 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
746 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
747 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
748 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
751 /* Table of mask values to comparison codes, given a logic op as input.
752 For such, only CC=0 and CC=1 should be possible. */
753 static const TCGCond nz_cond
[16] = {
755 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
757 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
759 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
760 /* EQ | NE | x | x */
761 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
764 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
765 details required to generate a TCG comparison. */
766 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
769 enum cc_op old_cc_op
= s
->cc_op
;
771 if (mask
== 15 || mask
== 0) {
772 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
775 c
->g1
= c
->g2
= true;
780 /* Find the TCG condition for the mask + cc op. */
786 cond
= ltgt_cond
[mask
];
787 if (cond
== TCG_COND_NEVER
) {
790 account_inline_branch(s
, old_cc_op
);
793 case CC_OP_LTUGTU_32
:
794 case CC_OP_LTUGTU_64
:
795 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
796 if (cond
== TCG_COND_NEVER
) {
799 account_inline_branch(s
, old_cc_op
);
803 cond
= nz_cond
[mask
];
804 if (cond
== TCG_COND_NEVER
) {
807 account_inline_branch(s
, old_cc_op
);
822 account_inline_branch(s
, old_cc_op
);
837 account_inline_branch(s
, old_cc_op
);
842 /* Calculate cc value. */
847 /* Jump based on CC. We'll load up the real cond below;
848 the assignment here merely avoids a compiler warning. */
849 account_noninline_branch(s
, old_cc_op
);
850 old_cc_op
= CC_OP_STATIC
;
851 cond
= TCG_COND_NEVER
;
855 /* Load up the arguments of the comparison. */
857 c
->g1
= c
->g2
= false;
861 c
->u
.s32
.a
= tcg_temp_new_i32();
862 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
863 c
->u
.s32
.b
= tcg_const_i32(0);
866 case CC_OP_LTUGTU_32
:
868 c
->u
.s32
.a
= tcg_temp_new_i32();
869 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
870 c
->u
.s32
.b
= tcg_temp_new_i32();
871 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
877 c
->u
.s64
.b
= tcg_const_i64(0);
881 case CC_OP_LTUGTU_64
:
884 c
->g1
= c
->g2
= true;
890 c
->u
.s64
.a
= tcg_temp_new_i64();
891 c
->u
.s64
.b
= tcg_const_i64(0);
892 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
900 case 0x8 | 0x4 | 0x2: /* cc != 3 */
902 c
->u
.s32
.b
= tcg_const_i32(3);
904 case 0x8 | 0x4 | 0x1: /* cc != 2 */
906 c
->u
.s32
.b
= tcg_const_i32(2);
908 case 0x8 | 0x2 | 0x1: /* cc != 1 */
910 c
->u
.s32
.b
= tcg_const_i32(1);
912 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
915 c
->u
.s32
.a
= tcg_temp_new_i32();
916 c
->u
.s32
.b
= tcg_const_i32(0);
917 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
919 case 0x8 | 0x4: /* cc < 2 */
921 c
->u
.s32
.b
= tcg_const_i32(2);
923 case 0x8: /* cc == 0 */
925 c
->u
.s32
.b
= tcg_const_i32(0);
927 case 0x4 | 0x2 | 0x1: /* cc != 0 */
929 c
->u
.s32
.b
= tcg_const_i32(0);
931 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
934 c
->u
.s32
.a
= tcg_temp_new_i32();
935 c
->u
.s32
.b
= tcg_const_i32(0);
936 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
938 case 0x4: /* cc == 1 */
940 c
->u
.s32
.b
= tcg_const_i32(1);
942 case 0x2 | 0x1: /* cc > 1 */
944 c
->u
.s32
.b
= tcg_const_i32(1);
946 case 0x2: /* cc == 2 */
948 c
->u
.s32
.b
= tcg_const_i32(2);
950 case 0x1: /* cc == 3 */
952 c
->u
.s32
.b
= tcg_const_i32(3);
955 /* CC is masked by something else: (8 >> cc) & mask. */
958 c
->u
.s32
.a
= tcg_const_i32(8);
959 c
->u
.s32
.b
= tcg_const_i32(0);
960 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
972 static void free_compare(DisasCompare
*c
)
976 tcg_temp_free_i64(c
->u
.s64
.a
);
978 tcg_temp_free_i32(c
->u
.s32
.a
);
983 tcg_temp_free_i64(c
->u
.s64
.b
);
985 tcg_temp_free_i32(c
->u
.s32
.b
);
990 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
993 TCGv_i64 tmp
, tmp2
, tmp3
;
994 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
996 #ifndef CONFIG_USER_ONLY
1000 r1
= (insn
>> 4) & 0xf;
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1
= tcg_const_i32(r1
);
1009 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1010 tcg_temp_free_i32(tmp32_1
);
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1
= tcg_const_i32(r1
);
1014 tmp32_2
= tcg_const_i32(r2
);
1015 potential_page_fault(s
);
1016 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1017 tcg_temp_free_i32(tmp32_1
);
1018 tcg_temp_free_i32(tmp32_2
);
1019 gen_op_movi_cc(s
, 0);
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1
= load_reg32(r2
);
1023 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1024 tcg_temp_free_i32(tmp32_1
);
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1
= tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1029 store_reg32(r1
, tmp32_1
);
1030 tcg_temp_free_i32(tmp32_1
);
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1034 tmp2
= load_reg(r1
);
1035 tmp3
= load_reg(r2
);
1036 potential_page_fault(s
);
1037 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1038 tcg_temp_free_i64(tmp
);
1039 tcg_temp_free_i64(tmp2
);
1040 tcg_temp_free_i64(tmp3
);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s
, 0);
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1
= load_reg32(0);
1046 tmp32_2
= tcg_const_i32(r1
);
1047 tmp32_3
= tcg_const_i32(r2
);
1048 potential_page_fault(s
);
1049 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1050 tcg_temp_free_i32(tmp32_1
);
1051 tcg_temp_free_i32(tmp32_2
);
1052 tcg_temp_free_i32(tmp32_3
);
1053 gen_op_movi_cc(s
, 1);
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1
= load_reg32(0);
1057 tmp32_2
= tcg_const_i32(r1
);
1058 tmp32_3
= tcg_const_i32(r2
);
1059 potential_page_fault(s
);
1060 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1062 tcg_temp_free_i32(tmp32_1
);
1063 tcg_temp_free_i32(tmp32_2
);
1064 tcg_temp_free_i32(tmp32_3
);
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1
= load_reg32(0);
1068 tmp32_2
= tcg_const_i32(r1
);
1069 tmp32_3
= tcg_const_i32(r2
);
1070 potential_page_fault(s
);
1071 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1073 tcg_temp_free_i32(tmp32_1
);
1074 tcg_temp_free_i32(tmp32_2
);
1075 tcg_temp_free_i32(tmp32_3
);
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1081 check_privileged(s
);
1082 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1083 tmp
= get_address(s
, 0, b2
, d2
);
1084 potential_page_fault(s
);
1085 gen_helper_stidp(cpu_env
, tmp
);
1086 tcg_temp_free_i64(tmp
);
1088 case 0x04: /* SCK D2(B2) [S] */
1090 check_privileged(s
);
1091 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1092 tmp
= get_address(s
, 0, b2
, d2
);
1093 potential_page_fault(s
);
1094 gen_helper_sck(cc_op
, tmp
);
1096 tcg_temp_free_i64(tmp
);
1098 case 0x05: /* STCK D2(B2) [S] */
1100 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1101 tmp
= get_address(s
, 0, b2
, d2
);
1102 potential_page_fault(s
);
1103 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1105 tcg_temp_free_i64(tmp
);
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s
);
1110 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1111 tmp
= get_address(s
, 0, b2
, d2
);
1112 potential_page_fault(s
);
1113 gen_helper_sckc(cpu_env
, tmp
);
1114 tcg_temp_free_i64(tmp
);
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s
);
1119 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1120 tmp
= get_address(s
, 0, b2
, d2
);
1121 potential_page_fault(s
);
1122 gen_helper_stckc(cpu_env
, tmp
);
1123 tcg_temp_free_i64(tmp
);
1125 case 0x08: /* SPT D2(B2) [S] */
1127 check_privileged(s
);
1128 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1129 tmp
= get_address(s
, 0, b2
, d2
);
1130 potential_page_fault(s
);
1131 gen_helper_spt(cpu_env
, tmp
);
1132 tcg_temp_free_i64(tmp
);
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s
);
1137 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1138 tmp
= get_address(s
, 0, b2
, d2
);
1139 potential_page_fault(s
);
1140 gen_helper_stpt(cpu_env
, tmp
);
1141 tcg_temp_free_i64(tmp
);
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s
);
1146 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1147 tmp
= get_address(s
, 0, b2
, d2
);
1148 tmp2
= tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1150 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1151 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1152 tcg_temp_free_i64(tmp2
);
1153 tcg_temp_free_i64(tmp
);
1155 case 0x0d: /* PTLB [S] */
1157 check_privileged(s
);
1158 gen_helper_ptlb(cpu_env
);
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s
);
1163 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1164 tmp
= get_address(s
, 0, b2
, d2
);
1165 potential_page_fault(s
);
1166 gen_helper_spx(cpu_env
, tmp
);
1167 tcg_temp_free_i64(tmp
);
1169 case 0x11: /* STPX D2(B2) [S] */
1171 check_privileged(s
);
1172 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1173 tmp
= get_address(s
, 0, b2
, d2
);
1174 tmp2
= tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1176 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1177 tcg_temp_free_i64(tmp
);
1178 tcg_temp_free_i64(tmp2
);
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s
);
1183 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1184 tmp
= get_address(s
, 0, b2
, d2
);
1185 tmp2
= tcg_temp_new_i64();
1186 tmp32_1
= tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1188 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1189 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1190 tcg_temp_free_i64(tmp
);
1191 tcg_temp_free_i64(tmp2
);
1192 tcg_temp_free_i32(tmp32_1
);
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s
);
1197 r1
= (insn
>> 4) & 0xf;
1200 tmp2
= load_reg(r2
);
1201 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1202 tcg_temp_free_i64(tmp
);
1203 tcg_temp_free_i64(tmp2
);
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s
);
1208 r1
= (insn
>> 4) & 0xf;
1211 tmp2
= tcg_temp_new_i64();
1212 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1213 store_reg(r1
, tmp2
);
1214 tcg_temp_free_i64(tmp
);
1215 tcg_temp_free_i64(tmp2
);
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
1218 /* Set Storage Key Extended */
1219 check_privileged(s
);
1220 r1
= (insn
>> 4) & 0xf;
1222 tmp32_1
= load_reg32(r1
);
1224 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1226 tcg_temp_free_i32(tmp32_1
);
1227 tcg_temp_free_i64(tmp
);
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s
);
1232 r1
= (insn
>> 4) & 0xf;
1234 tmp32_1
= load_reg32(r1
);
1236 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1237 tcg_temp_free_i32(tmp32_1
);
1238 tcg_temp_free_i64(tmp
);
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s
);
1243 gen_op_movi_cc(s
, 3);
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s
);
1248 r1
= (insn
>> 4) & 0xf;
1250 tmp32_1
= load_reg32(r1
);
1252 potential_page_fault(s
);
1253 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1254 tcg_temp_free_i32(tmp32_1
);
1255 tcg_temp_free_i64(tmp
);
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s
);
1260 r1
= (insn
>> 4) & 0xf;
1262 tmp32_1
= tcg_const_i32(r1
);
1263 tmp32_2
= tcg_const_i32(r2
);
1264 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1266 tcg_temp_free_i32(tmp32_1
);
1267 tcg_temp_free_i32(tmp32_2
);
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s
);
1272 gen_op_movi_cc(s
, 3);
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1277 tmp
= get_address(s
, 0, b2
, d2
);
1278 potential_page_fault(s
);
1279 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1281 tcg_temp_free_i64(tmp
);
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s
);
1286 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1287 tmp
= get_address(s
, 0, b2
, d2
);
1288 potential_page_fault(s
);
1289 gen_helper_sacf(cpu_env
, tmp
);
1290 tcg_temp_free_i64(tmp
);
1291 /* addressing mode has changed, so end the block */
1294 s
->is_jmp
= DISAS_JUMP
;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s
);
1298 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1299 tmp
= get_address(s
, 0, b2
, d2
);
1300 tmp32_1
= load_reg32(0);
1301 tmp32_2
= load_reg32(1);
1302 potential_page_fault(s
);
1303 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1305 tcg_temp_free_i64(tmp
);
1306 tcg_temp_free_i32(tmp32_1
);
1307 tcg_temp_free_i32(tmp32_2
);
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1311 tmp
= get_address(s
, 0, b2
, d2
);
1312 tmp2
= tcg_temp_new_i64();
1313 tmp32_1
= tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1315 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1316 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1317 tcg_temp_free_i64(tmp
);
1318 tcg_temp_free_i64(tmp2
);
1319 tcg_temp_free_i32(tmp32_1
);
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s
);
1324 tmp2
= tcg_const_i64(0xc0000000);
1325 tmp
= tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1327 tcg_temp_free_i64(tmp2
);
1328 tcg_temp_free_i64(tmp
);
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s
);
1333 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1334 tmp
= get_address(s
, 0, b2
, d2
);
1335 tmp2
= tcg_temp_new_i64();
1336 tmp3
= tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1338 tcg_gen_addi_i64(tmp
, tmp
, 8);
1339 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1340 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1341 /* we need to keep cc_op intact */
1342 s
->is_jmp
= DISAS_JUMP
;
1343 tcg_temp_free_i64(tmp
);
1344 tcg_temp_free_i64(tmp2
);
1345 tcg_temp_free_i64(tmp3
);
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s
);
1350 potential_page_fault(s
);
1351 tmp32_1
= load_reg32(r2
);
1353 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1355 tcg_temp_free_i32(tmp32_1
);
1356 tcg_temp_free_i64(tmp
);
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1361 gen_illegal_opcode(s
);
1366 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1370 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1388 case 0x84: /* SFPC R1 [RRE] */
1389 tmp32_1
= load_reg32(r1
);
1390 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1391 tcg_temp_free_i32(tmp32_1
);
1393 case 0x94: /* CEFBR R1,R2 [RRE] */
1394 case 0x95: /* CDFBR R1,R2 [RRE] */
1395 case 0x96: /* CXFBR R1,R2 [RRE] */
1396 tmp32_1
= tcg_const_i32(r1
);
1397 tmp32_2
= load_reg32(r2
);
1400 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1403 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1406 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1411 tcg_temp_free_i32(tmp32_1
);
1412 tcg_temp_free_i32(tmp32_2
);
1414 case 0x98: /* CFEBR R1,R2 [RRE] */
1415 case 0x99: /* CFDBR R1,R2 [RRE] */
1416 case 0x9a: /* CFXBR R1,R2 [RRE] */
1417 tmp32_1
= tcg_const_i32(r1
);
1418 tmp32_2
= tcg_const_i32(r2
);
1419 tmp32_3
= tcg_const_i32(m3
);
1422 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1425 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1428 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1434 tcg_temp_free_i32(tmp32_1
);
1435 tcg_temp_free_i32(tmp32_2
);
1436 tcg_temp_free_i32(tmp32_3
);
1438 case 0xa4: /* CEGBR R1,R2 [RRE] */
1439 case 0xa5: /* CDGBR R1,R2 [RRE] */
1440 tmp32_1
= tcg_const_i32(r1
);
1444 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1447 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1452 tcg_temp_free_i32(tmp32_1
);
1453 tcg_temp_free_i64(tmp
);
1455 case 0xa6: /* CXGBR R1,R2 [RRE] */
1456 tmp32_1
= tcg_const_i32(r1
);
1458 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1459 tcg_temp_free_i32(tmp32_1
);
1460 tcg_temp_free_i64(tmp
);
1462 case 0xa8: /* CGEBR R1,R2 [RRE] */
1463 tmp32_1
= tcg_const_i32(r1
);
1464 tmp32_2
= tcg_const_i32(r2
);
1465 tmp32_3
= tcg_const_i32(m3
);
1466 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1468 tcg_temp_free_i32(tmp32_1
);
1469 tcg_temp_free_i32(tmp32_2
);
1470 tcg_temp_free_i32(tmp32_3
);
1472 case 0xa9: /* CGDBR R1,R2 [RRE] */
1473 tmp32_1
= tcg_const_i32(r1
);
1474 tmp32_2
= tcg_const_i32(r2
);
1475 tmp32_3
= tcg_const_i32(m3
);
1476 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1478 tcg_temp_free_i32(tmp32_1
);
1479 tcg_temp_free_i32(tmp32_2
);
1480 tcg_temp_free_i32(tmp32_3
);
1482 case 0xaa: /* CGXBR R1,R2 [RRE] */
1483 tmp32_1
= tcg_const_i32(r1
);
1484 tmp32_2
= tcg_const_i32(r2
);
1485 tmp32_3
= tcg_const_i32(m3
);
1486 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1488 tcg_temp_free_i32(tmp32_1
);
1489 tcg_temp_free_i32(tmp32_2
);
1490 tcg_temp_free_i32(tmp32_3
);
1493 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1494 gen_illegal_opcode(s
);
1502 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1508 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1510 case 0x83: /* FLOGR R1,R2 [RRE] */
1512 tmp32_1
= tcg_const_i32(r1
);
1513 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1515 tcg_temp_free_i64(tmp
);
1516 tcg_temp_free_i32(tmp32_1
);
1519 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1520 gen_illegal_opcode(s
);
1525 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1531 opc
= cpu_ldub_code(env
, s
->pc
);
1532 LOG_DISAS("opc 0x%x\n", opc
);
1536 insn
= ld_code4(env
, s
->pc
);
1537 op
= (insn
>> 16) & 0xff;
1538 disas_b2(env
, s
, op
, insn
);
1541 insn
= ld_code4(env
, s
->pc
);
1542 op
= (insn
>> 16) & 0xff;
1543 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1544 r1
= (insn
>> 4) & 0xf;
1546 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1549 insn
= ld_code4(env
, s
->pc
);
1550 r1
= (insn
>> 4) & 0xf;
1552 op
= (insn
>> 16) & 0xff;
1553 disas_b9(env
, s
, op
, r1
, r2
);
1556 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1557 gen_illegal_opcode(s
);
1562 /* ====================================================================== */
1563 /* Define the insn format enumeration. */
1564 #define F0(N) FMT_##N,
1565 #define F1(N, X1) F0(N)
1566 #define F2(N, X1, X2) F0(N)
1567 #define F3(N, X1, X2, X3) F0(N)
1568 #define F4(N, X1, X2, X3, X4) F0(N)
1569 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1572 #include "insn-format.def"
1582 /* Define a structure to hold the decoded fields. We'll store each inside
1583 an array indexed by an enum. In order to conserve memory, we'll arrange
1584 for fields that do not exist at the same time to overlap, thus the "C"
1585 for compact. For checking purposes there is an "O" for original index
1586 as well that will be applied to availability bitmaps. */
1588 enum DisasFieldIndexO
{
1611 enum DisasFieldIndexC
{
1642 struct DisasFields
{
1645 unsigned presentC
:16;
1646 unsigned int presentO
;
1650 /* This is the way fields are to be accessed out of DisasFields. */
1651 #define have_field(S, F) have_field1((S), FLD_O_##F)
1652 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1654 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1656 return (f
->presentO
>> c
) & 1;
1659 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1660 enum DisasFieldIndexC c
)
1662 assert(have_field1(f
, o
));
1666 /* Describe the layout of each field in each format. */
1667 typedef struct DisasField
{
1669 unsigned int size
:8;
1670 unsigned int type
:2;
1671 unsigned int indexC
:6;
1672 enum DisasFieldIndexO indexO
:8;
1675 typedef struct DisasFormatInfo
{
1676 DisasField op
[NUM_C_FIELD
];
1679 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1680 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1681 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1682 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1683 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1684 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1685 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1686 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1687 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1688 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1689 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1690 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1691 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1692 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1694 #define F0(N) { { } },
1695 #define F1(N, X1) { { X1 } },
1696 #define F2(N, X1, X2) { { X1, X2 } },
1697 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1698 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1699 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1701 static const DisasFormatInfo format_info
[] = {
1702 #include "insn-format.def"
1720 /* Generally, we'll extract operands into this structures, operate upon
1721 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1722 of routines below for more details. */
1724 bool g_out
, g_out2
, g_in1
, g_in2
;
1725 TCGv_i64 out
, out2
, in1
, in2
;
1729 /* Return values from translate_one, indicating the state of the TB. */
1731 /* Continue the TB. */
1733 /* We have emitted one or more goto_tb. No fixup required. */
1735 /* We are not using a goto_tb (for whatever reason), but have updated
1736 the PC (for whatever reason), so there's no need to do it again on
1739 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1740 updated the PC for the next instruction to be executed. */
1742 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1743 No following code will be executed. */
1747 typedef enum DisasFacility
{
1748 FAC_Z
, /* zarch (default) */
1749 FAC_CASS
, /* compare and swap and store */
1750 FAC_CASS2
, /* compare and swap and store 2*/
1751 FAC_DFP
, /* decimal floating point */
1752 FAC_DFPR
, /* decimal floating point rounding */
1753 FAC_DO
, /* distinct operands */
1754 FAC_EE
, /* execute extensions */
1755 FAC_EI
, /* extended immediate */
1756 FAC_FPE
, /* floating point extension */
1757 FAC_FPSSH
, /* floating point support sign handling */
1758 FAC_FPRGR
, /* FPR-GR transfer */
1759 FAC_GIE
, /* general instructions extension */
1760 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1761 FAC_HW
, /* high-word */
1762 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1763 FAC_LOC
, /* load/store on condition */
1764 FAC_LD
, /* long displacement */
1765 FAC_PC
, /* population count */
1766 FAC_SCF
, /* store clock fast */
1767 FAC_SFLE
, /* store facility list extended */
1773 DisasFacility fac
:6;
1777 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1778 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1779 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1780 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1781 void (*help_cout
)(DisasContext
*, DisasOps
*);
1782 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1787 /* ====================================================================== */
1788 /* Miscelaneous helpers, used by several operations. */
1790 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1791 DisasOps
*o
, int mask
)
1793 int b2
= get_field(f
, b2
);
1794 int d2
= get_field(f
, d2
);
1797 o
->in2
= tcg_const_i64(d2
& mask
);
1799 o
->in2
= get_address(s
, 0, b2
, d2
);
1800 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1804 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1806 if (dest
== s
->next_pc
) {
1809 if (use_goto_tb(s
, dest
)) {
1810 gen_update_cc_op(s
);
1812 tcg_gen_movi_i64(psw_addr
, dest
);
1813 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1814 return EXIT_GOTO_TB
;
1816 tcg_gen_movi_i64(psw_addr
, dest
);
1817 return EXIT_PC_UPDATED
;
1821 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1822 bool is_imm
, int imm
, TCGv_i64 cdest
)
1825 uint64_t dest
= s
->pc
+ 2 * imm
;
1828 /* Take care of the special cases first. */
1829 if (c
->cond
== TCG_COND_NEVER
) {
1834 if (dest
== s
->next_pc
) {
1835 /* Branch to next. */
1839 if (c
->cond
== TCG_COND_ALWAYS
) {
1840 ret
= help_goto_direct(s
, dest
);
1844 if (TCGV_IS_UNUSED_I64(cdest
)) {
1845 /* E.g. bcr %r0 -> no branch. */
1849 if (c
->cond
== TCG_COND_ALWAYS
) {
1850 tcg_gen_mov_i64(psw_addr
, cdest
);
1851 ret
= EXIT_PC_UPDATED
;
1856 if (use_goto_tb(s
, s
->next_pc
)) {
1857 if (is_imm
&& use_goto_tb(s
, dest
)) {
1858 /* Both exits can use goto_tb. */
1859 gen_update_cc_op(s
);
1861 lab
= gen_new_label();
1863 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1865 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1868 /* Branch not taken. */
1870 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1871 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1876 tcg_gen_movi_i64(psw_addr
, dest
);
1877 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1881 /* Fallthru can use goto_tb, but taken branch cannot. */
1882 /* Store taken branch destination before the brcond. This
1883 avoids having to allocate a new local temp to hold it.
1884 We'll overwrite this in the not taken case anyway. */
1886 tcg_gen_mov_i64(psw_addr
, cdest
);
1889 lab
= gen_new_label();
1891 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1893 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1896 /* Branch not taken. */
1897 gen_update_cc_op(s
);
1899 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1900 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1904 tcg_gen_movi_i64(psw_addr
, dest
);
1906 ret
= EXIT_PC_UPDATED
;
1909 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1910 Most commonly we're single-stepping or some other condition that
1911 disables all use of goto_tb. Just update the PC and exit. */
1913 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1915 cdest
= tcg_const_i64(dest
);
1919 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1922 TCGv_i32 t0
= tcg_temp_new_i32();
1923 TCGv_i64 t1
= tcg_temp_new_i64();
1924 TCGv_i64 z
= tcg_const_i64(0);
1925 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1926 tcg_gen_extu_i32_i64(t1
, t0
);
1927 tcg_temp_free_i32(t0
);
1928 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1929 tcg_temp_free_i64(t1
);
1930 tcg_temp_free_i64(z
);
1934 tcg_temp_free_i64(cdest
);
1936 tcg_temp_free_i64(next
);
1938 ret
= EXIT_PC_UPDATED
;
1946 /* ====================================================================== */
1947 /* The operations. These perform the bulk of the work for any insn,
1948 usually after the operands have been loaded and output initialized. */
1950 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1952 gen_helper_abs_i64(o
->out
, o
->in2
);
1956 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1958 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1962 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1964 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1968 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1970 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1971 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1975 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1977 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1981 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1985 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1987 /* XXX possible optimization point */
1989 cc
= tcg_temp_new_i64();
1990 tcg_gen_extu_i32_i64(cc
, cc_op
);
1991 tcg_gen_shri_i64(cc
, cc
, 1);
1993 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1994 tcg_temp_free_i64(cc
);
1998 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
2000 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2004 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
2006 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2010 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
2012 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2013 return_low128(o
->out2
);
2017 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2019 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2023 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2025 int shift
= s
->insn
->data
& 0xff;
2026 int size
= s
->insn
->data
>> 8;
2027 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2030 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2031 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2032 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2034 /* Produce the CC from only the bits manipulated. */
2035 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2036 set_cc_nz_u64(s
, cc_dst
);
2040 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2042 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2043 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2044 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2045 return EXIT_PC_UPDATED
;
2051 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2053 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2054 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2057 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2059 int m1
= get_field(s
->fields
, m1
);
2060 bool is_imm
= have_field(s
->fields
, i2
);
2061 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2064 disas_jcc(s
, &c
, m1
);
2065 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2068 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2070 int r1
= get_field(s
->fields
, r1
);
2071 bool is_imm
= have_field(s
->fields
, i2
);
2072 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2076 c
.cond
= TCG_COND_NE
;
2081 t
= tcg_temp_new_i64();
2082 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2083 store_reg32_i64(r1
, t
);
2084 c
.u
.s32
.a
= tcg_temp_new_i32();
2085 c
.u
.s32
.b
= tcg_const_i32(0);
2086 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2087 tcg_temp_free_i64(t
);
2089 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2092 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2094 int r1
= get_field(s
->fields
, r1
);
2095 bool is_imm
= have_field(s
->fields
, i2
);
2096 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2099 c
.cond
= TCG_COND_NE
;
2104 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2105 c
.u
.s64
.a
= regs
[r1
];
2106 c
.u
.s64
.b
= tcg_const_i64(0);
2108 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2111 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
2113 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2118 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
2120 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2125 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
2127 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2132 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2134 int l
= get_field(s
->fields
, l1
);
2139 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2140 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2143 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2144 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2147 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2148 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2151 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2152 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2155 potential_page_fault(s
);
2156 vl
= tcg_const_i32(l
);
2157 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2158 tcg_temp_free_i32(vl
);
2162 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2166 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2168 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2169 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2170 potential_page_fault(s
);
2171 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2172 tcg_temp_free_i32(r1
);
2173 tcg_temp_free_i32(r3
);
2178 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2180 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2181 TCGv_i32 t1
= tcg_temp_new_i32();
2182 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2183 potential_page_fault(s
);
2184 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2186 tcg_temp_free_i32(t1
);
2187 tcg_temp_free_i32(m3
);
2191 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2193 int r3
= get_field(s
->fields
, r3
);
2194 potential_page_fault(s
);
2195 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2200 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2202 int r3
= get_field(s
->fields
, r3
);
2203 potential_page_fault(s
);
2204 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2209 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2211 int r3
= get_field(s
->fields
, r3
);
2212 TCGv_i64 in3
= tcg_temp_new_i64();
2213 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2214 potential_page_fault(s
);
2215 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2216 tcg_temp_free_i64(in3
);
2221 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2223 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2224 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2225 potential_page_fault(s
);
2226 /* XXX rewrite in tcg */
2227 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2232 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2234 TCGv_i64 t1
= tcg_temp_new_i64();
2235 TCGv_i32 t2
= tcg_temp_new_i32();
2236 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2237 gen_helper_cvd(t1
, t2
);
2238 tcg_temp_free_i32(t2
);
2239 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2240 tcg_temp_free_i64(t1
);
2244 #ifndef CONFIG_USER_ONLY
2245 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2249 check_privileged(s
);
2250 potential_page_fault(s
);
2252 /* We pretend the format is RX_a so that D2 is the field we want. */
2253 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2254 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2255 tcg_temp_free_i32(tmp
);
2260 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2262 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2263 return_low128(o
->out
);
2267 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2269 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2270 return_low128(o
->out
);
2274 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2276 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2277 return_low128(o
->out
);
2281 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2283 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2284 return_low128(o
->out
);
2288 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2290 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2294 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2296 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2300 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2302 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2303 return_low128(o
->out2
);
2307 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2309 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2313 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2315 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2316 tb->flags, (ab)use the tb->cs_base field as the address of
2317 the template in memory, and grab 8 bits of tb->flags/cflags for
2318 the contents of the register. We would then recognize all this
2319 in gen_intermediate_code_internal, generating code for exactly
2320 one instruction. This new TB then gets executed normally.
2322 On the other hand, this seems to be mostly used for modifying
2323 MVC inside of memcpy, which needs a helper call anyway. So
2324 perhaps this doesn't bear thinking about any further. */
2331 tmp
= tcg_const_i64(s
->next_pc
);
2332 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2333 tcg_temp_free_i64(tmp
);
2339 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2341 int m3
= get_field(s
->fields
, m3
);
2342 int pos
, len
, base
= s
->insn
->data
;
2343 TCGv_i64 tmp
= tcg_temp_new_i64();
2348 /* Effectively a 32-bit load. */
2349 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2356 /* Effectively a 16-bit load. */
2357 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2365 /* Effectively an 8-bit load. */
2366 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2371 pos
= base
+ ctz32(m3
) * 8;
2372 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2373 ccm
= ((1ull << len
) - 1) << pos
;
2377 /* This is going to be a sequence of loads and inserts. */
2378 pos
= base
+ 32 - 8;
2382 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2383 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2384 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2387 m3
= (m3
<< 1) & 0xf;
2393 tcg_gen_movi_i64(tmp
, ccm
);
2394 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2395 tcg_temp_free_i64(tmp
);
2399 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2401 int shift
= s
->insn
->data
& 0xff;
2402 int size
= s
->insn
->data
>> 8;
2403 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2407 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2409 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2413 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2415 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2419 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2421 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2425 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2427 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2431 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2433 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2434 return_low128(o
->out2
);
2438 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2440 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2441 return_low128(o
->out2
);
2445 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2447 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2451 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2453 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2457 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2459 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2463 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2465 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2469 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2471 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2475 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2477 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2481 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2483 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2487 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2489 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2493 #ifndef CONFIG_USER_ONLY
2494 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2496 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2497 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2498 check_privileged(s
);
2499 potential_page_fault(s
);
2500 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2501 tcg_temp_free_i32(r1
);
2502 tcg_temp_free_i32(r3
);
2506 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2508 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2509 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2510 check_privileged(s
);
2511 potential_page_fault(s
);
2512 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2513 tcg_temp_free_i32(r1
);
2514 tcg_temp_free_i32(r3
);
2517 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2519 check_privileged(s
);
2520 potential_page_fault(s
);
2521 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2526 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2530 check_privileged(s
);
2532 t1
= tcg_temp_new_i64();
2533 t2
= tcg_temp_new_i64();
2534 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2535 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2536 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2537 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2538 tcg_gen_shli_i64(t1
, t1
, 32);
2539 gen_helper_load_psw(cpu_env
, t1
, t2
);
2540 tcg_temp_free_i64(t1
);
2541 tcg_temp_free_i64(t2
);
2542 return EXIT_NORETURN
;
2546 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2548 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2549 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2550 potential_page_fault(s
);
2551 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2552 tcg_temp_free_i32(r1
);
2553 tcg_temp_free_i32(r3
);
2557 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2559 int r1
= get_field(s
->fields
, r1
);
2560 int r3
= get_field(s
->fields
, r3
);
2561 TCGv_i64 t
= tcg_temp_new_i64();
2562 TCGv_i64 t4
= tcg_const_i64(4);
2565 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2566 store_reg32_i64(r1
, t
);
2570 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2574 tcg_temp_free_i64(t
);
2575 tcg_temp_free_i64(t4
);
2579 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2581 int r1
= get_field(s
->fields
, r1
);
2582 int r3
= get_field(s
->fields
, r3
);
2583 TCGv_i64 t
= tcg_temp_new_i64();
2584 TCGv_i64 t4
= tcg_const_i64(4);
2587 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2588 store_reg32h_i64(r1
, t
);
2592 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2596 tcg_temp_free_i64(t
);
2597 tcg_temp_free_i64(t4
);
2601 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2603 int r1
= get_field(s
->fields
, r1
);
2604 int r3
= get_field(s
->fields
, r3
);
2605 TCGv_i64 t8
= tcg_const_i64(8);
2608 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2612 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2616 tcg_temp_free_i64(t8
);
2620 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2623 o
->g_out
= o
->g_in2
;
2624 TCGV_UNUSED_I64(o
->in2
);
2629 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2633 o
->g_out
= o
->g_in1
;
2634 o
->g_out2
= o
->g_in2
;
2635 TCGV_UNUSED_I64(o
->in1
);
2636 TCGV_UNUSED_I64(o
->in2
);
2637 o
->g_in1
= o
->g_in2
= false;
2641 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2643 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2644 potential_page_fault(s
);
2645 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2646 tcg_temp_free_i32(l
);
2650 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2652 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2653 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2654 potential_page_fault(s
);
2655 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2656 tcg_temp_free_i32(r1
);
2657 tcg_temp_free_i32(r2
);
2662 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2664 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2665 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2666 potential_page_fault(s
);
2667 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2668 tcg_temp_free_i32(r1
);
2669 tcg_temp_free_i32(r3
);
2674 #ifndef CONFIG_USER_ONLY
2675 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2677 int r1
= get_field(s
->fields
, l1
);
2678 check_privileged(s
);
2679 potential_page_fault(s
);
2680 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2685 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2687 int r1
= get_field(s
->fields
, l1
);
2688 check_privileged(s
);
2689 potential_page_fault(s
);
2690 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2696 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2698 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2702 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2704 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2705 return_low128(o
->out2
);
2709 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2711 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2715 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2717 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2721 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2723 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2727 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2729 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2730 return_low128(o
->out2
);
2734 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2736 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2737 return_low128(o
->out2
);
2741 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2743 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2744 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2745 tcg_temp_free_i64(r3
);
2749 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2751 int r3
= get_field(s
->fields
, r3
);
2752 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2756 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2758 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2759 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2760 tcg_temp_free_i64(r3
);
2764 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2766 int r3
= get_field(s
->fields
, r3
);
2767 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2771 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2773 gen_helper_nabs_i64(o
->out
, o
->in2
);
2777 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2779 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2783 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2785 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2789 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2791 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2792 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2796 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2798 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2799 potential_page_fault(s
);
2800 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2801 tcg_temp_free_i32(l
);
2806 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2808 tcg_gen_neg_i64(o
->out
, o
->in2
);
2812 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2814 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2818 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2820 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2824 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2826 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2827 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2831 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2833 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2834 potential_page_fault(s
);
2835 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2836 tcg_temp_free_i32(l
);
2841 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2843 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2847 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2849 int shift
= s
->insn
->data
& 0xff;
2850 int size
= s
->insn
->data
>> 8;
2851 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2854 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2855 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2857 /* Produce the CC from only the bits manipulated. */
2858 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2859 set_cc_nz_u64(s
, cc_dst
);
2863 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2865 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2869 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2871 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2875 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2877 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2881 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2883 TCGv_i32 t1
= tcg_temp_new_i32();
2884 TCGv_i32 t2
= tcg_temp_new_i32();
2885 TCGv_i32 to
= tcg_temp_new_i32();
2886 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2887 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2888 tcg_gen_rotl_i32(to
, t1
, t2
);
2889 tcg_gen_extu_i32_i64(o
->out
, to
);
2890 tcg_temp_free_i32(t1
);
2891 tcg_temp_free_i32(t2
);
2892 tcg_temp_free_i32(to
);
2896 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2898 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2902 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2904 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2908 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2910 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2914 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2916 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2917 return_low128(o
->out2
);
2921 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2923 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2927 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2929 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2933 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2935 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2936 return_low128(o
->out2
);
2940 #ifndef CONFIG_USER_ONLY
2941 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2943 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2944 check_privileged(s
);
2945 potential_page_fault(s
);
2946 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2947 tcg_temp_free_i32(r1
);
2952 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2954 uint64_t sign
= 1ull << s
->insn
->data
;
2955 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2956 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2957 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2958 /* The arithmetic left shift is curious in that it does not affect
2959 the sign bit. Copy that over from the source unchanged. */
2960 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2961 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2962 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2966 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2968 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2972 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2974 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2978 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2980 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2984 #ifndef CONFIG_USER_ONLY
2985 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2987 check_privileged(s
);
2988 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2992 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2994 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2995 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2996 check_privileged(s
);
2997 potential_page_fault(s
);
2998 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2999 tcg_temp_free_i32(r1
);
3000 tcg_temp_free_i32(r3
);
3004 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3006 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3007 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3008 check_privileged(s
);
3009 potential_page_fault(s
);
3010 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3011 tcg_temp_free_i32(r1
);
3012 tcg_temp_free_i32(r3
);
3016 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3018 uint64_t i2
= get_field(s
->fields
, i2
);
3021 check_privileged(s
);
3023 /* It is important to do what the instruction name says: STORE THEN.
3024 If we let the output hook perform the store then if we fault and
3025 restart, we'll have the wrong SYSTEM MASK in place. */
3026 t
= tcg_temp_new_i64();
3027 tcg_gen_shri_i64(t
, psw_mask
, 56);
3028 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3029 tcg_temp_free_i64(t
);
3031 if (s
->fields
->op
== 0xac) {
3032 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3033 (i2
<< 56) | 0x00ffffffffffffffull
);
3035 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3041 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3043 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3047 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3049 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3053 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3055 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3059 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3061 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3065 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3067 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3068 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3069 potential_page_fault(s
);
3070 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3071 tcg_temp_free_i32(r1
);
3072 tcg_temp_free_i32(r3
);
3076 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3078 int m3
= get_field(s
->fields
, m3
);
3079 int pos
, base
= s
->insn
->data
;
3080 TCGv_i64 tmp
= tcg_temp_new_i64();
3082 pos
= base
+ ctz32(m3
) * 8;
3085 /* Effectively a 32-bit store. */
3086 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3087 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3093 /* Effectively a 16-bit store. */
3094 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3095 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3102 /* Effectively an 8-bit store. */
3103 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3104 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3108 /* This is going to be a sequence of shifts and stores. */
3109 pos
= base
+ 32 - 8;
3112 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3113 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3114 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3116 m3
= (m3
<< 1) & 0xf;
3121 tcg_temp_free_i64(tmp
);
3125 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3127 int r1
= get_field(s
->fields
, r1
);
3128 int r3
= get_field(s
->fields
, r3
);
3129 int size
= s
->insn
->data
;
3130 TCGv_i64 tsize
= tcg_const_i64(size
);
3134 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3136 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3141 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3145 tcg_temp_free_i64(tsize
);
3149 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3151 int r1
= get_field(s
->fields
, r1
);
3152 int r3
= get_field(s
->fields
, r3
);
3153 TCGv_i64 t
= tcg_temp_new_i64();
3154 TCGv_i64 t4
= tcg_const_i64(4);
3155 TCGv_i64 t32
= tcg_const_i64(32);
3158 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3159 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3163 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3167 tcg_temp_free_i64(t
);
3168 tcg_temp_free_i64(t4
);
3169 tcg_temp_free_i64(t32
);
3173 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3175 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3179 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3184 tcg_gen_not_i64(o
->in2
, o
->in2
);
3185 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3187 /* XXX possible optimization point */
3189 cc
= tcg_temp_new_i64();
3190 tcg_gen_extu_i32_i64(cc
, cc_op
);
3191 tcg_gen_shri_i64(cc
, cc
, 1);
3192 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3193 tcg_temp_free_i64(cc
);
3197 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3204 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3205 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3206 tcg_temp_free_i32(t
);
3208 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3209 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3210 tcg_temp_free_i32(t
);
3212 gen_exception(EXCP_SVC
);
3213 return EXIT_NORETURN
;
3216 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3218 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3223 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3225 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3230 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3232 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3237 #ifndef CONFIG_USER_ONLY
3238 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3240 potential_page_fault(s
);
3241 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3247 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3249 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3250 potential_page_fault(s
);
3251 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3252 tcg_temp_free_i32(l
);
3257 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3259 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3260 potential_page_fault(s
);
3261 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3262 tcg_temp_free_i32(l
);
3266 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3268 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3269 potential_page_fault(s
);
3270 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3271 tcg_temp_free_i32(l
);
3276 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3278 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3282 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3284 int shift
= s
->insn
->data
& 0xff;
3285 int size
= s
->insn
->data
>> 8;
3286 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3289 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3290 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3292 /* Produce the CC from only the bits manipulated. */
3293 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3294 set_cc_nz_u64(s
, cc_dst
);
3298 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3300 o
->out
= tcg_const_i64(0);
3304 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3306 o
->out
= tcg_const_i64(0);
3312 /* ====================================================================== */
3313 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3314 the original inputs), update the various cc data structures in order to
3315 be able to compute the new condition code. */
3317 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3319 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3322 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3324 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3327 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3329 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3332 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3334 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3337 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3339 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3342 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3344 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3347 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3349 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3352 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3354 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3357 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3359 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3362 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3364 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3367 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3369 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3372 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3374 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3377 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3379 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3382 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3384 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3387 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3389 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3392 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3394 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3397 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3399 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3402 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3404 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3407 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3409 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3412 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3414 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3415 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3418 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3420 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3423 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3425 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3428 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3430 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3433 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3435 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3438 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3440 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3443 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3445 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3448 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3450 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3453 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3455 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3458 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3460 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3463 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3465 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3468 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3470 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3473 /* ====================================================================== */
3474 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3475 with the TCG register to which we will write. Used in combination with
3476 the "wout" generators, in some cases we need a new temporary, and in
3477 some cases we can write to a TCG global. */
3479 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3481 o
->out
= tcg_temp_new_i64();
3484 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3486 o
->out
= tcg_temp_new_i64();
3487 o
->out2
= tcg_temp_new_i64();
3490 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3492 o
->out
= regs
[get_field(f
, r1
)];
3496 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3498 /* ??? Specification exception: r1 must be even. */
3499 int r1
= get_field(f
, r1
);
3501 o
->out2
= regs
[(r1
+ 1) & 15];
3502 o
->g_out
= o
->g_out2
= true;
3505 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3507 o
->out
= fregs
[get_field(f
, r1
)];
3511 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3513 /* ??? Specification exception: r1 must be < 14. */
3514 int r1
= get_field(f
, r1
);
3516 o
->out2
= fregs
[(r1
+ 2) & 15];
3517 o
->g_out
= o
->g_out2
= true;
3520 /* ====================================================================== */
3521 /* The "Write OUTput" generators. These generally perform some non-trivial
3522 copy of data to TCG globals, or to main memory. The trivial cases are
3523 generally handled by having a "prep" generator install the TCG global
3524 as the destination of the operation. */
3526 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3528 store_reg(get_field(f
, r1
), o
->out
);
3531 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3533 int r1
= get_field(f
, r1
);
3534 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3537 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3539 int r1
= get_field(f
, r1
);
3540 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3543 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3545 store_reg32_i64(get_field(f
, r1
), o
->out
);
3548 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3550 /* ??? Specification exception: r1 must be even. */
3551 int r1
= get_field(f
, r1
);
3552 store_reg32_i64(r1
, o
->out
);
3553 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3556 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3558 /* ??? Specification exception: r1 must be even. */
3559 int r1
= get_field(f
, r1
);
3560 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3561 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3562 store_reg32_i64(r1
, o
->out
);
3565 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3567 store_freg32_i64(get_field(f
, r1
), o
->out
);
3570 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3572 store_freg(get_field(f
, r1
), o
->out
);
3575 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3577 /* ??? Specification exception: r1 must be < 14. */
3578 int f1
= get_field(s
->fields
, r1
);
3579 store_freg(f1
, o
->out
);
3580 store_freg((f1
+ 2) & 15, o
->out2
);
3583 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3585 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3586 store_reg32_i64(get_field(f
, r1
), o
->out
);
3590 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3592 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3593 store_freg32_i64(get_field(f
, r1
), o
->out
);
3597 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3599 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3602 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3604 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3607 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3609 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3612 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3614 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3617 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3619 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3622 /* ====================================================================== */
3623 /* The "INput 1" generators. These load the first operand to an insn. */
3625 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3627 o
->in1
= load_reg(get_field(f
, r1
));
3630 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3632 o
->in1
= regs
[get_field(f
, r1
)];
3636 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3638 o
->in1
= tcg_temp_new_i64();
3639 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3642 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3644 o
->in1
= tcg_temp_new_i64();
3645 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3648 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3650 o
->in1
= tcg_temp_new_i64();
3651 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3654 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3656 /* ??? Specification exception: r1 must be even. */
3657 int r1
= get_field(f
, r1
);
3658 o
->in1
= load_reg((r1
+ 1) & 15);
3661 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3663 /* ??? Specification exception: r1 must be even. */
3664 int r1
= get_field(f
, r1
);
3665 o
->in1
= tcg_temp_new_i64();
3666 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3669 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3671 /* ??? Specification exception: r1 must be even. */
3672 int r1
= get_field(f
, r1
);
3673 o
->in1
= tcg_temp_new_i64();
3674 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3677 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3679 /* ??? Specification exception: r1 must be even. */
3680 int r1
= get_field(f
, r1
);
3681 o
->in1
= tcg_temp_new_i64();
3682 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3685 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3687 o
->in1
= load_reg(get_field(f
, r2
));
3690 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3692 o
->in1
= load_reg(get_field(f
, r3
));
3695 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3697 o
->in1
= regs
[get_field(f
, r3
)];
3701 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3703 o
->in1
= tcg_temp_new_i64();
3704 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3707 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3709 o
->in1
= tcg_temp_new_i64();
3710 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3713 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3715 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3718 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3720 o
->in1
= fregs
[get_field(f
, r1
)];
3724 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3726 /* ??? Specification exception: r1 must be < 14. */
3727 int r1
= get_field(f
, r1
);
3729 o
->out2
= fregs
[(r1
+ 2) & 15];
3730 o
->g_out
= o
->g_out2
= true;
3733 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3735 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3738 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3740 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3741 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3744 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3747 o
->in1
= tcg_temp_new_i64();
3748 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3751 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3754 o
->in1
= tcg_temp_new_i64();
3755 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3758 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3761 o
->in1
= tcg_temp_new_i64();
3762 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3765 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3768 o
->in1
= tcg_temp_new_i64();
3769 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3772 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3775 o
->in1
= tcg_temp_new_i64();
3776 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3779 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 o
->in1
= tcg_temp_new_i64();
3783 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3786 /* ====================================================================== */
3787 /* The "INput 2" generators. These load the second operand to an insn. */
3789 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3791 o
->in2
= regs
[get_field(f
, r1
)];
3795 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3797 o
->in2
= tcg_temp_new_i64();
3798 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3801 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3803 o
->in2
= tcg_temp_new_i64();
3804 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3807 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3809 o
->in2
= load_reg(get_field(f
, r2
));
3812 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3814 o
->in2
= regs
[get_field(f
, r2
)];
3818 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 int r2
= get_field(f
, r2
);
3822 o
->in2
= load_reg(r2
);
3826 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3828 o
->in2
= tcg_temp_new_i64();
3829 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3832 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3834 o
->in2
= tcg_temp_new_i64();
3835 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3838 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3840 o
->in2
= tcg_temp_new_i64();
3841 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3844 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3846 o
->in2
= tcg_temp_new_i64();
3847 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3850 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3852 o
->in2
= load_reg(get_field(f
, r3
));
3855 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3857 o
->in2
= tcg_temp_new_i64();
3858 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3861 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3863 o
->in2
= tcg_temp_new_i64();
3864 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3867 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3869 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3872 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3874 o
->in2
= fregs
[get_field(f
, r2
)];
3878 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3880 /* ??? Specification exception: r1 must be < 14. */
3881 int r2
= get_field(f
, r2
);
3883 o
->in2
= fregs
[(r2
+ 2) & 15];
3884 o
->g_in1
= o
->g_in2
= true;
3887 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3889 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3890 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3893 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3895 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3898 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3900 help_l2_shift(s
, f
, o
, 31);
3903 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3905 help_l2_shift(s
, f
, o
, 63);
3908 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3914 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3917 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3920 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3923 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3926 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3929 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3932 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3935 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3938 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3941 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3944 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3950 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3953 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3956 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3959 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3962 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3965 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3968 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3970 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3973 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3975 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3978 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3980 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3983 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3985 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3988 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3990 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3991 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3994 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3996 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3997 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4000 /* ====================================================================== */
/* NOTE(review): this region was damaged in extraction — the D() initializer
   is missing its leading fields, the enum/table closing braces and the
   intervening #undef lines are gone.  Kept byte-identical; restore from
   upstream before compiling.  */
4002 /* Find opc within the table of insns. This is formulated as a switch
4003 statement so that (1) we get compile-time notice of cut-paste errors
4004 for duplicated opcodes, and (2) the compiler generates the binary
4005 search tree, rather than us having to post-process the table. */
4007 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4008 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4010 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
/* Enumerate every insn from the data file so the table below can be
   indexed by name.  */
4012 enum DisasInsnEnum
{
4013 #include "insn-data.def"
4017 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4022 .help_in1 = in1_##I1, \
4023 .help_in2 = in2_##I2, \
4024 .help_prep = prep_##P, \
4025 .help_wout = wout_##W, \
4026 .help_cout = cout_##CC, \
4027 .help_op = op_##OP, \
4031 /* Allow 0 to be used for NULL in the table below. */
/* The master table of instruction descriptors, one entry per D()/C()
   line in insn-data.def.  */
4039 static const DisasInsn insn_info
[] = {
4040 #include "insn-data.def"
4044 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4045 case OPC: return &insn_info[insn_ ## NM];
4047 static const DisasInsn
*lookup_opc(uint16_t opc
)
4050 #include "insn-data.def"
4059 /* Extract a field from the insn. The INSN should be left-aligned in
4060 the uint64_t so that we can more easily utilize the big-bit-endian
4061 definitions we extract from the Principals of Operation. */
4063 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4071 /* Zero extract the field from the insn. */
4072 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4074 /* Sign-extend, or un-swap the field as necessary. */
4076 case 0: /* unsigned */
4078 case 1: /* signed */
4079 assert(f
->size
<= 32);
4080 m
= 1u << (f
->size
- 1);
4083 case 2: /* dl+dh split, signed 20 bit. */
4084 r
= ((int8_t)r
<< 12) | (r
>> 8);
4090 /* Validate that the "compressed" encoding we selected above is valid.
4091 I.e. we havn't make two different original fields overlap. */
4092 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4093 o
->presentC
|= 1 << f
->indexC
;
4094 o
->presentO
|= 1 << f
->indexO
;
4096 o
->c
[f
->indexC
] = r
;
4099 /* Lookup the insn at the current PC, extracting the operands into O and
4100 returning the info struct for the insn. Returns NULL for invalid insn. */
4102 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
/* NOTE(review): the third parameter (presumably DisasFields *f, used
   below) and several local declarations are missing from this extract. */
4105 uint64_t insn
, pc
= s
->pc
;
4107 const DisasInsn
*info
;
/* Read the first halfword; its high byte is the major opcode, which
   determines the instruction length (2, 4 or 6 bytes). */
4109 insn
= ld_code2(env
, pc
);
4110 op
= (insn
>> 8) & 0xff;
4111 ilen
= get_ilen(op
);
4112 s
->next_pc
= s
->pc
+ ilen
;
/* Re-read the full instruction, left-aligned in the uint64_t as
   extract_field() expects.  The ilen==2 and default cases of this
   switch are not visible in this extract. */
4119 insn
= ld_code4(env
, pc
) << 32;
4122 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4128 /* We can't actually determine the insn format until we've looked up
4129 the full insn opcode. Which we can't do without locating the
4130 secondary opcode. Assume by default that OP2 is at bit 40; for
4131 those smaller insns that don't actually have a secondary opcode
4132 this will correctly result in OP2 = 0. */
4138 case 0xb2: /* S, RRF, RRE */
4139 case 0xb3: /* RRE, RRD, RRF */
4140 case 0xb9: /* RRE, RRF */
4141 case 0xe5: /* SSE, SIL */
/* Secondary opcode in the second byte (bits 8-15 of the insn). */
4142 op2
= (insn
<< 8) >> 56;
4146 case 0xc0: /* RIL */
4147 case 0xc2: /* RIL */
4148 case 0xc4: /* RIL */
4149 case 0xc6: /* RIL */
4150 case 0xc8: /* SSF */
4151 case 0xcc: /* RIL */
/* Secondary opcode is the 4-bit field at bits 12-15. */
4152 op2
= (insn
<< 12) >> 60;
4154 case 0xd0 ... 0xdf: /* SS */
4160 case 0xee ... 0xf3: /* SS */
4161 case 0xf8 ... 0xfd: /* SS */
/* These formats have no secondary opcode; op2 is presumably forced
   to 0 here -- the assignment itself is not visible in this extract. */
/* Default: secondary opcode at bit 40 (byte 5). */
4165 op2
= (insn
<< 40) >> 56;
/* Clear the output fields before extraction. */
4169 memset(f
, 0, sizeof(*f
));
4173 /* Lookup the instruction. */
4174 info
= lookup_opc(op
<< 8 | op2
);
4176 /* If we found it, extract the operands. */
4178 DisasFormat fmt
= info
->fmt
;
/* Pull every field of this format out of the raw insn word. */
4181 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4182 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/*
 * Decode and translate a single instruction at s->pc, emitting TCG ops.
 * Returns an ExitStatus telling the caller whether translation of the
 * block can continue (NO_EXIT) or must stop.
 * NOTE(review): several lines (the DisasFields/DisasOps declarations,
 * switch cases, closing braces) are missing from this extract.
 */
4188 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4190 const DisasInsn
*insn
;
4191 ExitStatus ret
= NO_EXIT
;
4195 insn
= extract_insn(env
, s
, &f
);
4197 /* If not found, try the old interpreter. This includes ILLOPC. */
4199 disas_s390_insn(env
, s
);
/* Map the legacy interpreter's is_jmp state onto an ExitStatus. */
4200 switch (s
->is_jmp
) {
4208 ret
= EXIT_PC_UPDATED
;
4211 ret
= EXIT_NORETURN
;
4221 /* Set up the structures we use to communicate with the helpers. */
/* g_* flags mark operands that alias global TCG values and therefore
   must not be freed below. */
4224 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4225 TCGV_UNUSED_I64(o
.out
);
4226 TCGV_UNUSED_I64(o
.out2
);
4227 TCGV_UNUSED_I64(o
.in1
);
4228 TCGV_UNUSED_I64(o
.in2
);
4229 TCGV_UNUSED_I64(o
.addr1
);
4231 /* Implement the instruction. */
/* Fixed helper pipeline from the insn descriptor: load inputs,
   prepare outputs, perform the operation, write outputs back, then
   compute the condition code.  Each stage is optional. */
4232 if (insn
->help_in1
) {
4233 insn
->help_in1(s
, &f
, &o
);
4235 if (insn
->help_in2
) {
4236 insn
->help_in2(s
, &f
, &o
);
4238 if (insn
->help_prep
) {
4239 insn
->help_prep(s
, &f
, &o
);
4241 if (insn
->help_op
) {
4242 ret
= insn
->help_op(s
, &o
);
4244 if (insn
->help_wout
) {
4245 insn
->help_wout(s
, &f
, &o
);
4247 if (insn
->help_cout
) {
4248 insn
->help_cout(s
, &o
);
4251 /* Free any temporaries created by the helpers. */
/* Operands flagged g_* are globals owned elsewhere; skip them. */
4252 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4253 tcg_temp_free_i64(o
.out
);
4255 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4256 tcg_temp_free_i64(o
.out2
);
4258 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4259 tcg_temp_free_i64(o
.in1
);
4261 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4262 tcg_temp_free_i64(o
.in2
);
4264 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4265 tcg_temp_free_i64(o
.addr1
);
4268 /* Advance to the next instruction. */
/*
 * Translate a block of guest code starting at tb->pc into TCG ops,
 * stopping at a page boundary, the opcode-buffer limit, the insn-count
 * limit, or when an instruction ends the block.
 * NOTE(review): the third parameter (presumably a search_pc flag that
 * enables the gen_opc_* bookkeeping below) and many scaffolding lines
 * are missing from this extract.
 */
4273 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4274 TranslationBlock
*tb
,
4278 target_ulong pc_start
;
4279 uint64_t next_page_start
;
4280 uint16_t *gen_opc_end
;
4282 int num_insns
, max_insns
;
/* In 31-bit mode, addresses are truncated to 31 bits. */
4290 if (!(tb
->flags
& FLAG_MASK_64
)) {
4291 pc_start
&= 0x7fffffff;
/* Initialize the per-block disassembly context. */
4296 dc
.cc_op
= CC_OP_DYNAMIC
;
4297 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4298 dc
.is_jmp
= DISAS_NEXT
;
4300 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4302 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
/* 0 in cflags means "no limit"; use the mask value as the cap. */
4305 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4306 if (max_insns
== 0) {
4307 max_insns
= CF_COUNT_MASK
;
/* Per-insn PC-search bookkeeping: record, for each generated op,
   the guest PC, cc_op state and icount so the PC can be recovered
   from an opcode index (see restore_state_to_opc). */
4314 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4318 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4321 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4322 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4323 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4324 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* The last insn of an I/O-terminated TB needs io_start handling;
   the call made here is not visible in this extract. */
4326 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4330 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4331 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop translation at a breakpoint so the debug exception can be
   raised with an up-to-date PC. */
4335 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4336 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4337 if (bp
->pc
== dc
.pc
) {
4338 status
= EXIT_PC_STALE
;
4344 if (status
== NO_EXIT
) {
4345 status
= translate_one(env
, &dc
);
4348 /* If we reach a page boundary, are single stepping,
4349 or exhaust instruction count, stop generation. */
4350 if (status
== NO_EXIT
4351 && (dc
.pc
>= next_page_start
4352 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4353 || num_insns
>= max_insns
4355 || env
->singlestep_enabled
)) {
4356 status
= EXIT_PC_STALE
;
4358 } while (status
== NO_EXIT
);
4360 if (tb
->cflags
& CF_LAST_IO
) {
/* Epilogue: switch on the final status (switch header not visible).
   EXIT_PC_STALE presumably falls into updating psw.addr here. */
4369 update_psw_addr(&dc
);
4371 case EXIT_PC_UPDATED
:
/* When single-stepping, make the cc state concrete before leaving
   the TB so the next TB can start from CC_OP_DYNAMIC. */
4372 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4373 gen_op_calc_cc(&dc
);
4375 /* Next TB starts off with CC_OP_DYNAMIC,
4376 so make sure the cc op type is in env */
4377 gen_op_set_cc_op(&dc
);
4380 gen_exception(EXCP_DEBUG
);
4382 /* Generate the return instruction */
4390 gen_icount_end(tb
, num_insns
);
4391 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* Pad the instr_start array out to the final opcode index. */
4393 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4396 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4399 tb
->size
= dc
.pc
- pc_start
;
4400 tb
->icount
= num_insns
;
4403 #if defined(S390X_DEBUG_DISAS)
4404 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4405 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4406 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4412 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4414 gen_intermediate_code_internal(env
, tb
, 0);
4417 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4419 gen_intermediate_code_internal(env
, tb
, 1);
4422 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4425 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4426 cc_op
= gen_opc_cc_op
[pc_pos
];
4427 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {