4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log the raw instruction word when verbose disas logging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
491 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
494 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
497 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
500 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
502 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
505 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
507 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
510 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp
= tcg_const_i32(v2
);
514 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
515 tcg_temp_free_i32(tmp
);
518 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
520 TCGv_i32 tmp
= tcg_const_i32(v2
);
521 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
522 tcg_temp_free_i32(tmp
);
525 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
527 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
530 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
532 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
535 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
537 TCGv_i64 tmp
= tcg_const_i64(v2
);
539 tcg_temp_free_i64(tmp
);
542 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
544 TCGv_i64 tmp
= tcg_const_i64(v2
);
546 tcg_temp_free_i64(tmp
);
549 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
551 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
554 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
556 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
559 /* CC value is in env->cc_op */
560 static inline void set_cc_static(DisasContext
*s
)
562 tcg_gen_discard_i64(cc_src
);
563 tcg_gen_discard_i64(cc_dst
);
564 tcg_gen_discard_i64(cc_vr
);
565 s
->cc_op
= CC_OP_STATIC
;
568 static inline void gen_op_set_cc_op(DisasContext
*s
)
570 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
571 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
575 static inline void gen_update_cc_op(DisasContext
*s
)
580 /* calculates cc into cc_op */
581 static void gen_op_calc_cc(DisasContext
*s
)
583 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
584 TCGv_i64 dummy
= tcg_const_i64(0);
591 /* s->cc_op is the cc value */
592 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
595 /* env->cc_op already is the cc value */
609 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
614 case CC_OP_LTUGTU_32
:
615 case CC_OP_LTUGTU_64
:
622 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
637 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
640 /* unknown operation - assume 3 arguments and cc_op in env */
641 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
647 tcg_temp_free_i32(local_cc_op
);
648 tcg_temp_free_i64(dummy
);
650 /* We now have cc in cc_op as constant */
654 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
658 *r1
= (insn
>> 4) & 0xf;
662 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
663 int *x2
, int *b2
, int *d2
)
667 *r1
= (insn
>> 20) & 0xf;
668 *x2
= (insn
>> 16) & 0xf;
669 *b2
= (insn
>> 12) & 0xf;
672 return get_address(s
, *x2
, *b2
, *d2
);
675 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
680 *r1
= (insn
>> 20) & 0xf;
682 *r3
= (insn
>> 16) & 0xf;
683 *b2
= (insn
>> 12) & 0xf;
687 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
692 *i2
= (insn
>> 16) & 0xff;
693 *b1
= (insn
>> 12) & 0xf;
696 return get_address(s
, 0, *b1
, *d1
);
699 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
701 /* NOTE: we handle the case where the TB spans two pages here */
702 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
703 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
704 && !s
->singlestep_enabled
705 && !(s
->tb
->cflags
& CF_LAST_IO
));
708 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
712 if (use_goto_tb(s
, pc
)) {
713 tcg_gen_goto_tb(tb_num
);
714 tcg_gen_movi_i64(psw_addr
, pc
);
715 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
717 /* jump to another page: currently not optimized */
718 tcg_gen_movi_i64(psw_addr
, pc
);
723 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
725 #ifdef DEBUG_INLINE_BRANCHES
726 inline_branch_miss
[cc_op
]++;
730 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
732 #ifdef DEBUG_INLINE_BRANCHES
733 inline_branch_hit
[cc_op
]++;
737 /* Table of mask values to comparison codes, given a comparison as input.
738 For a true comparison CC=3 will never be set, but we treat this
739 conservatively for possible use when CC=3 indicates overflow. */
740 static const TCGCond ltgt_cond
[16] = {
741 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
742 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
743 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
744 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
745 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
746 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
747 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
748 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
751 /* Table of mask values to comparison codes, given a logic op as input.
752 For such, only CC=0 and CC=1 should be possible. */
753 static const TCGCond nz_cond
[16] = {
755 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
757 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
759 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
760 /* EQ | NE | x | x */
761 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
764 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
765 details required to generate a TCG comparison. */
766 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
769 enum cc_op old_cc_op
= s
->cc_op
;
771 if (mask
== 15 || mask
== 0) {
772 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
775 c
->g1
= c
->g2
= true;
780 /* Find the TCG condition for the mask + cc op. */
786 cond
= ltgt_cond
[mask
];
787 if (cond
== TCG_COND_NEVER
) {
790 account_inline_branch(s
, old_cc_op
);
793 case CC_OP_LTUGTU_32
:
794 case CC_OP_LTUGTU_64
:
795 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
796 if (cond
== TCG_COND_NEVER
) {
799 account_inline_branch(s
, old_cc_op
);
803 cond
= nz_cond
[mask
];
804 if (cond
== TCG_COND_NEVER
) {
807 account_inline_branch(s
, old_cc_op
);
822 account_inline_branch(s
, old_cc_op
);
837 account_inline_branch(s
, old_cc_op
);
842 /* Calculate cc value. */
847 /* Jump based on CC. We'll load up the real cond below;
848 the assignment here merely avoids a compiler warning. */
849 account_noninline_branch(s
, old_cc_op
);
850 old_cc_op
= CC_OP_STATIC
;
851 cond
= TCG_COND_NEVER
;
855 /* Load up the arguments of the comparison. */
857 c
->g1
= c
->g2
= false;
861 c
->u
.s32
.a
= tcg_temp_new_i32();
862 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
863 c
->u
.s32
.b
= tcg_const_i32(0);
866 case CC_OP_LTUGTU_32
:
868 c
->u
.s32
.a
= tcg_temp_new_i32();
869 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
870 c
->u
.s32
.b
= tcg_temp_new_i32();
871 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
877 c
->u
.s64
.b
= tcg_const_i64(0);
881 case CC_OP_LTUGTU_64
:
884 c
->g1
= c
->g2
= true;
890 c
->u
.s64
.a
= tcg_temp_new_i64();
891 c
->u
.s64
.b
= tcg_const_i64(0);
892 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
900 case 0x8 | 0x4 | 0x2: /* cc != 3 */
902 c
->u
.s32
.b
= tcg_const_i32(3);
904 case 0x8 | 0x4 | 0x1: /* cc != 2 */
906 c
->u
.s32
.b
= tcg_const_i32(2);
908 case 0x8 | 0x2 | 0x1: /* cc != 1 */
910 c
->u
.s32
.b
= tcg_const_i32(1);
912 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
915 c
->u
.s32
.a
= tcg_temp_new_i32();
916 c
->u
.s32
.b
= tcg_const_i32(0);
917 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
919 case 0x8 | 0x4: /* cc < 2 */
921 c
->u
.s32
.b
= tcg_const_i32(2);
923 case 0x8: /* cc == 0 */
925 c
->u
.s32
.b
= tcg_const_i32(0);
927 case 0x4 | 0x2 | 0x1: /* cc != 0 */
929 c
->u
.s32
.b
= tcg_const_i32(0);
931 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
934 c
->u
.s32
.a
= tcg_temp_new_i32();
935 c
->u
.s32
.b
= tcg_const_i32(0);
936 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
938 case 0x4: /* cc == 1 */
940 c
->u
.s32
.b
= tcg_const_i32(1);
942 case 0x2 | 0x1: /* cc > 1 */
944 c
->u
.s32
.b
= tcg_const_i32(1);
946 case 0x2: /* cc == 2 */
948 c
->u
.s32
.b
= tcg_const_i32(2);
950 case 0x1: /* cc == 3 */
952 c
->u
.s32
.b
= tcg_const_i32(3);
955 /* CC is masked by something else: (8 >> cc) & mask. */
958 c
->u
.s32
.a
= tcg_const_i32(8);
959 c
->u
.s32
.b
= tcg_const_i32(0);
960 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
972 static void free_compare(DisasCompare
*c
)
976 tcg_temp_free_i64(c
->u
.s64
.a
);
978 tcg_temp_free_i32(c
->u
.s32
.a
);
983 tcg_temp_free_i64(c
->u
.s64
.b
);
985 tcg_temp_free_i32(c
->u
.s32
.b
);
990 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
993 TCGv_i64 tmp
, tmp2
, tmp3
;
994 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
996 #ifndef CONFIG_USER_ONLY
1000 r1
= (insn
>> 4) & 0xf;
1003 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1006 case 0x22: /* IPM R1 [RRE] */
1007 tmp32_1
= tcg_const_i32(r1
);
1009 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1010 tcg_temp_free_i32(tmp32_1
);
1012 case 0x41: /* CKSM R1,R2 [RRE] */
1013 tmp32_1
= tcg_const_i32(r1
);
1014 tmp32_2
= tcg_const_i32(r2
);
1015 potential_page_fault(s
);
1016 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1017 tcg_temp_free_i32(tmp32_1
);
1018 tcg_temp_free_i32(tmp32_2
);
1019 gen_op_movi_cc(s
, 0);
1021 case 0x4e: /* SAR R1,R2 [RRE] */
1022 tmp32_1
= load_reg32(r2
);
1023 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1024 tcg_temp_free_i32(tmp32_1
);
1026 case 0x4f: /* EAR R1,R2 [RRE] */
1027 tmp32_1
= tcg_temp_new_i32();
1028 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1029 store_reg32(r1
, tmp32_1
);
1030 tcg_temp_free_i32(tmp32_1
);
1032 case 0x54: /* MVPG R1,R2 [RRE] */
1034 tmp2
= load_reg(r1
);
1035 tmp3
= load_reg(r2
);
1036 potential_page_fault(s
);
1037 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1038 tcg_temp_free_i64(tmp
);
1039 tcg_temp_free_i64(tmp2
);
1040 tcg_temp_free_i64(tmp3
);
1041 /* XXX check CCO bit and set CC accordingly */
1042 gen_op_movi_cc(s
, 0);
1044 case 0x55: /* MVST R1,R2 [RRE] */
1045 tmp32_1
= load_reg32(0);
1046 tmp32_2
= tcg_const_i32(r1
);
1047 tmp32_3
= tcg_const_i32(r2
);
1048 potential_page_fault(s
);
1049 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1050 tcg_temp_free_i32(tmp32_1
);
1051 tcg_temp_free_i32(tmp32_2
);
1052 tcg_temp_free_i32(tmp32_3
);
1053 gen_op_movi_cc(s
, 1);
1055 case 0x5d: /* CLST R1,R2 [RRE] */
1056 tmp32_1
= load_reg32(0);
1057 tmp32_2
= tcg_const_i32(r1
);
1058 tmp32_3
= tcg_const_i32(r2
);
1059 potential_page_fault(s
);
1060 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1062 tcg_temp_free_i32(tmp32_1
);
1063 tcg_temp_free_i32(tmp32_2
);
1064 tcg_temp_free_i32(tmp32_3
);
1066 case 0x5e: /* SRST R1,R2 [RRE] */
1067 tmp32_1
= load_reg32(0);
1068 tmp32_2
= tcg_const_i32(r1
);
1069 tmp32_3
= tcg_const_i32(r2
);
1070 potential_page_fault(s
);
1071 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1073 tcg_temp_free_i32(tmp32_1
);
1074 tcg_temp_free_i32(tmp32_2
);
1075 tcg_temp_free_i32(tmp32_3
);
1078 #ifndef CONFIG_USER_ONLY
1079 case 0x02: /* STIDP D2(B2) [S] */
1081 check_privileged(s
);
1082 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1083 tmp
= get_address(s
, 0, b2
, d2
);
1084 potential_page_fault(s
);
1085 gen_helper_stidp(cpu_env
, tmp
);
1086 tcg_temp_free_i64(tmp
);
1088 case 0x04: /* SCK D2(B2) [S] */
1090 check_privileged(s
);
1091 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1092 tmp
= get_address(s
, 0, b2
, d2
);
1093 potential_page_fault(s
);
1094 gen_helper_sck(cc_op
, tmp
);
1096 tcg_temp_free_i64(tmp
);
1098 case 0x05: /* STCK D2(B2) [S] */
1100 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1101 tmp
= get_address(s
, 0, b2
, d2
);
1102 potential_page_fault(s
);
1103 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1105 tcg_temp_free_i64(tmp
);
1107 case 0x06: /* SCKC D2(B2) [S] */
1108 /* Set Clock Comparator */
1109 check_privileged(s
);
1110 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1111 tmp
= get_address(s
, 0, b2
, d2
);
1112 potential_page_fault(s
);
1113 gen_helper_sckc(cpu_env
, tmp
);
1114 tcg_temp_free_i64(tmp
);
1116 case 0x07: /* STCKC D2(B2) [S] */
1117 /* Store Clock Comparator */
1118 check_privileged(s
);
1119 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1120 tmp
= get_address(s
, 0, b2
, d2
);
1121 potential_page_fault(s
);
1122 gen_helper_stckc(cpu_env
, tmp
);
1123 tcg_temp_free_i64(tmp
);
1125 case 0x08: /* SPT D2(B2) [S] */
1127 check_privileged(s
);
1128 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1129 tmp
= get_address(s
, 0, b2
, d2
);
1130 potential_page_fault(s
);
1131 gen_helper_spt(cpu_env
, tmp
);
1132 tcg_temp_free_i64(tmp
);
1134 case 0x09: /* STPT D2(B2) [S] */
1135 /* Store CPU Timer */
1136 check_privileged(s
);
1137 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1138 tmp
= get_address(s
, 0, b2
, d2
);
1139 potential_page_fault(s
);
1140 gen_helper_stpt(cpu_env
, tmp
);
1141 tcg_temp_free_i64(tmp
);
1143 case 0x0a: /* SPKA D2(B2) [S] */
1144 /* Set PSW Key from Address */
1145 check_privileged(s
);
1146 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1147 tmp
= get_address(s
, 0, b2
, d2
);
1148 tmp2
= tcg_temp_new_i64();
1149 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1150 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1151 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1152 tcg_temp_free_i64(tmp2
);
1153 tcg_temp_free_i64(tmp
);
1155 case 0x0d: /* PTLB [S] */
1157 check_privileged(s
);
1158 gen_helper_ptlb(cpu_env
);
1160 case 0x10: /* SPX D2(B2) [S] */
1161 /* Set Prefix Register */
1162 check_privileged(s
);
1163 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1164 tmp
= get_address(s
, 0, b2
, d2
);
1165 potential_page_fault(s
);
1166 gen_helper_spx(cpu_env
, tmp
);
1167 tcg_temp_free_i64(tmp
);
1169 case 0x11: /* STPX D2(B2) [S] */
1171 check_privileged(s
);
1172 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1173 tmp
= get_address(s
, 0, b2
, d2
);
1174 tmp2
= tcg_temp_new_i64();
1175 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1176 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1177 tcg_temp_free_i64(tmp
);
1178 tcg_temp_free_i64(tmp2
);
1180 case 0x12: /* STAP D2(B2) [S] */
1181 /* Store CPU Address */
1182 check_privileged(s
);
1183 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1184 tmp
= get_address(s
, 0, b2
, d2
);
1185 tmp2
= tcg_temp_new_i64();
1186 tmp32_1
= tcg_temp_new_i32();
1187 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1188 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1189 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1190 tcg_temp_free_i64(tmp
);
1191 tcg_temp_free_i64(tmp2
);
1192 tcg_temp_free_i32(tmp32_1
);
1194 case 0x21: /* IPTE R1,R2 [RRE] */
1195 /* Invalidate PTE */
1196 check_privileged(s
);
1197 r1
= (insn
>> 4) & 0xf;
1200 tmp2
= load_reg(r2
);
1201 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1202 tcg_temp_free_i64(tmp
);
1203 tcg_temp_free_i64(tmp2
);
1205 case 0x29: /* ISKE R1,R2 [RRE] */
1206 /* Insert Storage Key Extended */
1207 check_privileged(s
);
1208 r1
= (insn
>> 4) & 0xf;
1211 tmp2
= tcg_temp_new_i64();
1212 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1213 store_reg(r1
, tmp2
);
1214 tcg_temp_free_i64(tmp
);
1215 tcg_temp_free_i64(tmp2
);
1217 case 0x2a: /* RRBE R1,R2 [RRE] */
1218 /* Set Storage Key Extended */
1219 check_privileged(s
);
1220 r1
= (insn
>> 4) & 0xf;
1222 tmp32_1
= load_reg32(r1
);
1224 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1226 tcg_temp_free_i32(tmp32_1
);
1227 tcg_temp_free_i64(tmp
);
1229 case 0x2b: /* SSKE R1,R2 [RRE] */
1230 /* Set Storage Key Extended */
1231 check_privileged(s
);
1232 r1
= (insn
>> 4) & 0xf;
1234 tmp32_1
= load_reg32(r1
);
1236 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1237 tcg_temp_free_i32(tmp32_1
);
1238 tcg_temp_free_i64(tmp
);
1240 case 0x34: /* STCH ? */
1241 /* Store Subchannel */
1242 check_privileged(s
);
1243 gen_op_movi_cc(s
, 3);
1245 case 0x46: /* STURA R1,R2 [RRE] */
1246 /* Store Using Real Address */
1247 check_privileged(s
);
1248 r1
= (insn
>> 4) & 0xf;
1250 tmp32_1
= load_reg32(r1
);
1252 potential_page_fault(s
);
1253 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1254 tcg_temp_free_i32(tmp32_1
);
1255 tcg_temp_free_i64(tmp
);
1257 case 0x50: /* CSP R1,R2 [RRE] */
1258 /* Compare And Swap And Purge */
1259 check_privileged(s
);
1260 r1
= (insn
>> 4) & 0xf;
1262 tmp32_1
= tcg_const_i32(r1
);
1263 tmp32_2
= tcg_const_i32(r2
);
1264 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1266 tcg_temp_free_i32(tmp32_1
);
1267 tcg_temp_free_i32(tmp32_2
);
1269 case 0x5f: /* CHSC ? */
1270 /* Channel Subsystem Call */
1271 check_privileged(s
);
1272 gen_op_movi_cc(s
, 3);
1274 case 0x78: /* STCKE D2(B2) [S] */
1275 /* Store Clock Extended */
1276 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1277 tmp
= get_address(s
, 0, b2
, d2
);
1278 potential_page_fault(s
);
1279 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1281 tcg_temp_free_i64(tmp
);
1283 case 0x79: /* SACF D2(B2) [S] */
1284 /* Set Address Space Control Fast */
1285 check_privileged(s
);
1286 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1287 tmp
= get_address(s
, 0, b2
, d2
);
1288 potential_page_fault(s
);
1289 gen_helper_sacf(cpu_env
, tmp
);
1290 tcg_temp_free_i64(tmp
);
1291 /* addressing mode has changed, so end the block */
1294 s
->is_jmp
= DISAS_JUMP
;
1296 case 0x7d: /* STSI D2,(B2) [S] */
1297 check_privileged(s
);
1298 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1299 tmp
= get_address(s
, 0, b2
, d2
);
1300 tmp32_1
= load_reg32(0);
1301 tmp32_2
= load_reg32(1);
1302 potential_page_fault(s
);
1303 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1305 tcg_temp_free_i64(tmp
);
1306 tcg_temp_free_i32(tmp32_1
);
1307 tcg_temp_free_i32(tmp32_2
);
1309 case 0x9d: /* LFPC D2(B2) [S] */
1310 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1311 tmp
= get_address(s
, 0, b2
, d2
);
1312 tmp2
= tcg_temp_new_i64();
1313 tmp32_1
= tcg_temp_new_i32();
1314 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1315 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1316 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1317 tcg_temp_free_i64(tmp
);
1318 tcg_temp_free_i64(tmp2
);
1319 tcg_temp_free_i32(tmp32_1
);
1321 case 0xb1: /* STFL D2(B2) [S] */
1322 /* Store Facility List (CPU features) at 200 */
1323 check_privileged(s
);
1324 tmp2
= tcg_const_i64(0xc0000000);
1325 tmp
= tcg_const_i64(200);
1326 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1327 tcg_temp_free_i64(tmp2
);
1328 tcg_temp_free_i64(tmp
);
1330 case 0xb2: /* LPSWE D2(B2) [S] */
1331 /* Load PSW Extended */
1332 check_privileged(s
);
1333 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1334 tmp
= get_address(s
, 0, b2
, d2
);
1335 tmp2
= tcg_temp_new_i64();
1336 tmp3
= tcg_temp_new_i64();
1337 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1338 tcg_gen_addi_i64(tmp
, tmp
, 8);
1339 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1340 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1341 /* we need to keep cc_op intact */
1342 s
->is_jmp
= DISAS_JUMP
;
1343 tcg_temp_free_i64(tmp
);
1344 tcg_temp_free_i64(tmp2
);
1345 tcg_temp_free_i64(tmp3
);
1347 case 0x20: /* SERVC R1,R2 [RRE] */
1348 /* SCLP Service call (PV hypercall) */
1349 check_privileged(s
);
1350 potential_page_fault(s
);
1351 tmp32_1
= load_reg32(r2
);
1353 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1355 tcg_temp_free_i32(tmp32_1
);
1356 tcg_temp_free_i64(tmp
);
1360 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1361 gen_illegal_opcode(s
);
1366 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1370 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1371 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1372 #define FP_HELPER(i) \
1373 tmp32_1 = tcg_const_i32(r1); \
1374 tmp32_2 = tcg_const_i32(r2); \
1375 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1376 tcg_temp_free_i32(tmp32_1); \
1377 tcg_temp_free_i32(tmp32_2);
1379 #define FP_HELPER_CC(i) \
1380 tmp32_1 = tcg_const_i32(r1); \
1381 tmp32_2 = tcg_const_i32(r2); \
1382 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1384 tcg_temp_free_i32(tmp32_1); \
1385 tcg_temp_free_i32(tmp32_2);
1388 case 0x0: /* LPEBR R1,R2 [RRE] */
1389 FP_HELPER_CC(lpebr
);
1391 case 0x3: /* LCEBR R1,R2 [RRE] */
1392 FP_HELPER_CC(lcebr
);
1394 case 0x10: /* LPDBR R1,R2 [RRE] */
1395 FP_HELPER_CC(lpdbr
);
1397 case 0x13: /* LCDBR R1,R2 [RRE] */
1398 FP_HELPER_CC(lcdbr
);
1400 case 0x15: /* SQBDR R1,R2 [RRE] */
1403 case 0x40: /* LPXBR R1,R2 [RRE] */
1404 FP_HELPER_CC(lpxbr
);
1406 case 0x43: /* LCXBR R1,R2 [RRE] */
1407 FP_HELPER_CC(lcxbr
);
1409 case 0x65: /* LXR R1,R2 [RRE] */
1410 tmp
= load_freg(r2
);
1411 store_freg(r1
, tmp
);
1412 tcg_temp_free_i64(tmp
);
1413 tmp
= load_freg(r2
+ 2);
1414 store_freg(r1
+ 2, tmp
);
1415 tcg_temp_free_i64(tmp
);
1417 case 0x74: /* LZER R1 [RRE] */
1418 tmp32_1
= tcg_const_i32(r1
);
1419 gen_helper_lzer(cpu_env
, tmp32_1
);
1420 tcg_temp_free_i32(tmp32_1
);
1422 case 0x75: /* LZDR R1 [RRE] */
1423 tmp32_1
= tcg_const_i32(r1
);
1424 gen_helper_lzdr(cpu_env
, tmp32_1
);
1425 tcg_temp_free_i32(tmp32_1
);
1427 case 0x76: /* LZXR R1 [RRE] */
1428 tmp32_1
= tcg_const_i32(r1
);
1429 gen_helper_lzxr(cpu_env
, tmp32_1
);
1430 tcg_temp_free_i32(tmp32_1
);
1432 case 0x84: /* SFPC R1 [RRE] */
1433 tmp32_1
= load_reg32(r1
);
1434 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1435 tcg_temp_free_i32(tmp32_1
);
1437 case 0x94: /* CEFBR R1,R2 [RRE] */
1438 case 0x95: /* CDFBR R1,R2 [RRE] */
1439 case 0x96: /* CXFBR R1,R2 [RRE] */
1440 tmp32_1
= tcg_const_i32(r1
);
1441 tmp32_2
= load_reg32(r2
);
1444 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1447 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1450 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1455 tcg_temp_free_i32(tmp32_1
);
1456 tcg_temp_free_i32(tmp32_2
);
1458 case 0x98: /* CFEBR R1,R2 [RRE] */
1459 case 0x99: /* CFDBR R1,R2 [RRE] */
1460 case 0x9a: /* CFXBR R1,R2 [RRE] */
1461 tmp32_1
= tcg_const_i32(r1
);
1462 tmp32_2
= tcg_const_i32(r2
);
1463 tmp32_3
= tcg_const_i32(m3
);
1466 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1469 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1472 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1478 tcg_temp_free_i32(tmp32_1
);
1479 tcg_temp_free_i32(tmp32_2
);
1480 tcg_temp_free_i32(tmp32_3
);
1482 case 0xa4: /* CEGBR R1,R2 [RRE] */
1483 case 0xa5: /* CDGBR R1,R2 [RRE] */
1484 tmp32_1
= tcg_const_i32(r1
);
1488 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1491 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1496 tcg_temp_free_i32(tmp32_1
);
1497 tcg_temp_free_i64(tmp
);
1499 case 0xa6: /* CXGBR R1,R2 [RRE] */
1500 tmp32_1
= tcg_const_i32(r1
);
1502 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1503 tcg_temp_free_i32(tmp32_1
);
1504 tcg_temp_free_i64(tmp
);
1506 case 0xa8: /* CGEBR R1,R2 [RRE] */
1507 tmp32_1
= tcg_const_i32(r1
);
1508 tmp32_2
= tcg_const_i32(r2
);
1509 tmp32_3
= tcg_const_i32(m3
);
1510 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1512 tcg_temp_free_i32(tmp32_1
);
1513 tcg_temp_free_i32(tmp32_2
);
1514 tcg_temp_free_i32(tmp32_3
);
1516 case 0xa9: /* CGDBR R1,R2 [RRE] */
1517 tmp32_1
= tcg_const_i32(r1
);
1518 tmp32_2
= tcg_const_i32(r2
);
1519 tmp32_3
= tcg_const_i32(m3
);
1520 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1522 tcg_temp_free_i32(tmp32_1
);
1523 tcg_temp_free_i32(tmp32_2
);
1524 tcg_temp_free_i32(tmp32_3
);
1526 case 0xaa: /* CGXBR R1,R2 [RRE] */
1527 tmp32_1
= tcg_const_i32(r1
);
1528 tmp32_2
= tcg_const_i32(r2
);
1529 tmp32_3
= tcg_const_i32(m3
);
1530 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1532 tcg_temp_free_i32(tmp32_1
);
1533 tcg_temp_free_i32(tmp32_2
);
1534 tcg_temp_free_i32(tmp32_3
);
1537 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1538 gen_illegal_opcode(s
);
1546 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1552 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1554 case 0x83: /* FLOGR R1,R2 [RRE] */
1556 tmp32_1
= tcg_const_i32(r1
);
1557 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1559 tcg_temp_free_i64(tmp
);
1560 tcg_temp_free_i32(tmp32_1
);
1563 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1564 gen_illegal_opcode(s
);
1569 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1575 opc
= cpu_ldub_code(env
, s
->pc
);
1576 LOG_DISAS("opc 0x%x\n", opc
);
1580 insn
= ld_code4(env
, s
->pc
);
1581 op
= (insn
>> 16) & 0xff;
1582 disas_b2(env
, s
, op
, insn
);
1585 insn
= ld_code4(env
, s
->pc
);
1586 op
= (insn
>> 16) & 0xff;
1587 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1588 r1
= (insn
>> 4) & 0xf;
1590 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1593 insn
= ld_code4(env
, s
->pc
);
1594 r1
= (insn
>> 4) & 0xf;
1596 op
= (insn
>> 16) & 0xff;
1597 disas_b9(env
, s
, op
, r1
, r2
);
1600 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1601 gen_illegal_opcode(s
);
1606 /* ====================================================================== */
1607 /* Define the insn format enumeration. */
1608 #define F0(N) FMT_##N,
1609 #define F1(N, X1) F0(N)
1610 #define F2(N, X1, X2) F0(N)
1611 #define F3(N, X1, X2, X3) F0(N)
1612 #define F4(N, X1, X2, X3, X4) F0(N)
1613 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1616 #include "insn-format.def"
1626 /* Define a structure to hold the decoded fields. We'll store each inside
1627 an array indexed by an enum. In order to conserve memory, we'll arrange
1628 for fields that do not exist at the same time to overlap, thus the "C"
1629 for compact. For checking purposes there is an "O" for original index
1630 as well that will be applied to availability bitmaps. */
1632 enum DisasFieldIndexO
{
1655 enum DisasFieldIndexC
{
1686 struct DisasFields
{
1689 unsigned presentC
:16;
1690 unsigned int presentO
;
1694 /* This is the way fields are to be accessed out of DisasFields. */
1695 #define have_field(S, F) have_field1((S), FLD_O_##F)
1696 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1698 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1700 return (f
->presentO
>> c
) & 1;
1703 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1704 enum DisasFieldIndexC c
)
1706 assert(have_field1(f
, o
));
1710 /* Describe the layout of each field in each format. */
1711 typedef struct DisasField
{
1713 unsigned int size
:8;
1714 unsigned int type
:2;
1715 unsigned int indexC
:6;
1716 enum DisasFieldIndexO indexO
:8;
1719 typedef struct DisasFormatInfo
{
1720 DisasField op
[NUM_C_FIELD
];
1723 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1724 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1725 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1726 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1727 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1728 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1729 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1730 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1731 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1732 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1733 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1734 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1735 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1736 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1738 #define F0(N) { { } },
1739 #define F1(N, X1) { { X1 } },
1740 #define F2(N, X1, X2) { { X1, X2 } },
1741 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1742 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1743 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1745 static const DisasFormatInfo format_info
[] = {
1746 #include "insn-format.def"
1764 /* Generally, we'll extract operands into this structures, operate upon
1765 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1766 of routines below for more details. */
1768 bool g_out
, g_out2
, g_in1
, g_in2
;
1769 TCGv_i64 out
, out2
, in1
, in2
;
1773 /* Return values from translate_one, indicating the state of the TB. */
1775 /* Continue the TB. */
1777 /* We have emitted one or more goto_tb. No fixup required. */
1779 /* We are not using a goto_tb (for whatever reason), but have updated
1780 the PC (for whatever reason), so there's no need to do it again on
1783 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1784 updated the PC for the next instruction to be executed. */
1786 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1787 No following code will be executed. */
1791 typedef enum DisasFacility
{
1792 FAC_Z
, /* zarch (default) */
1793 FAC_CASS
, /* compare and swap and store */
1794 FAC_CASS2
, /* compare and swap and store 2*/
1795 FAC_DFP
, /* decimal floating point */
1796 FAC_DFPR
, /* decimal floating point rounding */
1797 FAC_DO
, /* distinct operands */
1798 FAC_EE
, /* execute extensions */
1799 FAC_EI
, /* extended immediate */
1800 FAC_FPE
, /* floating point extension */
1801 FAC_FPSSH
, /* floating point support sign handling */
1802 FAC_FPRGR
, /* FPR-GR transfer */
1803 FAC_GIE
, /* general instructions extension */
1804 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1805 FAC_HW
, /* high-word */
1806 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1807 FAC_LOC
, /* load/store on condition */
1808 FAC_LD
, /* long displacement */
1809 FAC_PC
, /* population count */
1810 FAC_SCF
, /* store clock fast */
1811 FAC_SFLE
, /* store facility list extended */
1817 DisasFacility fac
:6;
1821 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1822 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1823 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1824 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1825 void (*help_cout
)(DisasContext
*, DisasOps
*);
1826 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1831 /* ====================================================================== */
1832 /* Miscelaneous helpers, used by several operations. */
1834 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1835 DisasOps
*o
, int mask
)
1837 int b2
= get_field(f
, b2
);
1838 int d2
= get_field(f
, d2
);
1841 o
->in2
= tcg_const_i64(d2
& mask
);
1843 o
->in2
= get_address(s
, 0, b2
, d2
);
1844 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1848 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1850 if (dest
== s
->next_pc
) {
1853 if (use_goto_tb(s
, dest
)) {
1854 gen_update_cc_op(s
);
1856 tcg_gen_movi_i64(psw_addr
, dest
);
1857 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1858 return EXIT_GOTO_TB
;
1860 tcg_gen_movi_i64(psw_addr
, dest
);
1861 return EXIT_PC_UPDATED
;
1865 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1866 bool is_imm
, int imm
, TCGv_i64 cdest
)
1869 uint64_t dest
= s
->pc
+ 2 * imm
;
1872 /* Take care of the special cases first. */
1873 if (c
->cond
== TCG_COND_NEVER
) {
1878 if (dest
== s
->next_pc
) {
1879 /* Branch to next. */
1883 if (c
->cond
== TCG_COND_ALWAYS
) {
1884 ret
= help_goto_direct(s
, dest
);
1888 if (TCGV_IS_UNUSED_I64(cdest
)) {
1889 /* E.g. bcr %r0 -> no branch. */
1893 if (c
->cond
== TCG_COND_ALWAYS
) {
1894 tcg_gen_mov_i64(psw_addr
, cdest
);
1895 ret
= EXIT_PC_UPDATED
;
1900 if (use_goto_tb(s
, s
->next_pc
)) {
1901 if (is_imm
&& use_goto_tb(s
, dest
)) {
1902 /* Both exits can use goto_tb. */
1903 gen_update_cc_op(s
);
1905 lab
= gen_new_label();
1907 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1909 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1912 /* Branch not taken. */
1914 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1915 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1920 tcg_gen_movi_i64(psw_addr
, dest
);
1921 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1925 /* Fallthru can use goto_tb, but taken branch cannot. */
1926 /* Store taken branch destination before the brcond. This
1927 avoids having to allocate a new local temp to hold it.
1928 We'll overwrite this in the not taken case anyway. */
1930 tcg_gen_mov_i64(psw_addr
, cdest
);
1933 lab
= gen_new_label();
1935 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1937 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1940 /* Branch not taken. */
1941 gen_update_cc_op(s
);
1943 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1944 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1948 tcg_gen_movi_i64(psw_addr
, dest
);
1950 ret
= EXIT_PC_UPDATED
;
1953 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1954 Most commonly we're single-stepping or some other condition that
1955 disables all use of goto_tb. Just update the PC and exit. */
1957 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1959 cdest
= tcg_const_i64(dest
);
1963 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1966 TCGv_i32 t0
= tcg_temp_new_i32();
1967 TCGv_i64 t1
= tcg_temp_new_i64();
1968 TCGv_i64 z
= tcg_const_i64(0);
1969 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1970 tcg_gen_extu_i32_i64(t1
, t0
);
1971 tcg_temp_free_i32(t0
);
1972 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1973 tcg_temp_free_i64(t1
);
1974 tcg_temp_free_i64(z
);
1978 tcg_temp_free_i64(cdest
);
1980 tcg_temp_free_i64(next
);
1982 ret
= EXIT_PC_UPDATED
;
1990 /* ====================================================================== */
1991 /* The operations. These perform the bulk of the work for any insn,
1992 usually after the operands have been loaded and output initialized. */
1994 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1996 gen_helper_abs_i64(o
->out
, o
->in2
);
2000 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
2002 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2006 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2010 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2012 /* XXX possible optimization point */
2014 cc
= tcg_temp_new_i64();
2015 tcg_gen_extu_i32_i64(cc
, cc_op
);
2016 tcg_gen_shri_i64(cc
, cc
, 1);
2018 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2019 tcg_temp_free_i64(cc
);
2023 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
2025 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2029 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
2031 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2035 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
2037 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2038 return_low128(o
->out2
);
2042 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2044 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2048 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2050 int shift
= s
->insn
->data
& 0xff;
2051 int size
= s
->insn
->data
>> 8;
2052 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2055 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2056 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2057 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2059 /* Produce the CC from only the bits manipulated. */
2060 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2061 set_cc_nz_u64(s
, cc_dst
);
2065 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2067 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2068 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2069 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2070 return EXIT_PC_UPDATED
;
2076 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2078 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2079 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2082 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2084 int m1
= get_field(s
->fields
, m1
);
2085 bool is_imm
= have_field(s
->fields
, i2
);
2086 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2089 disas_jcc(s
, &c
, m1
);
2090 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2093 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2095 int r1
= get_field(s
->fields
, r1
);
2096 bool is_imm
= have_field(s
->fields
, i2
);
2097 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2101 c
.cond
= TCG_COND_NE
;
2106 t
= tcg_temp_new_i64();
2107 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2108 store_reg32_i64(r1
, t
);
2109 c
.u
.s32
.a
= tcg_temp_new_i32();
2110 c
.u
.s32
.b
= tcg_const_i32(0);
2111 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2112 tcg_temp_free_i64(t
);
2114 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2117 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2119 int r1
= get_field(s
->fields
, r1
);
2120 bool is_imm
= have_field(s
->fields
, i2
);
2121 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2124 c
.cond
= TCG_COND_NE
;
2129 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2130 c
.u
.s64
.a
= regs
[r1
];
2131 c
.u
.s64
.b
= tcg_const_i64(0);
2133 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2136 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
2138 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2143 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
2145 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2150 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
2152 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2157 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2159 int l
= get_field(s
->fields
, l1
);
2164 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2165 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2168 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2169 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2172 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2173 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2176 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2177 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2180 potential_page_fault(s
);
2181 vl
= tcg_const_i32(l
);
2182 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2183 tcg_temp_free_i32(vl
);
2187 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2191 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2193 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2194 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2195 potential_page_fault(s
);
2196 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2197 tcg_temp_free_i32(r1
);
2198 tcg_temp_free_i32(r3
);
2203 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2205 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2206 TCGv_i32 t1
= tcg_temp_new_i32();
2207 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2208 potential_page_fault(s
);
2209 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2211 tcg_temp_free_i32(t1
);
2212 tcg_temp_free_i32(m3
);
2216 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2218 int r3
= get_field(s
->fields
, r3
);
2219 potential_page_fault(s
);
2220 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2225 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2227 int r3
= get_field(s
->fields
, r3
);
2228 potential_page_fault(s
);
2229 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2234 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2236 int r3
= get_field(s
->fields
, r3
);
2237 TCGv_i64 in3
= tcg_temp_new_i64();
2238 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2239 potential_page_fault(s
);
2240 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2241 tcg_temp_free_i64(in3
);
2246 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2248 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2249 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2250 potential_page_fault(s
);
2251 /* XXX rewrite in tcg */
2252 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2257 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2259 TCGv_i64 t1
= tcg_temp_new_i64();
2260 TCGv_i32 t2
= tcg_temp_new_i32();
2261 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2262 gen_helper_cvd(t1
, t2
);
2263 tcg_temp_free_i32(t2
);
2264 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2265 tcg_temp_free_i64(t1
);
2269 #ifndef CONFIG_USER_ONLY
2270 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2274 check_privileged(s
);
2275 potential_page_fault(s
);
2277 /* We pretend the format is RX_a so that D2 is the field we want. */
2278 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2279 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2280 tcg_temp_free_i32(tmp
);
2285 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2287 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2288 return_low128(o
->out
);
2292 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2294 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2295 return_low128(o
->out
);
2299 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2301 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2302 return_low128(o
->out
);
2306 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2308 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2309 return_low128(o
->out
);
2313 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2315 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2319 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2321 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2325 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2327 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2328 return_low128(o
->out2
);
2332 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2334 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2338 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2340 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2341 tb->flags, (ab)use the tb->cs_base field as the address of
2342 the template in memory, and grab 8 bits of tb->flags/cflags for
2343 the contents of the register. We would then recognize all this
2344 in gen_intermediate_code_internal, generating code for exactly
2345 one instruction. This new TB then gets executed normally.
2347 On the other hand, this seems to be mostly used for modifying
2348 MVC inside of memcpy, which needs a helper call anyway. So
2349 perhaps this doesn't bear thinking about any further. */
2356 tmp
= tcg_const_i64(s
->next_pc
);
2357 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2358 tcg_temp_free_i64(tmp
);
2364 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2366 int m3
= get_field(s
->fields
, m3
);
2367 int pos
, len
, base
= s
->insn
->data
;
2368 TCGv_i64 tmp
= tcg_temp_new_i64();
2373 /* Effectively a 32-bit load. */
2374 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2381 /* Effectively a 16-bit load. */
2382 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2390 /* Effectively an 8-bit load. */
2391 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2396 pos
= base
+ ctz32(m3
) * 8;
2397 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2398 ccm
= ((1ull << len
) - 1) << pos
;
2402 /* This is going to be a sequence of loads and inserts. */
2403 pos
= base
+ 32 - 8;
2407 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2408 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2409 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2412 m3
= (m3
<< 1) & 0xf;
2418 tcg_gen_movi_i64(tmp
, ccm
);
2419 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2420 tcg_temp_free_i64(tmp
);
2424 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2426 int shift
= s
->insn
->data
& 0xff;
2427 int size
= s
->insn
->data
>> 8;
2428 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2432 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2434 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2438 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2440 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2444 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2446 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2450 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2452 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2456 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2458 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2459 return_low128(o
->out2
);
2463 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2465 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2466 return_low128(o
->out2
);
2470 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2472 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2476 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2478 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2482 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2484 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2488 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2490 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2494 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2496 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2500 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2502 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2506 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2508 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2512 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2514 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2518 #ifndef CONFIG_USER_ONLY
2519 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2521 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2522 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2523 check_privileged(s
);
2524 potential_page_fault(s
);
2525 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2526 tcg_temp_free_i32(r1
);
2527 tcg_temp_free_i32(r3
);
2531 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2533 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2534 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2535 check_privileged(s
);
2536 potential_page_fault(s
);
2537 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2538 tcg_temp_free_i32(r1
);
2539 tcg_temp_free_i32(r3
);
2542 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2544 check_privileged(s
);
2545 potential_page_fault(s
);
2546 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2551 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2555 check_privileged(s
);
2557 t1
= tcg_temp_new_i64();
2558 t2
= tcg_temp_new_i64();
2559 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2560 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2561 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2562 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2563 tcg_gen_shli_i64(t1
, t1
, 32);
2564 gen_helper_load_psw(cpu_env
, t1
, t2
);
2565 tcg_temp_free_i64(t1
);
2566 tcg_temp_free_i64(t2
);
2567 return EXIT_NORETURN
;
2571 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2573 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2574 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2575 potential_page_fault(s
);
2576 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2577 tcg_temp_free_i32(r1
);
2578 tcg_temp_free_i32(r3
);
2582 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2584 int r1
= get_field(s
->fields
, r1
);
2585 int r3
= get_field(s
->fields
, r3
);
2586 TCGv_i64 t
= tcg_temp_new_i64();
2587 TCGv_i64 t4
= tcg_const_i64(4);
2590 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2591 store_reg32_i64(r1
, t
);
2595 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2599 tcg_temp_free_i64(t
);
2600 tcg_temp_free_i64(t4
);
2604 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2606 int r1
= get_field(s
->fields
, r1
);
2607 int r3
= get_field(s
->fields
, r3
);
2608 TCGv_i64 t
= tcg_temp_new_i64();
2609 TCGv_i64 t4
= tcg_const_i64(4);
2612 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2613 store_reg32h_i64(r1
, t
);
2617 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2621 tcg_temp_free_i64(t
);
2622 tcg_temp_free_i64(t4
);
2626 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2628 int r1
= get_field(s
->fields
, r1
);
2629 int r3
= get_field(s
->fields
, r3
);
2630 TCGv_i64 t8
= tcg_const_i64(8);
2633 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2637 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2641 tcg_temp_free_i64(t8
);
2645 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2648 o
->g_out
= o
->g_in2
;
2649 TCGV_UNUSED_I64(o
->in2
);
2654 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2658 o
->g_out
= o
->g_in1
;
2659 o
->g_out2
= o
->g_in2
;
2660 TCGV_UNUSED_I64(o
->in1
);
2661 TCGV_UNUSED_I64(o
->in2
);
2662 o
->g_in1
= o
->g_in2
= false;
2666 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2668 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2669 potential_page_fault(s
);
2670 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2671 tcg_temp_free_i32(l
);
2675 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2677 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2678 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2679 potential_page_fault(s
);
2680 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2681 tcg_temp_free_i32(r1
);
2682 tcg_temp_free_i32(r2
);
2687 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2689 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2690 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2691 potential_page_fault(s
);
2692 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2693 tcg_temp_free_i32(r1
);
2694 tcg_temp_free_i32(r3
);
2699 #ifndef CONFIG_USER_ONLY
2700 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2702 int r1
= get_field(s
->fields
, l1
);
2703 check_privileged(s
);
2704 potential_page_fault(s
);
2705 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2710 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2712 int r1
= get_field(s
->fields
, l1
);
2713 check_privileged(s
);
2714 potential_page_fault(s
);
2715 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2721 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2723 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2727 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2729 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2730 return_low128(o
->out2
);
2734 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2736 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2740 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2742 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2746 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2748 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2752 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2754 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2755 return_low128(o
->out2
);
2759 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2761 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2762 return_low128(o
->out2
);
2766 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2768 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2769 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2770 tcg_temp_free_i64(r3
);
2774 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2776 int r3
= get_field(s
->fields
, r3
);
2777 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2781 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2783 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2784 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2785 tcg_temp_free_i64(r3
);
2789 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2791 int r3
= get_field(s
->fields
, r3
);
2792 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2796 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2798 gen_helper_nabs_i64(o
->out
, o
->in2
);
2802 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2804 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2805 potential_page_fault(s
);
2806 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2807 tcg_temp_free_i32(l
);
2812 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2814 tcg_gen_neg_i64(o
->out
, o
->in2
);
2818 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2820 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2821 potential_page_fault(s
);
2822 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2823 tcg_temp_free_i32(l
);
2828 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2830 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2834 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2836 int shift
= s
->insn
->data
& 0xff;
2837 int size
= s
->insn
->data
>> 8;
2838 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2841 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2842 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2844 /* Produce the CC from only the bits manipulated. */
2845 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2846 set_cc_nz_u64(s
, cc_dst
);
2850 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2852 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2856 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2858 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2862 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2864 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2868 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2870 TCGv_i32 t1
= tcg_temp_new_i32();
2871 TCGv_i32 t2
= tcg_temp_new_i32();
2872 TCGv_i32 to
= tcg_temp_new_i32();
2873 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2874 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2875 tcg_gen_rotl_i32(to
, t1
, t2
);
2876 tcg_gen_extu_i32_i64(o
->out
, to
);
2877 tcg_temp_free_i32(t1
);
2878 tcg_temp_free_i32(t2
);
2879 tcg_temp_free_i32(to
);
2883 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2885 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2889 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2891 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2895 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2897 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2901 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2903 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2904 return_low128(o
->out2
);
2908 #ifndef CONFIG_USER_ONLY
2909 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2911 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2912 check_privileged(s
);
2913 potential_page_fault(s
);
2914 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2915 tcg_temp_free_i32(r1
);
2920 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2922 uint64_t sign
= 1ull << s
->insn
->data
;
2923 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2924 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2925 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2926 /* The arithmetic left shift is curious in that it does not affect
2927 the sign bit. Copy that over from the source unchanged. */
2928 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2929 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2930 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2934 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2936 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2940 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2942 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2946 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2948 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2952 #ifndef CONFIG_USER_ONLY
2953 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2955 check_privileged(s
);
2956 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2960 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2962 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2963 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2964 check_privileged(s
);
2965 potential_page_fault(s
);
2966 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2967 tcg_temp_free_i32(r1
);
2968 tcg_temp_free_i32(r3
);
2972 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2974 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2975 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2976 check_privileged(s
);
2977 potential_page_fault(s
);
2978 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2979 tcg_temp_free_i32(r1
);
2980 tcg_temp_free_i32(r3
);
2984 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2986 uint64_t i2
= get_field(s
->fields
, i2
);
2989 check_privileged(s
);
2991 /* It is important to do what the instruction name says: STORE THEN.
2992 If we let the output hook perform the store then if we fault and
2993 restart, we'll have the wrong SYSTEM MASK in place. */
2994 t
= tcg_temp_new_i64();
2995 tcg_gen_shri_i64(t
, psw_mask
, 56);
2996 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2997 tcg_temp_free_i64(t
);
2999 if (s
->fields
->op
== 0xac) {
3000 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3001 (i2
<< 56) | 0x00ffffffffffffffull
);
3003 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3009 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3011 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3015 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3017 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3021 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3023 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3027 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3029 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3033 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3035 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3036 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3037 potential_page_fault(s
);
3038 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3039 tcg_temp_free_i32(r1
);
3040 tcg_temp_free_i32(r3
);
3044 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3046 int m3
= get_field(s
->fields
, m3
);
3047 int pos
, base
= s
->insn
->data
;
3048 TCGv_i64 tmp
= tcg_temp_new_i64();
3050 pos
= base
+ ctz32(m3
) * 8;
3053 /* Effectively a 32-bit store. */
3054 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3055 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3061 /* Effectively a 16-bit store. */
3062 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3063 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3070 /* Effectively an 8-bit store. */
3071 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3072 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3076 /* This is going to be a sequence of shifts and stores. */
3077 pos
= base
+ 32 - 8;
3080 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3081 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3082 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3084 m3
= (m3
<< 1) & 0xf;
3089 tcg_temp_free_i64(tmp
);
3093 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3095 int r1
= get_field(s
->fields
, r1
);
3096 int r3
= get_field(s
->fields
, r3
);
3097 int size
= s
->insn
->data
;
3098 TCGv_i64 tsize
= tcg_const_i64(size
);
3102 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3104 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3109 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3113 tcg_temp_free_i64(tsize
);
3117 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3119 int r1
= get_field(s
->fields
, r1
);
3120 int r3
= get_field(s
->fields
, r3
);
3121 TCGv_i64 t
= tcg_temp_new_i64();
3122 TCGv_i64 t4
= tcg_const_i64(4);
3123 TCGv_i64 t32
= tcg_const_i64(32);
3126 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3127 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3131 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3135 tcg_temp_free_i64(t
);
3136 tcg_temp_free_i64(t4
);
3137 tcg_temp_free_i64(t32
);
3141 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3143 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3147 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3152 tcg_gen_not_i64(o
->in2
, o
->in2
);
3153 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3155 /* XXX possible optimization point */
3157 cc
= tcg_temp_new_i64();
3158 tcg_gen_extu_i32_i64(cc
, cc_op
);
3159 tcg_gen_shri_i64(cc
, cc
, 1);
3160 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3161 tcg_temp_free_i64(cc
);
3165 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3172 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3173 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3174 tcg_temp_free_i32(t
);
3176 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3177 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3178 tcg_temp_free_i32(t
);
3180 gen_exception(EXCP_SVC
);
3181 return EXIT_NORETURN
;
3184 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3186 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3191 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3193 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3198 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3200 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3205 #ifndef CONFIG_USER_ONLY
3206 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3208 potential_page_fault(s
);
3209 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3215 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3217 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3218 potential_page_fault(s
);
3219 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3220 tcg_temp_free_i32(l
);
3225 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3227 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3228 potential_page_fault(s
);
3229 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3230 tcg_temp_free_i32(l
);
3234 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3236 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3237 potential_page_fault(s
);
3238 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3239 tcg_temp_free_i32(l
);
3244 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3246 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3250 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3252 int shift
= s
->insn
->data
& 0xff;
3253 int size
= s
->insn
->data
>> 8;
3254 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3257 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3258 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3260 /* Produce the CC from only the bits manipulated. */
3261 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3262 set_cc_nz_u64(s
, cc_dst
);
3266 /* ====================================================================== */
3267 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3268 the original inputs), update the various cc data structures in order to
3269 be able to compute the new condition code. */
3271 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3273 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3276 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3278 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3281 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3283 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3286 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3288 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3291 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3293 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3296 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3298 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3301 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3303 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3306 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3308 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3311 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3313 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3316 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3318 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3321 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3323 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3326 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3328 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3331 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3333 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3336 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3338 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3341 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3343 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3346 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3348 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3351 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3353 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3356 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3358 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3361 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3363 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3366 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3368 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3369 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3372 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3374 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3377 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3379 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3382 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3384 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3387 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3389 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3392 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3394 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3397 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3399 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3402 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3404 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3407 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3409 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3412 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3414 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3417 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3419 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3422 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3424 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3427 /* ====================================================================== */
3428 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3429 with the TCG register to which we will write. Used in combination with
3430 the "wout" generators, in some cases we need a new temporary, and in
3431 some cases we can write to a TCG global. */
3433 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3435 o
->out
= tcg_temp_new_i64();
3438 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3440 o
->out
= tcg_temp_new_i64();
3441 o
->out2
= tcg_temp_new_i64();
3444 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3446 o
->out
= regs
[get_field(f
, r1
)];
3450 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3452 /* ??? Specification exception: r1 must be even. */
3453 int r1
= get_field(f
, r1
);
3455 o
->out2
= regs
[(r1
+ 1) & 15];
3456 o
->g_out
= o
->g_out2
= true;
3459 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3461 o
->out
= fregs
[get_field(f
, r1
)];
3465 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3467 /* ??? Specification exception: r1 must be < 14. */
3468 int r1
= get_field(f
, r1
);
3470 o
->out2
= fregs
[(r1
+ 2) & 15];
3471 o
->g_out
= o
->g_out2
= true;
3474 /* ====================================================================== */
3475 /* The "Write OUTput" generators. These generally perform some non-trivial
3476 copy of data to TCG globals, or to main memory. The trivial cases are
3477 generally handled by having a "prep" generator install the TCG global
3478 as the destination of the operation. */
3480 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3482 store_reg(get_field(f
, r1
), o
->out
);
3485 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3487 int r1
= get_field(f
, r1
);
3488 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3491 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3493 int r1
= get_field(f
, r1
);
3494 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3497 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3499 store_reg32_i64(get_field(f
, r1
), o
->out
);
3502 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3504 /* ??? Specification exception: r1 must be even. */
3505 int r1
= get_field(f
, r1
);
3506 store_reg32_i64(r1
, o
->out
);
3507 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3510 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3512 /* ??? Specification exception: r1 must be even. */
3513 int r1
= get_field(f
, r1
);
3514 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3515 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3516 store_reg32_i64(r1
, o
->out
);
3519 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3521 store_freg32_i64(get_field(f
, r1
), o
->out
);
3524 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3526 store_freg(get_field(f
, r1
), o
->out
);
3529 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3531 /* ??? Specification exception: r1 must be < 14. */
3532 int f1
= get_field(s
->fields
, r1
);
3533 store_freg(f1
, o
->out
);
3534 store_freg((f1
+ 2) & 15, o
->out2
);
3537 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3539 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3540 store_reg32_i64(get_field(f
, r1
), o
->out
);
3544 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3546 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3547 store_freg32_i64(get_field(f
, r1
), o
->out
);
3551 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3553 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3556 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3558 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3561 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3563 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3566 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3568 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3571 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3573 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3576 /* ====================================================================== */
3577 /* The "INput 1" generators. These load the first operand to an insn. */
3579 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3581 o
->in1
= load_reg(get_field(f
, r1
));
3584 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3586 o
->in1
= regs
[get_field(f
, r1
)];
3590 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3592 o
->in1
= tcg_temp_new_i64();
3593 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3596 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3598 o
->in1
= tcg_temp_new_i64();
3599 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3602 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3604 o
->in1
= tcg_temp_new_i64();
3605 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3608 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3610 /* ??? Specification exception: r1 must be even. */
3611 int r1
= get_field(f
, r1
);
3612 o
->in1
= load_reg((r1
+ 1) & 15);
3615 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3617 /* ??? Specification exception: r1 must be even. */
3618 int r1
= get_field(f
, r1
);
3619 o
->in1
= tcg_temp_new_i64();
3620 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3623 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3625 /* ??? Specification exception: r1 must be even. */
3626 int r1
= get_field(f
, r1
);
3627 o
->in1
= tcg_temp_new_i64();
3628 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3631 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3633 /* ??? Specification exception: r1 must be even. */
3634 int r1
= get_field(f
, r1
);
3635 o
->in1
= tcg_temp_new_i64();
3636 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3639 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3641 o
->in1
= load_reg(get_field(f
, r2
));
3644 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3646 o
->in1
= load_reg(get_field(f
, r3
));
3649 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3651 o
->in1
= regs
[get_field(f
, r3
)];
3655 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3657 o
->in1
= tcg_temp_new_i64();
3658 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3661 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3663 o
->in1
= tcg_temp_new_i64();
3664 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3667 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3669 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3672 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3674 o
->in1
= fregs
[get_field(f
, r1
)];
3678 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3680 /* ??? Specification exception: r1 must be < 14. */
3681 int r1
= get_field(f
, r1
);
3683 o
->out2
= fregs
[(r1
+ 2) & 15];
3684 o
->g_out
= o
->g_out2
= true;
3687 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3689 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3692 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3694 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3695 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3698 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3701 o
->in1
= tcg_temp_new_i64();
3702 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3705 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3708 o
->in1
= tcg_temp_new_i64();
3709 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3712 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3715 o
->in1
= tcg_temp_new_i64();
3716 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3719 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3722 o
->in1
= tcg_temp_new_i64();
3723 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3726 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3729 o
->in1
= tcg_temp_new_i64();
3730 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3733 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3736 o
->in1
= tcg_temp_new_i64();
3737 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3740 /* ====================================================================== */
3741 /* The "INput 2" generators. These load the second operand to an insn. */
3743 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3745 o
->in2
= regs
[get_field(f
, r1
)];
3749 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3751 o
->in2
= tcg_temp_new_i64();
3752 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3755 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3757 o
->in2
= tcg_temp_new_i64();
3758 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3761 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3763 o
->in2
= load_reg(get_field(f
, r2
));
3766 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3768 o
->in2
= regs
[get_field(f
, r2
)];
3772 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3774 int r2
= get_field(f
, r2
);
3776 o
->in2
= load_reg(r2
);
3780 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 o
->in2
= tcg_temp_new_i64();
3783 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3786 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3788 o
->in2
= tcg_temp_new_i64();
3789 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3792 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3794 o
->in2
= tcg_temp_new_i64();
3795 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3798 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3800 o
->in2
= tcg_temp_new_i64();
3801 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3804 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3806 o
->in2
= load_reg(get_field(f
, r3
));
3809 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3811 o
->in2
= tcg_temp_new_i64();
3812 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3815 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3817 o
->in2
= tcg_temp_new_i64();
3818 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3821 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3823 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3826 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3828 o
->in2
= fregs
[get_field(f
, r2
)];
3832 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3834 /* ??? Specification exception: r1 must be < 14. */
3835 int r2
= get_field(f
, r2
);
3837 o
->in2
= fregs
[(r2
+ 2) & 15];
3838 o
->g_in1
= o
->g_in2
= true;
3841 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3843 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3844 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3847 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3849 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3852 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 help_l2_shift(s
, f
, o
, 31);
3857 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3859 help_l2_shift(s
, f
, o
, 63);
3862 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3865 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3868 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3871 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3874 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3877 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3880 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3883 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3886 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3889 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3892 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3895 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3898 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3901 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3904 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3907 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3910 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3913 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3916 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3919 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3922 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3924 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3927 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3929 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3932 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3934 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3937 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3939 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3942 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3944 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3945 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3948 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3950 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3951 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3954 /* ====================================================================== */
3956 /* Find opc within the table of insns. This is formulated as a switch
3957 statement so that (1) we get compile-time notice of cut-paste errors
3958 for duplicated opcodes, and (2) the compiler generates the binary
3959 search tree, rather than us having to post-process the table. */
3961 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3962 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3964 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3966 enum DisasInsnEnum
{
3967 #include "insn-data.def"
3971 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3976 .help_in1 = in1_##I1, \
3977 .help_in2 = in2_##I2, \
3978 .help_prep = prep_##P, \
3979 .help_wout = wout_##W, \
3980 .help_cout = cout_##CC, \
3981 .help_op = op_##OP, \
3985 /* Allow 0 to be used for NULL in the table below. */
3993 static const DisasInsn insn_info
[] = {
3994 #include "insn-data.def"
3998 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3999 case OPC: return &insn_info[insn_ ## NM];
4001 static const DisasInsn
*lookup_opc(uint16_t opc
)
4004 #include "insn-data.def"
4013 /* Extract a field from the insn. The INSN should be left-aligned in
4014 the uint64_t so that we can more easily utilize the big-bit-endian
4015 the uint64_t so that we can more easily utilize the big-bit-endian
4017 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4025 /* Zero extract the field from the insn. */
4026 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4028 /* Sign-extend, or un-swap the field as necessary. */
4030 case 0: /* unsigned */
4032 case 1: /* signed */
4033 assert(f
->size
<= 32);
4034 m
= 1u << (f
->size
- 1);
4037 case 2: /* dl+dh split, signed 20 bit. */
4038 r
= ((int8_t)r
<< 12) | (r
>> 8);
4044 /* Validate that the "compressed" encoding we selected above is valid.
4045 I.e. we haven't made two different original fields overlap. */
4046 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4047 o
->presentC
|= 1 << f
->indexC
;
4048 o
->presentO
|= 1 << f
->indexO
;
4050 o
->c
[f
->indexC
] = r
;
4053 /* Lookup the insn at the current PC, extracting the operands into O and
4054 returning the info struct for the insn. Returns NULL for invalid insn. */
4056 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4059 uint64_t insn
, pc
= s
->pc
;
4061 const DisasInsn
*info
;
4063 insn
= ld_code2(env
, pc
);
4064 op
= (insn
>> 8) & 0xff;
4065 ilen
= get_ilen(op
);
4066 s
->next_pc
= s
->pc
+ ilen
;
4073 insn
= ld_code4(env
, pc
) << 32;
4076 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4082 /* We can't actually determine the insn format until we've looked up
4083 the full insn opcode. Which we can't do without locating the
4084 secondary opcode. Assume by default that OP2 is at bit 40; for
4085 those smaller insns that don't actually have a secondary opcode
4086 this will correctly result in OP2 = 0. */
4092 case 0xb2: /* S, RRF, RRE */
4093 case 0xb3: /* RRE, RRD, RRF */
4094 case 0xb9: /* RRE, RRF */
4095 case 0xe5: /* SSE, SIL */
4096 op2
= (insn
<< 8) >> 56;
4100 case 0xc0: /* RIL */
4101 case 0xc2: /* RIL */
4102 case 0xc4: /* RIL */
4103 case 0xc6: /* RIL */
4104 case 0xc8: /* SSF */
4105 case 0xcc: /* RIL */
4106 op2
= (insn
<< 12) >> 60;
4108 case 0xd0 ... 0xdf: /* SS */
4114 case 0xee ... 0xf3: /* SS */
4115 case 0xf8 ... 0xfd: /* SS */
4119 op2
= (insn
<< 40) >> 56;
4123 memset(f
, 0, sizeof(*f
));
4127 /* Lookup the instruction. */
4128 info
= lookup_opc(op
<< 8 | op2
);
4130 /* If we found it, extract the operands. */
4132 DisasFormat fmt
= info
->fmt
;
4135 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4136 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4142 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4144 const DisasInsn
*insn
;
4145 ExitStatus ret
= NO_EXIT
;
4149 insn
= extract_insn(env
, s
, &f
);
4151 /* If not found, try the old interpreter. This includes ILLOPC. */
4153 disas_s390_insn(env
, s
);
4154 switch (s
->is_jmp
) {
4162 ret
= EXIT_PC_UPDATED
;
4165 ret
= EXIT_NORETURN
;
4175 /* Set up the strutures we use to communicate with the helpers. */
4178 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4179 TCGV_UNUSED_I64(o
.out
);
4180 TCGV_UNUSED_I64(o
.out2
);
4181 TCGV_UNUSED_I64(o
.in1
);
4182 TCGV_UNUSED_I64(o
.in2
);
4183 TCGV_UNUSED_I64(o
.addr1
);
4185 /* Implement the instruction. */
4186 if (insn
->help_in1
) {
4187 insn
->help_in1(s
, &f
, &o
);
4189 if (insn
->help_in2
) {
4190 insn
->help_in2(s
, &f
, &o
);
4192 if (insn
->help_prep
) {
4193 insn
->help_prep(s
, &f
, &o
);
4195 if (insn
->help_op
) {
4196 ret
= insn
->help_op(s
, &o
);
4198 if (insn
->help_wout
) {
4199 insn
->help_wout(s
, &f
, &o
);
4201 if (insn
->help_cout
) {
4202 insn
->help_cout(s
, &o
);
4205 /* Free any temporaries created by the helpers. */
4206 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4207 tcg_temp_free_i64(o
.out
);
4209 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4210 tcg_temp_free_i64(o
.out2
);
4212 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4213 tcg_temp_free_i64(o
.in1
);
4215 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4216 tcg_temp_free_i64(o
.in2
);
4218 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4219 tcg_temp_free_i64(o
.addr1
);
4222 /* Advance to the next instruction. */
4227 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4228 TranslationBlock
*tb
,
4232 target_ulong pc_start
;
4233 uint64_t next_page_start
;
4234 uint16_t *gen_opc_end
;
4236 int num_insns
, max_insns
;
4244 if (!(tb
->flags
& FLAG_MASK_64
)) {
4245 pc_start
&= 0x7fffffff;
4250 dc
.cc_op
= CC_OP_DYNAMIC
;
4251 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4252 dc
.is_jmp
= DISAS_NEXT
;
4254 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4256 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4259 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4260 if (max_insns
== 0) {
4261 max_insns
= CF_COUNT_MASK
;
4268 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4272 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4275 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4276 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4277 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4278 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4280 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4284 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4285 tcg_gen_debug_insn_start(dc
.pc
);
4289 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4290 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4291 if (bp
->pc
== dc
.pc
) {
4292 status
= EXIT_PC_STALE
;
4298 if (status
== NO_EXIT
) {
4299 status
= translate_one(env
, &dc
);
4302 /* If we reach a page boundary, are single stepping,
4303 or exhaust instruction count, stop generation. */
4304 if (status
== NO_EXIT
4305 && (dc
.pc
>= next_page_start
4306 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4307 || num_insns
>= max_insns
4309 || env
->singlestep_enabled
)) {
4310 status
= EXIT_PC_STALE
;
4312 } while (status
== NO_EXIT
);
4314 if (tb
->cflags
& CF_LAST_IO
) {
4323 update_psw_addr(&dc
);
4325 case EXIT_PC_UPDATED
:
4326 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4327 gen_op_calc_cc(&dc
);
4329 /* Next TB starts off with CC_OP_DYNAMIC,
4330 so make sure the cc op type is in env */
4331 gen_op_set_cc_op(&dc
);
4334 gen_exception(EXCP_DEBUG
);
4336 /* Generate the return instruction */
4344 gen_icount_end(tb
, num_insns
);
4345 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4347 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4350 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4353 tb
->size
= dc
.pc
- pc_start
;
4354 tb
->icount
= num_insns
;
4357 #if defined(S390X_DEBUG_DISAS)
4358 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4359 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4360 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4366 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4368 gen_intermediate_code_internal(env
, tb
, 0);
4371 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4373 gen_intermediate_code_internal(env
, tb
, 1);
4376 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4379 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4380 cc_op
= gen_opc_cc_op
[pc_pos
];
4381 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {