4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
/* global register indexes */
/* TCG global holding the CPU state pointer (initialized from TCG_AREG0
   in s390x_translate_init). */
static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters: branches compiled inline vs. routed through the
   calc_cc helper.  Dumped by cpu_dump_state when this debug knob is on. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
/* Trace a fetched instruction word through the (possibly no-op) LOG_DISAS
   macro.  Compiles away unless S390X_DEBUG_DISAS_VERBOSE is defined. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
/* TCG globals backing the guest PSW. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;

/* Lazily-evaluated condition code: the operation tag plus the raw
   operands from which the cc can be recomputed on demand. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Names and TCG globals for the 16 GPRs and 16 FPRs ("r0".."r15",
   "f0".."f15"); names are filled in by s390x_translate_init. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

/* Per-generated-op cc_op snapshot — presumably used to restore cc state
   on TB retranslation; not referenced in this chunk (verify). */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
491 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
494 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
497 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
500 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
502 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
505 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
507 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
510 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
512 /* XXX optimize for the constant? put it in s? */
513 TCGv_i32 tmp
= tcg_const_i32(v2
);
514 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
515 tcg_temp_free_i32(tmp
);
518 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
520 TCGv_i32 tmp
= tcg_const_i32(v2
);
521 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
522 tcg_temp_free_i32(tmp
);
525 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
527 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
530 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
532 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
535 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
537 TCGv_i64 tmp
= tcg_const_i64(v2
);
539 tcg_temp_free_i64(tmp
);
542 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
544 TCGv_i64 tmp
= tcg_const_i64(v2
);
546 tcg_temp_free_i64(tmp
);
549 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
551 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
554 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
556 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
559 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
561 tcg_gen_extu_i32_i64(cc_src
, v1
);
562 tcg_gen_mov_i64(cc_dst
, v2
);
563 tcg_gen_discard_i64(cc_vr
);
564 s
->cc_op
= CC_OP_LTGT_F32
;
567 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
569 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
572 /* CC value is in env->cc_op */
573 static inline void set_cc_static(DisasContext
*s
)
575 tcg_gen_discard_i64(cc_src
);
576 tcg_gen_discard_i64(cc_dst
);
577 tcg_gen_discard_i64(cc_vr
);
578 s
->cc_op
= CC_OP_STATIC
;
581 static inline void gen_op_set_cc_op(DisasContext
*s
)
583 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
584 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
588 static inline void gen_update_cc_op(DisasContext
*s
)
593 /* calculates cc into cc_op */
594 static void gen_op_calc_cc(DisasContext
*s
)
596 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
597 TCGv_i64 dummy
= tcg_const_i64(0);
604 /* s->cc_op is the cc value */
605 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
608 /* env->cc_op already is the cc value */
622 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
627 case CC_OP_LTUGTU_32
:
628 case CC_OP_LTUGTU_64
:
636 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
651 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
654 /* unknown operation - assume 3 arguments and cc_op in env */
655 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
661 tcg_temp_free_i32(local_cc_op
);
662 tcg_temp_free_i64(dummy
);
664 /* We now have cc in cc_op as constant */
668 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
672 *r1
= (insn
>> 4) & 0xf;
676 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
677 int *x2
, int *b2
, int *d2
)
681 *r1
= (insn
>> 20) & 0xf;
682 *x2
= (insn
>> 16) & 0xf;
683 *b2
= (insn
>> 12) & 0xf;
686 return get_address(s
, *x2
, *b2
, *d2
);
689 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
694 *r1
= (insn
>> 20) & 0xf;
696 *r3
= (insn
>> 16) & 0xf;
697 *b2
= (insn
>> 12) & 0xf;
701 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
706 *i2
= (insn
>> 16) & 0xff;
707 *b1
= (insn
>> 12) & 0xf;
710 return get_address(s
, 0, *b1
, *d1
);
713 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
715 /* NOTE: we handle the case where the TB spans two pages here */
716 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
717 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
718 && !s
->singlestep_enabled
719 && !(s
->tb
->cflags
& CF_LAST_IO
));
722 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
726 if (use_goto_tb(s
, pc
)) {
727 tcg_gen_goto_tb(tb_num
);
728 tcg_gen_movi_i64(psw_addr
, pc
);
729 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
731 /* jump to another page: currently not optimized */
732 tcg_gen_movi_i64(psw_addr
, pc
);
737 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
739 #ifdef DEBUG_INLINE_BRANCHES
740 inline_branch_miss
[cc_op
]++;
744 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
746 #ifdef DEBUG_INLINE_BRANCHES
747 inline_branch_hit
[cc_op
]++;
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow.
   Index = 4-bit branch mask (bit 3 = cc0/EQ, bit 2 = cc1/LT,
   bit 1 = cc2/GT, bit 0 = cc3); odd entries (cc3 bit set) are NEVER
   so they fall back to the helper. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible, so only the EQ (bit 3)
   and NE (bit 2) mask bits select the condition. */
static const TCGCond nz_cond[16] = {
    /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
778 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
779 details required to generate a TCG comparison. */
780 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
783 enum cc_op old_cc_op
= s
->cc_op
;
785 if (mask
== 15 || mask
== 0) {
786 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
789 c
->g1
= c
->g2
= true;
794 /* Find the TCG condition for the mask + cc op. */
800 cond
= ltgt_cond
[mask
];
801 if (cond
== TCG_COND_NEVER
) {
804 account_inline_branch(s
, old_cc_op
);
807 case CC_OP_LTUGTU_32
:
808 case CC_OP_LTUGTU_64
:
809 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
810 if (cond
== TCG_COND_NEVER
) {
813 account_inline_branch(s
, old_cc_op
);
817 cond
= nz_cond
[mask
];
818 if (cond
== TCG_COND_NEVER
) {
821 account_inline_branch(s
, old_cc_op
);
836 account_inline_branch(s
, old_cc_op
);
851 account_inline_branch(s
, old_cc_op
);
856 /* Calculate cc value. */
861 /* Jump based on CC. We'll load up the real cond below;
862 the assignment here merely avoids a compiler warning. */
863 account_noninline_branch(s
, old_cc_op
);
864 old_cc_op
= CC_OP_STATIC
;
865 cond
= TCG_COND_NEVER
;
869 /* Load up the arguments of the comparison. */
871 c
->g1
= c
->g2
= false;
875 c
->u
.s32
.a
= tcg_temp_new_i32();
876 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
877 c
->u
.s32
.b
= tcg_const_i32(0);
880 case CC_OP_LTUGTU_32
:
882 c
->u
.s32
.a
= tcg_temp_new_i32();
883 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
884 c
->u
.s32
.b
= tcg_temp_new_i32();
885 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
891 c
->u
.s64
.b
= tcg_const_i64(0);
895 case CC_OP_LTUGTU_64
:
898 c
->g1
= c
->g2
= true;
904 c
->u
.s64
.a
= tcg_temp_new_i64();
905 c
->u
.s64
.b
= tcg_const_i64(0);
906 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
914 case 0x8 | 0x4 | 0x2: /* cc != 3 */
916 c
->u
.s32
.b
= tcg_const_i32(3);
918 case 0x8 | 0x4 | 0x1: /* cc != 2 */
920 c
->u
.s32
.b
= tcg_const_i32(2);
922 case 0x8 | 0x2 | 0x1: /* cc != 1 */
924 c
->u
.s32
.b
= tcg_const_i32(1);
926 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
929 c
->u
.s32
.a
= tcg_temp_new_i32();
930 c
->u
.s32
.b
= tcg_const_i32(0);
931 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
933 case 0x8 | 0x4: /* cc < 2 */
935 c
->u
.s32
.b
= tcg_const_i32(2);
937 case 0x8: /* cc == 0 */
939 c
->u
.s32
.b
= tcg_const_i32(0);
941 case 0x4 | 0x2 | 0x1: /* cc != 0 */
943 c
->u
.s32
.b
= tcg_const_i32(0);
945 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
948 c
->u
.s32
.a
= tcg_temp_new_i32();
949 c
->u
.s32
.b
= tcg_const_i32(0);
950 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
952 case 0x4: /* cc == 1 */
954 c
->u
.s32
.b
= tcg_const_i32(1);
956 case 0x2 | 0x1: /* cc > 1 */
958 c
->u
.s32
.b
= tcg_const_i32(1);
960 case 0x2: /* cc == 2 */
962 c
->u
.s32
.b
= tcg_const_i32(2);
964 case 0x1: /* cc == 3 */
966 c
->u
.s32
.b
= tcg_const_i32(3);
969 /* CC is masked by something else: (8 >> cc) & mask. */
972 c
->u
.s32
.a
= tcg_const_i32(8);
973 c
->u
.s32
.b
= tcg_const_i32(0);
974 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
975 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
986 static void free_compare(DisasCompare
*c
)
990 tcg_temp_free_i64(c
->u
.s64
.a
);
992 tcg_temp_free_i32(c
->u
.s32
.a
);
997 tcg_temp_free_i64(c
->u
.s64
.b
);
999 tcg_temp_free_i32(c
->u
.s32
.b
);
1004 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1005 int x2
, int b2
, int d2
, int r1b
)
1007 TCGv_i32 tmp_r1
, tmp32
;
1009 addr
= get_address(s
, x2
, b2
, d2
);
1010 tmp_r1
= tcg_const_i32(r1
);
1012 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1013 potential_page_fault(s
);
1014 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1016 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1017 potential_page_fault(s
);
1018 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1020 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1021 tmp
= tcg_temp_new_i64();
1022 tmp32
= load_freg32(r1
);
1023 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1024 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1025 tcg_temp_free_i64(tmp
);
1026 tcg_temp_free_i32(tmp32
);
1028 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1029 tmp
= tcg_temp_new_i64();
1030 tmp32
= tcg_temp_new_i32();
1031 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1032 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1033 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1034 tcg_temp_free_i64(tmp
);
1035 tcg_temp_free_i32(tmp32
);
1037 tmp32
= load_freg32(r1
);
1038 gen_set_cc_nz_f32(s
, tmp32
);
1039 tcg_temp_free_i32(tmp32
);
1041 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1042 tmp
= tcg_temp_new_i64();
1043 tmp32
= tcg_temp_new_i32();
1044 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1045 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1046 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1047 tcg_temp_free_i64(tmp
);
1048 tcg_temp_free_i32(tmp32
);
1050 tmp32
= load_freg32(r1
);
1051 gen_set_cc_nz_f32(s
, tmp32
);
1052 tcg_temp_free_i32(tmp32
);
1054 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1055 tmp
= tcg_temp_new_i64();
1056 tmp32
= tcg_temp_new_i32();
1057 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1058 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1059 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1060 tcg_temp_free_i64(tmp
);
1061 tcg_temp_free_i32(tmp32
);
1063 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1064 potential_page_fault(s
);
1065 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1068 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1069 potential_page_fault(s
);
1070 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1073 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1074 potential_page_fault(s
);
1075 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1078 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1079 tmp
= tcg_temp_new_i64();
1080 tmp32
= tcg_temp_new_i32();
1081 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1082 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1083 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1084 tcg_temp_free_i64(tmp
);
1085 tcg_temp_free_i32(tmp32
);
1087 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1088 potential_page_fault(s
);
1089 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1092 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1093 potential_page_fault(s
);
1094 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1097 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1098 potential_page_fault(s
);
1099 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1102 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1103 potential_page_fault(s
);
1104 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1106 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1107 potential_page_fault(s
);
1108 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1110 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1111 /* for RXF insns, r1 is R3 and r1b is R1 */
1112 tmp32
= tcg_const_i32(r1b
);
1113 potential_page_fault(s
);
1114 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1115 tcg_temp_free_i32(tmp32
);
1118 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1119 gen_illegal_opcode(s
);
1122 tcg_temp_free_i32(tmp_r1
);
1123 tcg_temp_free_i64(addr
);
1126 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1129 TCGv_i64 tmp
, tmp2
, tmp3
;
1130 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1132 #ifndef CONFIG_USER_ONLY
1136 r1
= (insn
>> 4) & 0xf;
1139 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1142 case 0x22: /* IPM R1 [RRE] */
1143 tmp32_1
= tcg_const_i32(r1
);
1145 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1146 tcg_temp_free_i32(tmp32_1
);
1148 case 0x41: /* CKSM R1,R2 [RRE] */
1149 tmp32_1
= tcg_const_i32(r1
);
1150 tmp32_2
= tcg_const_i32(r2
);
1151 potential_page_fault(s
);
1152 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1153 tcg_temp_free_i32(tmp32_1
);
1154 tcg_temp_free_i32(tmp32_2
);
1155 gen_op_movi_cc(s
, 0);
1157 case 0x4e: /* SAR R1,R2 [RRE] */
1158 tmp32_1
= load_reg32(r2
);
1159 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1160 tcg_temp_free_i32(tmp32_1
);
1162 case 0x4f: /* EAR R1,R2 [RRE] */
1163 tmp32_1
= tcg_temp_new_i32();
1164 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1165 store_reg32(r1
, tmp32_1
);
1166 tcg_temp_free_i32(tmp32_1
);
1168 case 0x54: /* MVPG R1,R2 [RRE] */
1170 tmp2
= load_reg(r1
);
1171 tmp3
= load_reg(r2
);
1172 potential_page_fault(s
);
1173 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1174 tcg_temp_free_i64(tmp
);
1175 tcg_temp_free_i64(tmp2
);
1176 tcg_temp_free_i64(tmp3
);
1177 /* XXX check CCO bit and set CC accordingly */
1178 gen_op_movi_cc(s
, 0);
1180 case 0x55: /* MVST R1,R2 [RRE] */
1181 tmp32_1
= load_reg32(0);
1182 tmp32_2
= tcg_const_i32(r1
);
1183 tmp32_3
= tcg_const_i32(r2
);
1184 potential_page_fault(s
);
1185 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1186 tcg_temp_free_i32(tmp32_1
);
1187 tcg_temp_free_i32(tmp32_2
);
1188 tcg_temp_free_i32(tmp32_3
);
1189 gen_op_movi_cc(s
, 1);
1191 case 0x5d: /* CLST R1,R2 [RRE] */
1192 tmp32_1
= load_reg32(0);
1193 tmp32_2
= tcg_const_i32(r1
);
1194 tmp32_3
= tcg_const_i32(r2
);
1195 potential_page_fault(s
);
1196 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1198 tcg_temp_free_i32(tmp32_1
);
1199 tcg_temp_free_i32(tmp32_2
);
1200 tcg_temp_free_i32(tmp32_3
);
1202 case 0x5e: /* SRST R1,R2 [RRE] */
1203 tmp32_1
= load_reg32(0);
1204 tmp32_2
= tcg_const_i32(r1
);
1205 tmp32_3
= tcg_const_i32(r2
);
1206 potential_page_fault(s
);
1207 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1209 tcg_temp_free_i32(tmp32_1
);
1210 tcg_temp_free_i32(tmp32_2
);
1211 tcg_temp_free_i32(tmp32_3
);
1214 #ifndef CONFIG_USER_ONLY
1215 case 0x02: /* STIDP D2(B2) [S] */
1217 check_privileged(s
);
1218 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1219 tmp
= get_address(s
, 0, b2
, d2
);
1220 potential_page_fault(s
);
1221 gen_helper_stidp(cpu_env
, tmp
);
1222 tcg_temp_free_i64(tmp
);
1224 case 0x04: /* SCK D2(B2) [S] */
1226 check_privileged(s
);
1227 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1228 tmp
= get_address(s
, 0, b2
, d2
);
1229 potential_page_fault(s
);
1230 gen_helper_sck(cc_op
, tmp
);
1232 tcg_temp_free_i64(tmp
);
1234 case 0x05: /* STCK D2(B2) [S] */
1236 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1237 tmp
= get_address(s
, 0, b2
, d2
);
1238 potential_page_fault(s
);
1239 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1241 tcg_temp_free_i64(tmp
);
1243 case 0x06: /* SCKC D2(B2) [S] */
1244 /* Set Clock Comparator */
1245 check_privileged(s
);
1246 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1247 tmp
= get_address(s
, 0, b2
, d2
);
1248 potential_page_fault(s
);
1249 gen_helper_sckc(cpu_env
, tmp
);
1250 tcg_temp_free_i64(tmp
);
1252 case 0x07: /* STCKC D2(B2) [S] */
1253 /* Store Clock Comparator */
1254 check_privileged(s
);
1255 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1256 tmp
= get_address(s
, 0, b2
, d2
);
1257 potential_page_fault(s
);
1258 gen_helper_stckc(cpu_env
, tmp
);
1259 tcg_temp_free_i64(tmp
);
1261 case 0x08: /* SPT D2(B2) [S] */
1263 check_privileged(s
);
1264 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1265 tmp
= get_address(s
, 0, b2
, d2
);
1266 potential_page_fault(s
);
1267 gen_helper_spt(cpu_env
, tmp
);
1268 tcg_temp_free_i64(tmp
);
1270 case 0x09: /* STPT D2(B2) [S] */
1271 /* Store CPU Timer */
1272 check_privileged(s
);
1273 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1274 tmp
= get_address(s
, 0, b2
, d2
);
1275 potential_page_fault(s
);
1276 gen_helper_stpt(cpu_env
, tmp
);
1277 tcg_temp_free_i64(tmp
);
1279 case 0x0a: /* SPKA D2(B2) [S] */
1280 /* Set PSW Key from Address */
1281 check_privileged(s
);
1282 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1283 tmp
= get_address(s
, 0, b2
, d2
);
1284 tmp2
= tcg_temp_new_i64();
1285 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1286 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1287 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1288 tcg_temp_free_i64(tmp2
);
1289 tcg_temp_free_i64(tmp
);
1291 case 0x0d: /* PTLB [S] */
1293 check_privileged(s
);
1294 gen_helper_ptlb(cpu_env
);
1296 case 0x10: /* SPX D2(B2) [S] */
1297 /* Set Prefix Register */
1298 check_privileged(s
);
1299 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1300 tmp
= get_address(s
, 0, b2
, d2
);
1301 potential_page_fault(s
);
1302 gen_helper_spx(cpu_env
, tmp
);
1303 tcg_temp_free_i64(tmp
);
1305 case 0x11: /* STPX D2(B2) [S] */
1307 check_privileged(s
);
1308 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1309 tmp
= get_address(s
, 0, b2
, d2
);
1310 tmp2
= tcg_temp_new_i64();
1311 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1312 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1313 tcg_temp_free_i64(tmp
);
1314 tcg_temp_free_i64(tmp2
);
1316 case 0x12: /* STAP D2(B2) [S] */
1317 /* Store CPU Address */
1318 check_privileged(s
);
1319 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1320 tmp
= get_address(s
, 0, b2
, d2
);
1321 tmp2
= tcg_temp_new_i64();
1322 tmp32_1
= tcg_temp_new_i32();
1323 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1324 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1325 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1326 tcg_temp_free_i64(tmp
);
1327 tcg_temp_free_i64(tmp2
);
1328 tcg_temp_free_i32(tmp32_1
);
1330 case 0x21: /* IPTE R1,R2 [RRE] */
1331 /* Invalidate PTE */
1332 check_privileged(s
);
1333 r1
= (insn
>> 4) & 0xf;
1336 tmp2
= load_reg(r2
);
1337 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1338 tcg_temp_free_i64(tmp
);
1339 tcg_temp_free_i64(tmp2
);
1341 case 0x29: /* ISKE R1,R2 [RRE] */
1342 /* Insert Storage Key Extended */
1343 check_privileged(s
);
1344 r1
= (insn
>> 4) & 0xf;
1347 tmp2
= tcg_temp_new_i64();
1348 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1349 store_reg(r1
, tmp2
);
1350 tcg_temp_free_i64(tmp
);
1351 tcg_temp_free_i64(tmp2
);
1353 case 0x2a: /* RRBE R1,R2 [RRE] */
1354 /* Set Storage Key Extended */
1355 check_privileged(s
);
1356 r1
= (insn
>> 4) & 0xf;
1358 tmp32_1
= load_reg32(r1
);
1360 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1362 tcg_temp_free_i32(tmp32_1
);
1363 tcg_temp_free_i64(tmp
);
1365 case 0x2b: /* SSKE R1,R2 [RRE] */
1366 /* Set Storage Key Extended */
1367 check_privileged(s
);
1368 r1
= (insn
>> 4) & 0xf;
1370 tmp32_1
= load_reg32(r1
);
1372 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1373 tcg_temp_free_i32(tmp32_1
);
1374 tcg_temp_free_i64(tmp
);
1376 case 0x34: /* STCH ? */
1377 /* Store Subchannel */
1378 check_privileged(s
);
1379 gen_op_movi_cc(s
, 3);
1381 case 0x46: /* STURA R1,R2 [RRE] */
1382 /* Store Using Real Address */
1383 check_privileged(s
);
1384 r1
= (insn
>> 4) & 0xf;
1386 tmp32_1
= load_reg32(r1
);
1388 potential_page_fault(s
);
1389 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1390 tcg_temp_free_i32(tmp32_1
);
1391 tcg_temp_free_i64(tmp
);
1393 case 0x50: /* CSP R1,R2 [RRE] */
1394 /* Compare And Swap And Purge */
1395 check_privileged(s
);
1396 r1
= (insn
>> 4) & 0xf;
1398 tmp32_1
= tcg_const_i32(r1
);
1399 tmp32_2
= tcg_const_i32(r2
);
1400 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1402 tcg_temp_free_i32(tmp32_1
);
1403 tcg_temp_free_i32(tmp32_2
);
1405 case 0x5f: /* CHSC ? */
1406 /* Channel Subsystem Call */
1407 check_privileged(s
);
1408 gen_op_movi_cc(s
, 3);
1410 case 0x78: /* STCKE D2(B2) [S] */
1411 /* Store Clock Extended */
1412 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1413 tmp
= get_address(s
, 0, b2
, d2
);
1414 potential_page_fault(s
);
1415 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1417 tcg_temp_free_i64(tmp
);
1419 case 0x79: /* SACF D2(B2) [S] */
1420 /* Set Address Space Control Fast */
1421 check_privileged(s
);
1422 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1423 tmp
= get_address(s
, 0, b2
, d2
);
1424 potential_page_fault(s
);
1425 gen_helper_sacf(cpu_env
, tmp
);
1426 tcg_temp_free_i64(tmp
);
1427 /* addressing mode has changed, so end the block */
1430 s
->is_jmp
= DISAS_JUMP
;
1432 case 0x7d: /* STSI D2,(B2) [S] */
1433 check_privileged(s
);
1434 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1435 tmp
= get_address(s
, 0, b2
, d2
);
1436 tmp32_1
= load_reg32(0);
1437 tmp32_2
= load_reg32(1);
1438 potential_page_fault(s
);
1439 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1441 tcg_temp_free_i64(tmp
);
1442 tcg_temp_free_i32(tmp32_1
);
1443 tcg_temp_free_i32(tmp32_2
);
1445 case 0x9d: /* LFPC D2(B2) [S] */
1446 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1447 tmp
= get_address(s
, 0, b2
, d2
);
1448 tmp2
= tcg_temp_new_i64();
1449 tmp32_1
= tcg_temp_new_i32();
1450 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1451 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1452 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1453 tcg_temp_free_i64(tmp
);
1454 tcg_temp_free_i64(tmp2
);
1455 tcg_temp_free_i32(tmp32_1
);
1457 case 0xb1: /* STFL D2(B2) [S] */
1458 /* Store Facility List (CPU features) at 200 */
1459 check_privileged(s
);
1460 tmp2
= tcg_const_i64(0xc0000000);
1461 tmp
= tcg_const_i64(200);
1462 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1463 tcg_temp_free_i64(tmp2
);
1464 tcg_temp_free_i64(tmp
);
1466 case 0xb2: /* LPSWE D2(B2) [S] */
1467 /* Load PSW Extended */
1468 check_privileged(s
);
1469 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1470 tmp
= get_address(s
, 0, b2
, d2
);
1471 tmp2
= tcg_temp_new_i64();
1472 tmp3
= tcg_temp_new_i64();
1473 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1474 tcg_gen_addi_i64(tmp
, tmp
, 8);
1475 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1476 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1477 /* we need to keep cc_op intact */
1478 s
->is_jmp
= DISAS_JUMP
;
1479 tcg_temp_free_i64(tmp
);
1480 tcg_temp_free_i64(tmp2
);
1481 tcg_temp_free_i64(tmp3
);
1483 case 0x20: /* SERVC R1,R2 [RRE] */
1484 /* SCLP Service call (PV hypercall) */
1485 check_privileged(s
);
1486 potential_page_fault(s
);
1487 tmp32_1
= load_reg32(r2
);
1489 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1491 tcg_temp_free_i32(tmp32_1
);
1492 tcg_temp_free_i64(tmp
);
1496 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1497 gen_illegal_opcode(s
);
1502 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1506 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1507 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1508 #define FP_HELPER(i) \
1509 tmp32_1 = tcg_const_i32(r1); \
1510 tmp32_2 = tcg_const_i32(r2); \
1511 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1512 tcg_temp_free_i32(tmp32_1); \
1513 tcg_temp_free_i32(tmp32_2);
1515 #define FP_HELPER_CC(i) \
1516 tmp32_1 = tcg_const_i32(r1); \
1517 tmp32_2 = tcg_const_i32(r2); \
1518 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1520 tcg_temp_free_i32(tmp32_1); \
1521 tcg_temp_free_i32(tmp32_2);
1524 case 0x0: /* LPEBR R1,R2 [RRE] */
1525 FP_HELPER_CC(lpebr
);
1527 case 0x2: /* LTEBR R1,R2 [RRE] */
1528 FP_HELPER_CC(ltebr
);
1530 case 0x3: /* LCEBR R1,R2 [RRE] */
1531 FP_HELPER_CC(lcebr
);
1533 case 0x4: /* LDEBR R1,R2 [RRE] */
1536 case 0x5: /* LXDBR R1,R2 [RRE] */
1539 case 0x9: /* CEBR R1,R2 [RRE] */
1542 case 0xa: /* AEBR R1,R2 [RRE] */
1545 case 0xb: /* SEBR R1,R2 [RRE] */
1548 case 0xd: /* DEBR R1,R2 [RRE] */
1551 case 0x10: /* LPDBR R1,R2 [RRE] */
1552 FP_HELPER_CC(lpdbr
);
1554 case 0x12: /* LTDBR R1,R2 [RRE] */
1555 FP_HELPER_CC(ltdbr
);
1557 case 0x13: /* LCDBR R1,R2 [RRE] */
1558 FP_HELPER_CC(lcdbr
);
1560 case 0x15: /* SQBDR R1,R2 [RRE] */
1563 case 0x17: /* MEEBR R1,R2 [RRE] */
1566 case 0x19: /* CDBR R1,R2 [RRE] */
1569 case 0x1a: /* ADBR R1,R2 [RRE] */
1572 case 0x1b: /* SDBR R1,R2 [RRE] */
1575 case 0x1c: /* MDBR R1,R2 [RRE] */
1578 case 0x1d: /* DDBR R1,R2 [RRE] */
1581 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
1582 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
1583 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
1584 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
1585 tmp32_1
= tcg_const_i32(m3
);
1586 tmp32_2
= tcg_const_i32(r2
);
1587 tmp32_3
= tcg_const_i32(r1
);
1590 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1593 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1596 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1601 tcg_temp_free_i32(tmp32_1
);
1602 tcg_temp_free_i32(tmp32_2
);
1603 tcg_temp_free_i32(tmp32_3
);
1605 case 0x40: /* LPXBR R1,R2 [RRE] */
1606 FP_HELPER_CC(lpxbr
);
1608 case 0x42: /* LTXBR R1,R2 [RRE] */
1609 FP_HELPER_CC(ltxbr
);
1611 case 0x43: /* LCXBR R1,R2 [RRE] */
1612 FP_HELPER_CC(lcxbr
);
1614 case 0x44: /* LEDBR R1,R2 [RRE] */
1617 case 0x45: /* LDXBR R1,R2 [RRE] */
1620 case 0x46: /* LEXBR R1,R2 [RRE] */
1623 case 0x49: /* CXBR R1,R2 [RRE] */
1626 case 0x4a: /* AXBR R1,R2 [RRE] */
1629 case 0x4b: /* SXBR R1,R2 [RRE] */
1632 case 0x4c: /* MXBR R1,R2 [RRE] */
1635 case 0x4d: /* DXBR R1,R2 [RRE] */
1638 case 0x65: /* LXR R1,R2 [RRE] */
1639 tmp
= load_freg(r2
);
1640 store_freg(r1
, tmp
);
1641 tcg_temp_free_i64(tmp
);
1642 tmp
= load_freg(r2
+ 2);
1643 store_freg(r1
+ 2, tmp
);
1644 tcg_temp_free_i64(tmp
);
1646 case 0x74: /* LZER R1 [RRE] */
1647 tmp32_1
= tcg_const_i32(r1
);
1648 gen_helper_lzer(cpu_env
, tmp32_1
);
1649 tcg_temp_free_i32(tmp32_1
);
1651 case 0x75: /* LZDR R1 [RRE] */
1652 tmp32_1
= tcg_const_i32(r1
);
1653 gen_helper_lzdr(cpu_env
, tmp32_1
);
1654 tcg_temp_free_i32(tmp32_1
);
1656 case 0x76: /* LZXR R1 [RRE] */
1657 tmp32_1
= tcg_const_i32(r1
);
1658 gen_helper_lzxr(cpu_env
, tmp32_1
);
1659 tcg_temp_free_i32(tmp32_1
);
1661 case 0x84: /* SFPC R1 [RRE] */
1662 tmp32_1
= load_reg32(r1
);
1663 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1664 tcg_temp_free_i32(tmp32_1
);
1666 case 0x94: /* CEFBR R1,R2 [RRE] */
1667 case 0x95: /* CDFBR R1,R2 [RRE] */
1668 case 0x96: /* CXFBR R1,R2 [RRE] */
1669 tmp32_1
= tcg_const_i32(r1
);
1670 tmp32_2
= load_reg32(r2
);
1673 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1676 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1679 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1684 tcg_temp_free_i32(tmp32_1
);
1685 tcg_temp_free_i32(tmp32_2
);
1687 case 0x98: /* CFEBR R1,R2 [RRE] */
1688 case 0x99: /* CFDBR R1,R2 [RRE] */
1689 case 0x9a: /* CFXBR R1,R2 [RRE] */
1690 tmp32_1
= tcg_const_i32(r1
);
1691 tmp32_2
= tcg_const_i32(r2
);
1692 tmp32_3
= tcg_const_i32(m3
);
1695 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1698 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1701 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1707 tcg_temp_free_i32(tmp32_1
);
1708 tcg_temp_free_i32(tmp32_2
);
1709 tcg_temp_free_i32(tmp32_3
);
1711 case 0xa4: /* CEGBR R1,R2 [RRE] */
1712 case 0xa5: /* CDGBR R1,R2 [RRE] */
1713 tmp32_1
= tcg_const_i32(r1
);
1717 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1720 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1725 tcg_temp_free_i32(tmp32_1
);
1726 tcg_temp_free_i64(tmp
);
1728 case 0xa6: /* CXGBR R1,R2 [RRE] */
1729 tmp32_1
= tcg_const_i32(r1
);
1731 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1732 tcg_temp_free_i32(tmp32_1
);
1733 tcg_temp_free_i64(tmp
);
1735 case 0xa8: /* CGEBR R1,R2 [RRE] */
1736 tmp32_1
= tcg_const_i32(r1
);
1737 tmp32_2
= tcg_const_i32(r2
);
1738 tmp32_3
= tcg_const_i32(m3
);
1739 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1741 tcg_temp_free_i32(tmp32_1
);
1742 tcg_temp_free_i32(tmp32_2
);
1743 tcg_temp_free_i32(tmp32_3
);
1745 case 0xa9: /* CGDBR R1,R2 [RRE] */
1746 tmp32_1
= tcg_const_i32(r1
);
1747 tmp32_2
= tcg_const_i32(r2
);
1748 tmp32_3
= tcg_const_i32(m3
);
1749 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1751 tcg_temp_free_i32(tmp32_1
);
1752 tcg_temp_free_i32(tmp32_2
);
1753 tcg_temp_free_i32(tmp32_3
);
1755 case 0xaa: /* CGXBR R1,R2 [RRE] */
1756 tmp32_1
= tcg_const_i32(r1
);
1757 tmp32_2
= tcg_const_i32(r2
);
1758 tmp32_3
= tcg_const_i32(m3
);
1759 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1761 tcg_temp_free_i32(tmp32_1
);
1762 tcg_temp_free_i32(tmp32_2
);
1763 tcg_temp_free_i32(tmp32_3
);
1766 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1767 gen_illegal_opcode(s
);
1775 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1781 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1783 case 0x83: /* FLOGR R1,R2 [RRE] */
1785 tmp32_1
= tcg_const_i32(r1
);
1786 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1788 tcg_temp_free_i64(tmp
);
1789 tcg_temp_free_i32(tmp32_1
);
1792 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1793 gen_illegal_opcode(s
);
1798 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1802 int op
, r1
, r2
, r3
, d2
, x2
, b2
, r1b
;
1804 opc
= cpu_ldub_code(env
, s
->pc
);
1805 LOG_DISAS("opc 0x%x\n", opc
);
1809 insn
= ld_code4(env
, s
->pc
);
1810 op
= (insn
>> 16) & 0xff;
1811 disas_b2(env
, s
, op
, insn
);
1814 insn
= ld_code4(env
, s
->pc
);
1815 op
= (insn
>> 16) & 0xff;
1816 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1817 r1
= (insn
>> 4) & 0xf;
1819 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1822 insn
= ld_code4(env
, s
->pc
);
1823 r1
= (insn
>> 4) & 0xf;
1825 op
= (insn
>> 16) & 0xff;
1826 disas_b9(env
, s
, op
, r1
, r2
);
1829 insn
= ld_code6(env
, s
->pc
);
1832 r1
= (insn
>> 36) & 0xf;
1833 x2
= (insn
>> 32) & 0xf;
1834 b2
= (insn
>> 28) & 0xf;
1835 d2
= (short)((insn
>> 16) & 0xfff);
1836 r1b
= (insn
>> 12) & 0xf;
1837 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
1840 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1841 gen_illegal_opcode(s
);
1846 /* ====================================================================== */
1847 /* Define the insn format enumeration. */
1848 #define F0(N) FMT_##N,
1849 #define F1(N, X1) F0(N)
1850 #define F2(N, X1, X2) F0(N)
1851 #define F3(N, X1, X2, X3) F0(N)
1852 #define F4(N, X1, X2, X3, X4) F0(N)
1853 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1856 #include "insn-format.def"
1866 /* Define a structure to hold the decoded fields. We'll store each inside
1867 an array indexed by an enum. In order to conserve memory, we'll arrange
1868 for fields that do not exist at the same time to overlap, thus the "C"
1869 for compact. For checking purposes there is an "O" for original index
1870 as well that will be applied to availability bitmaps. */
1872 enum DisasFieldIndexO
{
1895 enum DisasFieldIndexC
{
1926 struct DisasFields
{
1929 unsigned presentC
:16;
1930 unsigned int presentO
;
1934 /* This is the way fields are to be accessed out of DisasFields. */
1935 #define have_field(S, F) have_field1((S), FLD_O_##F)
1936 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1938 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1940 return (f
->presentO
>> c
) & 1;
1943 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1944 enum DisasFieldIndexC c
)
1946 assert(have_field1(f
, o
));
1950 /* Describe the layout of each field in each format. */
1951 typedef struct DisasField
{
1953 unsigned int size
:8;
1954 unsigned int type
:2;
1955 unsigned int indexC
:6;
1956 enum DisasFieldIndexO indexO
:8;
1959 typedef struct DisasFormatInfo
{
1960 DisasField op
[NUM_C_FIELD
];
1963 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1964 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1965 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1966 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1967 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1968 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1969 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1970 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1971 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1972 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1973 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1974 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1975 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1976 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1978 #define F0(N) { { } },
1979 #define F1(N, X1) { { X1 } },
1980 #define F2(N, X1, X2) { { X1, X2 } },
1981 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1982 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1983 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1985 static const DisasFormatInfo format_info
[] = {
1986 #include "insn-format.def"
2004 /* Generally, we'll extract operands into this structures, operate upon
2005 them, and store them back. See the "in1", "in2", "prep", "wout" sets
2006 of routines below for more details. */
2008 bool g_out
, g_out2
, g_in1
, g_in2
;
2009 TCGv_i64 out
, out2
, in1
, in2
;
2013 /* Return values from translate_one, indicating the state of the TB. */
2015 /* Continue the TB. */
2017 /* We have emitted one or more goto_tb. No fixup required. */
2019 /* We are not using a goto_tb (for whatever reason), but have updated
2020 the PC (for whatever reason), so there's no need to do it again on
2023 /* We are exiting the TB, but have neither emitted a goto_tb, nor
2024 updated the PC for the next instruction to be executed. */
2026 /* We are ending the TB with a noreturn function call, e.g. longjmp.
2027 No following code will be executed. */
2031 typedef enum DisasFacility
{
2032 FAC_Z
, /* zarch (default) */
2033 FAC_CASS
, /* compare and swap and store */
2034 FAC_CASS2
, /* compare and swap and store 2*/
2035 FAC_DFP
, /* decimal floating point */
2036 FAC_DFPR
, /* decimal floating point rounding */
2037 FAC_DO
, /* distinct operands */
2038 FAC_EE
, /* execute extensions */
2039 FAC_EI
, /* extended immediate */
2040 FAC_FPE
, /* floating point extension */
2041 FAC_FPSSH
, /* floating point support sign handling */
2042 FAC_FPRGR
, /* FPR-GR transfer */
2043 FAC_GIE
, /* general instructions extension */
2044 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
2045 FAC_HW
, /* high-word */
2046 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
2047 FAC_LOC
, /* load/store on condition */
2048 FAC_LD
, /* long displacement */
2049 FAC_PC
, /* population count */
2050 FAC_SCF
, /* store clock fast */
2051 FAC_SFLE
, /* store facility list extended */
2057 DisasFacility fac
:6;
2061 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
2062 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
2063 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
2064 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
2065 void (*help_cout
)(DisasContext
*, DisasOps
*);
2066 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
2071 /* ====================================================================== */
2072 /* Miscelaneous helpers, used by several operations. */
2074 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
2075 DisasOps
*o
, int mask
)
2077 int b2
= get_field(f
, b2
);
2078 int d2
= get_field(f
, d2
);
2081 o
->in2
= tcg_const_i64(d2
& mask
);
2083 o
->in2
= get_address(s
, 0, b2
, d2
);
2084 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2088 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
2090 if (dest
== s
->next_pc
) {
2093 if (use_goto_tb(s
, dest
)) {
2094 gen_update_cc_op(s
);
2096 tcg_gen_movi_i64(psw_addr
, dest
);
2097 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
2098 return EXIT_GOTO_TB
;
2100 tcg_gen_movi_i64(psw_addr
, dest
);
2101 return EXIT_PC_UPDATED
;
2105 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
2106 bool is_imm
, int imm
, TCGv_i64 cdest
)
2109 uint64_t dest
= s
->pc
+ 2 * imm
;
2112 /* Take care of the special cases first. */
2113 if (c
->cond
== TCG_COND_NEVER
) {
2118 if (dest
== s
->next_pc
) {
2119 /* Branch to next. */
2123 if (c
->cond
== TCG_COND_ALWAYS
) {
2124 ret
= help_goto_direct(s
, dest
);
2128 if (TCGV_IS_UNUSED_I64(cdest
)) {
2129 /* E.g. bcr %r0 -> no branch. */
2133 if (c
->cond
== TCG_COND_ALWAYS
) {
2134 tcg_gen_mov_i64(psw_addr
, cdest
);
2135 ret
= EXIT_PC_UPDATED
;
2140 if (use_goto_tb(s
, s
->next_pc
)) {
2141 if (is_imm
&& use_goto_tb(s
, dest
)) {
2142 /* Both exits can use goto_tb. */
2143 gen_update_cc_op(s
);
2145 lab
= gen_new_label();
2147 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2149 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2152 /* Branch not taken. */
2154 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2155 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2160 tcg_gen_movi_i64(psw_addr
, dest
);
2161 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
2165 /* Fallthru can use goto_tb, but taken branch cannot. */
2166 /* Store taken branch destination before the brcond. This
2167 avoids having to allocate a new local temp to hold it.
2168 We'll overwrite this in the not taken case anyway. */
2170 tcg_gen_mov_i64(psw_addr
, cdest
);
2173 lab
= gen_new_label();
2175 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2177 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2180 /* Branch not taken. */
2181 gen_update_cc_op(s
);
2183 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2184 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2188 tcg_gen_movi_i64(psw_addr
, dest
);
2190 ret
= EXIT_PC_UPDATED
;
2193 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
2194 Most commonly we're single-stepping or some other condition that
2195 disables all use of goto_tb. Just update the PC and exit. */
2197 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
2199 cdest
= tcg_const_i64(dest
);
2203 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
2206 TCGv_i32 t0
= tcg_temp_new_i32();
2207 TCGv_i64 t1
= tcg_temp_new_i64();
2208 TCGv_i64 z
= tcg_const_i64(0);
2209 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
2210 tcg_gen_extu_i32_i64(t1
, t0
);
2211 tcg_temp_free_i32(t0
);
2212 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
2213 tcg_temp_free_i64(t1
);
2214 tcg_temp_free_i64(z
);
2218 tcg_temp_free_i64(cdest
);
2220 tcg_temp_free_i64(next
);
2222 ret
= EXIT_PC_UPDATED
;
2230 /* ====================================================================== */
2231 /* The operations. These perform the bulk of the work for any insn,
2232 usually after the operands have been loaded and output initialized. */
2234 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
2236 gen_helper_abs_i64(o
->out
, o
->in2
);
2240 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
2242 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2246 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2250 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2252 /* XXX possible optimization point */
2254 cc
= tcg_temp_new_i64();
2255 tcg_gen_extu_i32_i64(cc
, cc_op
);
2256 tcg_gen_shri_i64(cc
, cc
, 1);
2258 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2259 tcg_temp_free_i64(cc
);
2263 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2265 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2269 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2271 int shift
= s
->insn
->data
& 0xff;
2272 int size
= s
->insn
->data
>> 8;
2273 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2276 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2277 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2278 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2280 /* Produce the CC from only the bits manipulated. */
2281 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2282 set_cc_nz_u64(s
, cc_dst
);
2286 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2288 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2289 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2290 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2291 return EXIT_PC_UPDATED
;
2297 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2299 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2300 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2303 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2305 int m1
= get_field(s
->fields
, m1
);
2306 bool is_imm
= have_field(s
->fields
, i2
);
2307 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2310 disas_jcc(s
, &c
, m1
);
2311 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2314 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2316 int r1
= get_field(s
->fields
, r1
);
2317 bool is_imm
= have_field(s
->fields
, i2
);
2318 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2322 c
.cond
= TCG_COND_NE
;
2327 t
= tcg_temp_new_i64();
2328 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2329 store_reg32_i64(r1
, t
);
2330 c
.u
.s32
.a
= tcg_temp_new_i32();
2331 c
.u
.s32
.b
= tcg_const_i32(0);
2332 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2333 tcg_temp_free_i64(t
);
2335 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2338 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2340 int r1
= get_field(s
->fields
, r1
);
2341 bool is_imm
= have_field(s
->fields
, i2
);
2342 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2345 c
.cond
= TCG_COND_NE
;
2350 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2351 c
.u
.s64
.a
= regs
[r1
];
2352 c
.u
.s64
.b
= tcg_const_i64(0);
2354 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2357 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2359 int l
= get_field(s
->fields
, l1
);
2364 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2365 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2368 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2369 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2372 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2373 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2376 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2377 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2380 potential_page_fault(s
);
2381 vl
= tcg_const_i32(l
);
2382 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2383 tcg_temp_free_i32(vl
);
2387 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2391 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2393 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2394 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2395 potential_page_fault(s
);
2396 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2397 tcg_temp_free_i32(r1
);
2398 tcg_temp_free_i32(r3
);
2403 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2405 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2406 TCGv_i32 t1
= tcg_temp_new_i32();
2407 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2408 potential_page_fault(s
);
2409 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2411 tcg_temp_free_i32(t1
);
2412 tcg_temp_free_i32(m3
);
2416 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2418 int r3
= get_field(s
->fields
, r3
);
2419 potential_page_fault(s
);
2420 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2425 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2427 int r3
= get_field(s
->fields
, r3
);
2428 potential_page_fault(s
);
2429 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2434 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2436 int r3
= get_field(s
->fields
, r3
);
2437 TCGv_i64 in3
= tcg_temp_new_i64();
2438 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2439 potential_page_fault(s
);
2440 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2441 tcg_temp_free_i64(in3
);
2446 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2448 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2449 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2450 potential_page_fault(s
);
2451 /* XXX rewrite in tcg */
2452 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2457 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2459 TCGv_i64 t1
= tcg_temp_new_i64();
2460 TCGv_i32 t2
= tcg_temp_new_i32();
2461 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2462 gen_helper_cvd(t1
, t2
);
2463 tcg_temp_free_i32(t2
);
2464 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2465 tcg_temp_free_i64(t1
);
2469 #ifndef CONFIG_USER_ONLY
2470 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2474 check_privileged(s
);
2475 potential_page_fault(s
);
2477 /* We pretend the format is RX_a so that D2 is the field we want. */
2478 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2479 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2480 tcg_temp_free_i32(tmp
);
2485 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2487 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2488 return_low128(o
->out
);
2492 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2494 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2495 return_low128(o
->out
);
2499 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2501 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2502 return_low128(o
->out
);
2506 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2508 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2509 return_low128(o
->out
);
2513 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2515 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2519 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2521 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2522 tb->flags, (ab)use the tb->cs_base field as the address of
2523 the template in memory, and grab 8 bits of tb->flags/cflags for
2524 the contents of the register. We would then recognize all this
2525 in gen_intermediate_code_internal, generating code for exactly
2526 one instruction. This new TB then gets executed normally.
2528 On the other hand, this seems to be mostly used for modifying
2529 MVC inside of memcpy, which needs a helper call anyway. So
2530 perhaps this doesn't bear thinking about any further. */
2537 tmp
= tcg_const_i64(s
->next_pc
);
2538 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2539 tcg_temp_free_i64(tmp
);
2545 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2547 int m3
= get_field(s
->fields
, m3
);
2548 int pos
, len
, base
= s
->insn
->data
;
2549 TCGv_i64 tmp
= tcg_temp_new_i64();
2554 /* Effectively a 32-bit load. */
2555 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2562 /* Effectively a 16-bit load. */
2563 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2571 /* Effectively an 8-bit load. */
2572 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2577 pos
= base
+ ctz32(m3
) * 8;
2578 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2579 ccm
= ((1ull << len
) - 1) << pos
;
2583 /* This is going to be a sequence of loads and inserts. */
2584 pos
= base
+ 32 - 8;
2588 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2589 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2590 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2593 m3
= (m3
<< 1) & 0xf;
2599 tcg_gen_movi_i64(tmp
, ccm
);
2600 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2601 tcg_temp_free_i64(tmp
);
2605 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2607 int shift
= s
->insn
->data
& 0xff;
2608 int size
= s
->insn
->data
>> 8;
2609 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2613 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2615 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2619 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2621 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2625 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2627 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2631 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2633 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2637 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2639 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2643 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2645 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2649 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2651 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2655 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2657 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2661 #ifndef CONFIG_USER_ONLY
2662 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2664 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2665 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2666 check_privileged(s
);
2667 potential_page_fault(s
);
2668 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2669 tcg_temp_free_i32(r1
);
2670 tcg_temp_free_i32(r3
);
2674 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2676 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2677 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2678 check_privileged(s
);
2679 potential_page_fault(s
);
2680 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2681 tcg_temp_free_i32(r1
);
2682 tcg_temp_free_i32(r3
);
2685 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2687 check_privileged(s
);
2688 potential_page_fault(s
);
2689 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2694 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2698 check_privileged(s
);
2700 t1
= tcg_temp_new_i64();
2701 t2
= tcg_temp_new_i64();
2702 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2703 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2704 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2705 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2706 tcg_gen_shli_i64(t1
, t1
, 32);
2707 gen_helper_load_psw(cpu_env
, t1
, t2
);
2708 tcg_temp_free_i64(t1
);
2709 tcg_temp_free_i64(t2
);
2710 return EXIT_NORETURN
;
2714 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2716 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2717 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2718 potential_page_fault(s
);
2719 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2720 tcg_temp_free_i32(r1
);
2721 tcg_temp_free_i32(r3
);
2725 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2727 int r1
= get_field(s
->fields
, r1
);
2728 int r3
= get_field(s
->fields
, r3
);
2729 TCGv_i64 t
= tcg_temp_new_i64();
2730 TCGv_i64 t4
= tcg_const_i64(4);
2733 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2734 store_reg32_i64(r1
, t
);
2738 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2742 tcg_temp_free_i64(t
);
2743 tcg_temp_free_i64(t4
);
2747 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2749 int r1
= get_field(s
->fields
, r1
);
2750 int r3
= get_field(s
->fields
, r3
);
2751 TCGv_i64 t
= tcg_temp_new_i64();
2752 TCGv_i64 t4
= tcg_const_i64(4);
2755 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2756 store_reg32h_i64(r1
, t
);
2760 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2764 tcg_temp_free_i64(t
);
2765 tcg_temp_free_i64(t4
);
2769 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2771 int r1
= get_field(s
->fields
, r1
);
2772 int r3
= get_field(s
->fields
, r3
);
2773 TCGv_i64 t8
= tcg_const_i64(8);
2776 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2780 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2784 tcg_temp_free_i64(t8
);
2788 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2791 o
->g_out
= o
->g_in2
;
2792 TCGV_UNUSED_I64(o
->in2
);
2797 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2801 o
->g_out
= o
->g_in1
;
2802 o
->g_out2
= o
->g_in2
;
2803 TCGV_UNUSED_I64(o
->in1
);
2804 TCGV_UNUSED_I64(o
->in2
);
2805 o
->g_in1
= o
->g_in2
= false;
2809 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2811 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2812 potential_page_fault(s
);
2813 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2814 tcg_temp_free_i32(l
);
2818 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2820 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2821 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2822 potential_page_fault(s
);
2823 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2824 tcg_temp_free_i32(r1
);
2825 tcg_temp_free_i32(r2
);
2830 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2832 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2833 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2834 potential_page_fault(s
);
2835 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2836 tcg_temp_free_i32(r1
);
2837 tcg_temp_free_i32(r3
);
2842 #ifndef CONFIG_USER_ONLY
2843 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2845 int r1
= get_field(s
->fields
, l1
);
2846 check_privileged(s
);
2847 potential_page_fault(s
);
2848 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2853 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2855 int r1
= get_field(s
->fields
, l1
);
2856 check_privileged(s
);
2857 potential_page_fault(s
);
2858 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2864 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2866 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2870 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2872 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2873 return_low128(o
->out2
);
2877 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2879 gen_helper_nabs_i64(o
->out
, o
->in2
);
2883 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2885 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2886 potential_page_fault(s
);
2887 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2888 tcg_temp_free_i32(l
);
2893 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2895 tcg_gen_neg_i64(o
->out
, o
->in2
);
2899 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2901 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2902 potential_page_fault(s
);
2903 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2904 tcg_temp_free_i32(l
);
2909 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2911 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2915 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2917 int shift
= s
->insn
->data
& 0xff;
2918 int size
= s
->insn
->data
>> 8;
2919 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2922 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2923 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2925 /* Produce the CC from only the bits manipulated. */
2926 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2927 set_cc_nz_u64(s
, cc_dst
);
2931 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2933 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2937 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2939 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2943 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2945 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2949 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2951 TCGv_i32 t1
= tcg_temp_new_i32();
2952 TCGv_i32 t2
= tcg_temp_new_i32();
2953 TCGv_i32 to
= tcg_temp_new_i32();
2954 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2955 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2956 tcg_gen_rotl_i32(to
, t1
, t2
);
2957 tcg_gen_extu_i32_i64(o
->out
, to
);
2958 tcg_temp_free_i32(t1
);
2959 tcg_temp_free_i32(t2
);
2960 tcg_temp_free_i32(to
);
2964 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2966 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2970 #ifndef CONFIG_USER_ONLY
2971 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2973 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2974 check_privileged(s
);
2975 potential_page_fault(s
);
2976 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2977 tcg_temp_free_i32(r1
);
2982 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2984 uint64_t sign
= 1ull << s
->insn
->data
;
2985 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2986 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2987 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2988 /* The arithmetic left shift is curious in that it does not affect
2989 the sign bit. Copy that over from the source unchanged. */
2990 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2991 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2992 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2996 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2998 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3002 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3004 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3008 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3010 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3014 #ifndef CONFIG_USER_ONLY
3015 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3017 check_privileged(s
);
3018 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3022 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3024 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3025 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3026 check_privileged(s
);
3027 potential_page_fault(s
);
3028 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3029 tcg_temp_free_i32(r1
);
3030 tcg_temp_free_i32(r3
);
3034 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3036 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3037 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3038 check_privileged(s
);
3039 potential_page_fault(s
);
3040 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3041 tcg_temp_free_i32(r1
);
3042 tcg_temp_free_i32(r3
);
3046 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3048 uint64_t i2
= get_field(s
->fields
, i2
);
3051 check_privileged(s
);
3053 /* It is important to do what the instruction name says: STORE THEN.
3054 If we let the output hook perform the store then if we fault and
3055 restart, we'll have the wrong SYSTEM MASK in place. */
3056 t
= tcg_temp_new_i64();
3057 tcg_gen_shri_i64(t
, psw_mask
, 56);
3058 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3059 tcg_temp_free_i64(t
);
3061 if (s
->fields
->op
== 0xac) {
3062 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3063 (i2
<< 56) | 0x00ffffffffffffffull
);
3065 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3071 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3073 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3077 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3079 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3083 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3085 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3089 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3091 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3095 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3097 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3098 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3099 potential_page_fault(s
);
3100 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3101 tcg_temp_free_i32(r1
);
3102 tcg_temp_free_i32(r3
);
3106 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3108 int m3
= get_field(s
->fields
, m3
);
3109 int pos
, base
= s
->insn
->data
;
3110 TCGv_i64 tmp
= tcg_temp_new_i64();
3112 pos
= base
+ ctz32(m3
) * 8;
3115 /* Effectively a 32-bit store. */
3116 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3117 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3123 /* Effectively a 16-bit store. */
3124 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3125 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3132 /* Effectively an 8-bit store. */
3133 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3134 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3138 /* This is going to be a sequence of shifts and stores. */
3139 pos
= base
+ 32 - 8;
3142 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3143 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3144 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3146 m3
= (m3
<< 1) & 0xf;
3151 tcg_temp_free_i64(tmp
);
3155 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3157 int r1
= get_field(s
->fields
, r1
);
3158 int r3
= get_field(s
->fields
, r3
);
3159 int size
= s
->insn
->data
;
3160 TCGv_i64 tsize
= tcg_const_i64(size
);
3164 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3166 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3171 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3175 tcg_temp_free_i64(tsize
);
3179 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3181 int r1
= get_field(s
->fields
, r1
);
3182 int r3
= get_field(s
->fields
, r3
);
3183 TCGv_i64 t
= tcg_temp_new_i64();
3184 TCGv_i64 t4
= tcg_const_i64(4);
3185 TCGv_i64 t32
= tcg_const_i64(32);
3188 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3189 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3193 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3197 tcg_temp_free_i64(t
);
3198 tcg_temp_free_i64(t4
);
3199 tcg_temp_free_i64(t32
);
3203 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3205 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3209 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3214 tcg_gen_not_i64(o
->in2
, o
->in2
);
3215 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3217 /* XXX possible optimization point */
3219 cc
= tcg_temp_new_i64();
3220 tcg_gen_extu_i32_i64(cc
, cc_op
);
3221 tcg_gen_shri_i64(cc
, cc
, 1);
3222 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3223 tcg_temp_free_i64(cc
);
3227 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3234 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3235 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3236 tcg_temp_free_i32(t
);
3238 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3239 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3240 tcg_temp_free_i32(t
);
3242 gen_exception(EXCP_SVC
);
3243 return EXIT_NORETURN
;
3246 #ifndef CONFIG_USER_ONLY
3247 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3249 potential_page_fault(s
);
3250 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3256 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3258 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3259 potential_page_fault(s
);
3260 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3261 tcg_temp_free_i32(l
);
3266 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3268 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3269 potential_page_fault(s
);
3270 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3271 tcg_temp_free_i32(l
);
3275 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3277 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3278 potential_page_fault(s
);
3279 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3280 tcg_temp_free_i32(l
);
3285 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3287 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3291 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3293 int shift
= s
->insn
->data
& 0xff;
3294 int size
= s
->insn
->data
>> 8;
3295 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3298 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3299 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3301 /* Produce the CC from only the bits manipulated. */
3302 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3303 set_cc_nz_u64(s
, cc_dst
);
3307 /* ====================================================================== */
3308 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3309 the original inputs), update the various cc data structures in order to
3310 be able to compute the new condition code. */
3312 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3314 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3317 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3319 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3322 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3324 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3327 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3329 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3332 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3334 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3337 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3339 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3342 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3344 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3347 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3349 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3352 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3354 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3357 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3359 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3362 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3364 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3367 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3369 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3372 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3374 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3377 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3379 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3382 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3384 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3387 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3389 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3392 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3394 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3395 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3398 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3400 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3403 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3405 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3408 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3410 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3413 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3415 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3418 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3420 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3423 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3425 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3428 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3430 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3433 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3435 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3438 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3440 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3443 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3445 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3448 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3450 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3453 /* ====================================================================== */
3454 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3455 with the TCG register to which we will write. Used in combination with
3456 the "wout" generators, in some cases we need a new temporary, and in
3457 some cases we can write to a TCG global. */
3459 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3461 o
->out
= tcg_temp_new_i64();
3464 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3466 o
->out
= tcg_temp_new_i64();
3467 o
->out2
= tcg_temp_new_i64();
3470 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3472 o
->out
= regs
[get_field(f
, r1
)];
3476 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3478 /* ??? Specification exception: r1 must be even. */
3479 int r1
= get_field(f
, r1
);
3481 o
->out2
= regs
[(r1
+ 1) & 15];
3482 o
->g_out
= o
->g_out2
= true;
3485 /* ====================================================================== */
3486 /* The "Write OUTput" generators. These generally perform some non-trivial
3487 copy of data to TCG globals, or to main memory. The trivial cases are
3488 generally handled by having a "prep" generator install the TCG global
3489 as the destination of the operation. */
3491 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3493 store_reg(get_field(f
, r1
), o
->out
);
3496 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3498 int r1
= get_field(f
, r1
);
3499 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3502 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3504 int r1
= get_field(f
, r1
);
3505 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3508 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3510 store_reg32_i64(get_field(f
, r1
), o
->out
);
3513 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3515 /* ??? Specification exception: r1 must be even. */
3516 int r1
= get_field(f
, r1
);
3517 store_reg32_i64(r1
, o
->out
);
3518 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3521 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 /* ??? Specification exception: r1 must be even. */
3524 int r1
= get_field(f
, r1
);
3525 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3526 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3527 store_reg32_i64(r1
, o
->out
);
3530 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3532 store_freg32_i64(get_field(f
, r1
), o
->out
);
3535 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3537 store_freg(get_field(f
, r1
), o
->out
);
3540 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3542 int f1
= get_field(s
->fields
, r1
);
3543 store_freg(f1
, o
->out
);
3544 store_freg((f1
+ 2) & 15, o
->out2
);
3547 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3549 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3550 store_reg32_i64(get_field(f
, r1
), o
->out
);
3554 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3556 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3557 store_freg32_i64(get_field(f
, r1
), o
->out
);
3561 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3563 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3566 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3568 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3571 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3573 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3576 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3578 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3581 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3583 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3586 /* ====================================================================== */
3587 /* The "INput 1" generators. These load the first operand to an insn. */
3589 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3591 o
->in1
= load_reg(get_field(f
, r1
));
3594 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3596 o
->in1
= regs
[get_field(f
, r1
)];
3600 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3602 o
->in1
= tcg_temp_new_i64();
3603 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3606 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3608 o
->in1
= tcg_temp_new_i64();
3609 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3612 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3614 o
->in1
= tcg_temp_new_i64();
3615 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3618 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3620 /* ??? Specification exception: r1 must be even. */
3621 int r1
= get_field(f
, r1
);
3622 o
->in1
= load_reg((r1
+ 1) & 15);
3625 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3627 /* ??? Specification exception: r1 must be even. */
3628 int r1
= get_field(f
, r1
);
3629 o
->in1
= tcg_temp_new_i64();
3630 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3633 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3635 /* ??? Specification exception: r1 must be even. */
3636 int r1
= get_field(f
, r1
);
3637 o
->in1
= tcg_temp_new_i64();
3638 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3641 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3643 /* ??? Specification exception: r1 must be even. */
3644 int r1
= get_field(f
, r1
);
3645 o
->in1
= tcg_temp_new_i64();
3646 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3649 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3651 o
->in1
= load_reg(get_field(f
, r2
));
3654 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3656 o
->in1
= load_reg(get_field(f
, r3
));
3659 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3661 o
->in1
= regs
[get_field(f
, r3
)];
3665 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3667 o
->in1
= tcg_temp_new_i64();
3668 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3671 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3673 o
->in1
= tcg_temp_new_i64();
3674 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3677 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3679 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3682 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3684 o
->in1
= fregs
[get_field(f
, r1
)];
3688 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3690 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3693 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3695 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3696 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3699 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3702 o
->in1
= tcg_temp_new_i64();
3703 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3706 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3709 o
->in1
= tcg_temp_new_i64();
3710 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3713 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3716 o
->in1
= tcg_temp_new_i64();
3717 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3720 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3723 o
->in1
= tcg_temp_new_i64();
3724 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3727 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3730 o
->in1
= tcg_temp_new_i64();
3731 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3734 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3737 o
->in1
= tcg_temp_new_i64();
3738 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3741 /* ====================================================================== */
3742 /* The "INput 2" generators. These load the second operand to an insn. */
3744 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3746 o
->in2
= regs
[get_field(f
, r1
)];
3750 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3752 o
->in2
= tcg_temp_new_i64();
3753 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3756 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3758 o
->in2
= tcg_temp_new_i64();
3759 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3762 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3764 o
->in2
= load_reg(get_field(f
, r2
));
3767 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3769 o
->in2
= regs
[get_field(f
, r2
)];
3773 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3775 int r2
= get_field(f
, r2
);
3777 o
->in2
= load_reg(r2
);
3781 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3783 o
->in2
= tcg_temp_new_i64();
3784 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3787 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3789 o
->in2
= tcg_temp_new_i64();
3790 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3793 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3795 o
->in2
= tcg_temp_new_i64();
3796 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3799 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3801 o
->in2
= tcg_temp_new_i64();
3802 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3805 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3807 o
->in2
= load_reg(get_field(f
, r3
));
3810 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3812 o
->in2
= tcg_temp_new_i64();
3813 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3816 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3818 o
->in2
= tcg_temp_new_i64();
3819 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3822 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3824 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3827 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3829 o
->in2
= fregs
[get_field(f
, r2
)];
3833 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3835 int f2
= get_field(f
, r2
);
3837 o
->in2
= fregs
[(f2
+ 2) & 15];
3838 o
->g_in1
= o
->g_in2
= true;
3841 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3843 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3844 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3847 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3849 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3852 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 help_l2_shift(s
, f
, o
, 31);
3857 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3859 help_l2_shift(s
, f
, o
, 63);
3862 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3865 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3868 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3871 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3874 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3877 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3880 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3883 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3886 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3889 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3892 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3895 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3898 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3901 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3904 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3907 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3910 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3913 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3916 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3919 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3922 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3924 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3927 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3929 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3932 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3934 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3937 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3939 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3942 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3944 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3945 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3948 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3950 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3951 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3954 /* ====================================================================== */
3956 /* Find opc within the table of insns. This is formulated as a switch
3957 statement so that (1) we get compile-time notice of cut-paste errors
3958 for duplicated opcodes, and (2) the compiler generates the binary
3959 search tree, rather than us having to post-process the table. */
3961 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3962 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3964 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3966 enum DisasInsnEnum
{
3967 #include "insn-data.def"
3971 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3976 .help_in1 = in1_##I1, \
3977 .help_in2 = in2_##I2, \
3978 .help_prep = prep_##P, \
3979 .help_wout = wout_##W, \
3980 .help_cout = cout_##CC, \
3981 .help_op = op_##OP, \
3985 /* Allow 0 to be used for NULL in the table below. */
3993 static const DisasInsn insn_info
[] = {
3994 #include "insn-data.def"
3998 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3999 case OPC: return &insn_info[insn_ ## NM];
4001 static const DisasInsn
*lookup_opc(uint16_t opc
)
4004 #include "insn-data.def"
4013 /* Extract a field from the insn. The INSN should be left-aligned in
4014 the uint64_t so that we can more easily utilize the big-bit-endian
4015 definitions we extract from the Principals of Operation. */
4017 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4025 /* Zero extract the field from the insn. */
4026 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4028 /* Sign-extend, or un-swap the field as necessary. */
4030 case 0: /* unsigned */
4032 case 1: /* signed */
4033 assert(f
->size
<= 32);
4034 m
= 1u << (f
->size
- 1);
4037 case 2: /* dl+dh split, signed 20 bit. */
4038 r
= ((int8_t)r
<< 12) | (r
>> 8);
4044 /* Validate that the "compressed" encoding we selected above is valid.
4045 I.e. we havn't make two different original fields overlap. */
4046 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4047 o
->presentC
|= 1 << f
->indexC
;
4048 o
->presentO
|= 1 << f
->indexO
;
4050 o
->c
[f
->indexC
] = r
;
4053 /* Lookup the insn at the current PC, extracting the operands into O and
4054 returning the info struct for the insn. Returns NULL for invalid insn. */
4056 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
/* pc is a local copy of the current guest PC; insn accumulates the
   left-aligned instruction bytes. */
4059 uint64_t insn
, pc
= s
->pc
;
4061 const DisasInsn
*info
;
/* Read the first halfword; its high byte (the primary opcode) determines
   the instruction length via get_ilen(). */
4063 insn
= ld_code2(env
, pc
);
4064 op
= (insn
>> 8) & 0xff;
4065 ilen
= get_ilen(op
);
4066 s
->next_pc
= s
->pc
+ ilen
;
/* Re-load as a left-aligned 64-bit value; the branch selecting the 4-byte
   vs 6-byte load path by ilen is on lines outside this view. */
4073 insn
= ld_code4(env
, pc
) << 32;
4076 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4082 /* We can't actually determine the insn format until we've looked up
4083 the full insn opcode. Which we can't do without locating the
4084 secondary opcode. Assume by default that OP2 is at bit 40; for
4085 those smaller insns that don't actually have a secondary opcode
4086 this will correctly result in OP2 = 0. */
4092 case 0xb2: /* S, RRF, RRE */
4093 case 0xb3: /* RRE, RRD, RRF */
4094 case 0xb9: /* RRE, RRF */
4095 case 0xe5: /* SSE, SIL */
/* Secondary opcode is the second byte (big-endian bits 8-15). */
4096 op2
= (insn
<< 8) >> 56;
4100 case 0xc0: /* RIL */
4101 case 0xc2: /* RIL */
4102 case 0xc4: /* RIL */
4103 case 0xc6: /* RIL */
4104 case 0xc8: /* SSF */
4105 case 0xcc: /* RIL */
/* Secondary opcode is the 4-bit field at bits 12-15. */
4106 op2
= (insn
<< 12) >> 60;
4108 case 0xd0 ... 0xdf: /* SS */
4114 case 0xee ... 0xf3: /* SS */
4115 case 0xf8 ... 0xfd: /* SS */
/* (Intervening lines not visible.) Default path: OP2 at bits 40-47. */
4119 op2
= (insn
<< 40) >> 56;
/* Clear all operand fields before extraction. */
4123 memset(f
, 0, sizeof(*f
));
4127 /* Lookup the instruction. */
4128 info
= lookup_opc(op
<< 8 | op2
);
4130 /* If we found it, extract the operands. */
4132 DisasFormat fmt
= info
->fmt
;
4135 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4136 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction at s->pc into TCG ops, returning
   the resulting exit status (NO_EXIT to keep translating this TB). */
4142 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4144 const DisasInsn
*insn
;
4145 ExitStatus ret
= NO_EXIT
;
/* Decode the insn and extract its operand fields into f. */
4149 insn
= extract_insn(env
, s
, &f
);
4151 /* If not found, try the old interpreter. This includes ILLOPC. */
4153 disas_s390_insn(env
, s
);
/* Translate the legacy interpreter's is_jmp result into an ExitStatus. */
4154 switch (s
->is_jmp
) {
4162 ret
= EXIT_PC_UPDATED
;
4165 ret
= EXIT_NORETURN
;
4175 /* Set up the structures we use to communicate with the helpers. */
/* The g_* flags mark operands the free code below must leave alone
   (presumably TCG globals — the name suggests it). */
4178 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4179 TCGV_UNUSED_I64(o
.out
);
4180 TCGV_UNUSED_I64(o
.out2
);
4181 TCGV_UNUSED_I64(o
.in1
);
4182 TCGV_UNUSED_I64(o
.in2
);
4183 TCGV_UNUSED_I64(o
.addr1
);
4185 /* Implement the instruction. */
/* Each helper stage is optional (NULL-checked): load inputs, prepare
   outputs, perform the op, write outputs, then cout (presumably the
   condition-code output stage — confirm against the insn table). */
4186 if (insn
->help_in1
) {
4187 insn
->help_in1(s
, &f
, &o
);
4189 if (insn
->help_in2
) {
4190 insn
->help_in2(s
, &f
, &o
);
4192 if (insn
->help_prep
) {
4193 insn
->help_prep(s
, &f
, &o
);
4195 if (insn
->help_op
) {
4196 ret
= insn
->help_op(s
, &o
);
4198 if (insn
->help_wout
) {
4199 insn
->help_wout(s
, &f
, &o
);
4201 if (insn
->help_cout
) {
4202 insn
->help_cout(s
, &o
);
4205 /* Free any temporaries created by the helpers. */
4206 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4207 tcg_temp_free_i64(o
.out
);
4209 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4210 tcg_temp_free_i64(o
.out2
);
4212 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4213 tcg_temp_free_i64(o
.in1
);
4215 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4216 tcg_temp_free_i64(o
.in2
);
4218 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4219 tcg_temp_free_i64(o
.addr1
);
4222 /* Advance to the next instruction. */
/* Core translation loop: turn guest code starting at tb->pc into one TCG
   translation block, stopping at a page boundary, buffer exhaustion,
   single-step, or a control-flow insn. */
4227 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4228 TranslationBlock
*tb
,
/* (The third parameter — the search_pc flag per the two callers below —
   is on a line outside this view.) */
4232 target_ulong pc_start
;
4233 uint64_t next_page_start
;
4234 uint16_t *gen_opc_end
;
4236 int num_insns
, max_insns
;
/* Outside 64-bit mode, addresses are masked to 31 bits. */
4244 if (!(tb
->flags
& FLAG_MASK_64
)) {
4245 pc_start
&= 0x7fffffff;
4250 dc
.cc_op
= CC_OP_DYNAMIC
;
4251 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4252 dc
.is_jmp
= DISAS_NEXT
;
4254 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4256 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4259 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4260 if (max_insns
== 0) {
4261 max_insns
= CF_COUNT_MASK
;
/* Per-insn bookkeeping (guest PC, cc_op, icount) for the search_pc
   path; restore_state_to_opc reads these arrays back. */
4268 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4272 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4275 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4276 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4277 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4278 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4280 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4284 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4285 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop at a breakpoint so the debug exception can be raised below. */
4289 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4290 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4291 if (bp
->pc
== dc
.pc
) {
4292 status
= EXIT_PC_STALE
;
4298 if (status
== NO_EXIT
) {
4299 status
= translate_one(env
, &dc
);
4302 /* If we reach a page boundary, are single stepping,
4303 or exhaust instruction count, stop generation. */
4304 if (status
== NO_EXIT
4305 && (dc
.pc
>= next_page_start
4306 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4307 || num_insns
>= max_insns
4309 || env
->singlestep_enabled
)) {
4310 status
= EXIT_PC_STALE
;
4312 } while (status
== NO_EXIT
);
4314 if (tb
->cflags
& CF_LAST_IO
) {
/* Epilogue: flush PSW address / cc state back depending on how the
   translation loop exited. */
4323 update_psw_addr(&dc
);
4325 case EXIT_PC_UPDATED
:
4326 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4327 gen_op_calc_cc(&dc
);
4329 /* Next TB starts off with CC_OP_DYNAMIC,
4330 so make sure the cc op type is in env */
4331 gen_op_set_cc_op(&dc
);
4334 gen_exception(EXCP_DEBUG
);
4336 /* Generate the return instruction */
4344 gen_icount_end(tb
, num_insns
);
4345 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4347 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4350 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4353 tb
->size
= dc
.pc
- pc_start
;
4354 tb
->icount
= num_insns
;
4357 #if defined(S390X_DEBUG_DISAS)
4358 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4359 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4360 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4366 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4368 gen_intermediate_code_internal(env
, tb
, 0);
4371 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4373 gen_intermediate_code_internal(env
, tb
, 1);
4376 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4379 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4380 cc_op
= gen_opc_cc_op
[pc_pos
];
4381 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {