4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
36 /* global register indexes */
37 static TCGv_ptr cpu_env
;
39 #include "exec/gen-icount.h"
45 /* Information that (most) every instruction needs to manipulate. */
46 typedef struct DisasContext DisasContext
;
47 typedef struct DisasInsn DisasInsn
;
48 typedef struct DisasFields DisasFields
;
51 struct TranslationBlock
*tb
;
52 const DisasInsn
*insn
;
56 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 static void gen_op_calc_cc(DisasContext
*s
);
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static inline void debug_insn(uint64_t insn
)
83 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
86 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
88 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
89 if (s
->tb
->flags
& FLAG_MASK_32
) {
90 return pc
| 0x80000000;
96 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
121 cpu_fprintf(f
, "\n");
127 #ifndef CONFIG_USER_ONLY
128 for (i
= 0; i
< 16; i
++) {
129 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
131 cpu_fprintf(f
, "\n");
138 #ifdef DEBUG_INLINE_BRANCHES
139 for (i
= 0; i
< CC_OP_MAX
; i
++) {
140 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
141 inline_branch_miss
[i
], inline_branch_hit
[i
]);
145 cpu_fprintf(f
, "\n");
148 static TCGv_i64 psw_addr
;
149 static TCGv_i64 psw_mask
;
151 static TCGv_i32 cc_op
;
152 static TCGv_i64 cc_src
;
153 static TCGv_i64 cc_dst
;
154 static TCGv_i64 cc_vr
;
156 static char cpu_reg_names
[32][4];
157 static TCGv_i64 regs
[16];
158 static TCGv_i64 fregs
[16];
160 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
162 void s390x_translate_init(void)
166 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
167 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
168 offsetof(CPUS390XState
, psw
.addr
),
170 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.mask
),
174 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
176 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
178 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
180 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
185 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, regs
[i
]),
190 for (i
= 0; i
< 16; i
++) {
191 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
192 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
193 offsetof(CPUS390XState
, fregs
[i
].d
),
194 cpu_reg_names
[i
+ 16]);
197 /* register helpers */
202 static inline TCGv_i64
load_reg(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_mov_i64(r
, regs
[reg
]);
209 static inline TCGv_i64
load_freg(int reg
)
211 TCGv_i64 r
= tcg_temp_new_i64();
212 tcg_gen_mov_i64(r
, fregs
[reg
]);
216 static inline TCGv_i32
load_freg32(int reg
)
218 TCGv_i32 r
= tcg_temp_new_i32();
219 #if HOST_LONG_BITS == 32
220 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
222 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
227 static inline TCGv_i32
load_reg32(int reg
)
229 TCGv_i32 r
= tcg_temp_new_i32();
230 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
234 static inline TCGv_i64
load_reg32_i64(int reg
)
236 TCGv_i64 r
= tcg_temp_new_i64();
237 tcg_gen_ext32s_i64(r
, regs
[reg
]);
241 static inline void store_reg(int reg
, TCGv_i64 v
)
243 tcg_gen_mov_i64(regs
[reg
], v
);
246 static inline void store_freg(int reg
, TCGv_i64 v
)
248 tcg_gen_mov_i64(fregs
[reg
], v
);
251 static inline void store_reg32(int reg
, TCGv_i32 v
)
253 /* 32 bit register writes keep the upper half */
254 #if HOST_LONG_BITS == 32
255 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
257 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
258 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
262 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
264 /* 32 bit register writes keep the upper half */
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
268 static inline void store_reg16(int reg
, TCGv_i32 v
)
270 /* 16 bit register writes keep the upper bytes */
271 #if HOST_LONG_BITS == 32
272 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
274 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
275 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
279 static inline void store_reg8(int reg
, TCGv_i64 v
)
281 /* 8 bit register writes keep the upper bytes */
282 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 8);
285 static inline void store_freg32(int reg
, TCGv_i32 v
)
287 /* 32 bit register writes keep the lower half */
288 #if HOST_LONG_BITS == 32
289 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
291 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
292 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
296 static inline void return_low128(TCGv_i64 dest
)
298 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
301 static inline void update_psw_addr(DisasContext
*s
)
304 tcg_gen_movi_i64(psw_addr
, s
->pc
);
307 static inline void potential_page_fault(DisasContext
*s
)
309 #ifndef CONFIG_USER_ONLY
315 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
317 return (uint64_t)cpu_lduw_code(env
, pc
);
320 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
322 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
325 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
327 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
330 static inline int get_mem_index(DisasContext
*s
)
332 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
333 case PSW_ASC_PRIMARY
>> 32:
335 case PSW_ASC_SECONDARY
>> 32:
337 case PSW_ASC_HOME
>> 32:
345 static void gen_exception(int excp
)
347 TCGv_i32 tmp
= tcg_const_i32(excp
);
348 gen_helper_exception(cpu_env
, tmp
);
349 tcg_temp_free_i32(tmp
);
352 static void gen_program_exception(DisasContext
*s
, int code
)
356 /* Remember what pgm exeption this was. */
357 tmp
= tcg_const_i32(code
);
358 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
359 tcg_temp_free_i32(tmp
);
361 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
362 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
363 tcg_temp_free_i32(tmp
);
365 /* Advance past instruction. */
372 /* Trigger exception. */
373 gen_exception(EXCP_PGM
);
376 s
->is_jmp
= DISAS_EXCP
;
379 static inline void gen_illegal_opcode(DisasContext
*s
)
381 gen_program_exception(s
, PGM_SPECIFICATION
);
384 static inline void check_privileged(DisasContext
*s
)
386 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
387 gen_program_exception(s
, PGM_PRIVILEGED
);
391 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
395 /* 31-bitify the immediate part; register contents are dealt with below */
396 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
402 tmp
= tcg_const_i64(d2
);
403 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
408 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
412 tmp
= tcg_const_i64(d2
);
413 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
418 tmp
= tcg_const_i64(d2
);
421 /* 31-bit mode mask if there are values loaded from registers */
422 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
423 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
429 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
431 s
->cc_op
= CC_OP_CONST0
+ val
;
434 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
436 tcg_gen_discard_i64(cc_src
);
437 tcg_gen_mov_i64(cc_dst
, dst
);
438 tcg_gen_discard_i64(cc_vr
);
442 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
444 tcg_gen_discard_i64(cc_src
);
445 tcg_gen_extu_i32_i64(cc_dst
, dst
);
446 tcg_gen_discard_i64(cc_vr
);
450 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
453 tcg_gen_mov_i64(cc_src
, src
);
454 tcg_gen_mov_i64(cc_dst
, dst
);
455 tcg_gen_discard_i64(cc_vr
);
459 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
462 tcg_gen_extu_i32_i64(cc_src
, src
);
463 tcg_gen_extu_i32_i64(cc_dst
, dst
);
464 tcg_gen_discard_i64(cc_vr
);
468 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
469 TCGv_i64 dst
, TCGv_i64 vr
)
471 tcg_gen_mov_i64(cc_src
, src
);
472 tcg_gen_mov_i64(cc_dst
, dst
);
473 tcg_gen_mov_i64(cc_vr
, vr
);
477 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
479 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
482 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
484 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
487 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
490 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
493 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
496 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
499 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
501 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
504 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
506 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
509 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
511 /* XXX optimize for the constant? put it in s? */
512 TCGv_i32 tmp
= tcg_const_i32(v2
);
513 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
514 tcg_temp_free_i32(tmp
);
517 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
519 TCGv_i32 tmp
= tcg_const_i32(v2
);
520 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
521 tcg_temp_free_i32(tmp
);
524 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
526 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
529 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
531 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
534 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
536 TCGv_i64 tmp
= tcg_const_i64(v2
);
538 tcg_temp_free_i64(tmp
);
541 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
543 TCGv_i64 tmp
= tcg_const_i64(v2
);
545 tcg_temp_free_i64(tmp
);
548 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
550 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
553 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
555 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
558 static void set_cc_icm(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
560 gen_op_update2_cc_i32(s
, CC_OP_ICM
, v1
, v2
);
563 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
565 tcg_gen_extu_i32_i64(cc_src
, v1
);
566 tcg_gen_mov_i64(cc_dst
, v2
);
567 tcg_gen_discard_i64(cc_vr
);
568 s
->cc_op
= CC_OP_LTGT_F32
;
571 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
573 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
576 /* CC value is in env->cc_op */
577 static inline void set_cc_static(DisasContext
*s
)
579 tcg_gen_discard_i64(cc_src
);
580 tcg_gen_discard_i64(cc_dst
);
581 tcg_gen_discard_i64(cc_vr
);
582 s
->cc_op
= CC_OP_STATIC
;
585 static inline void gen_op_set_cc_op(DisasContext
*s
)
587 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
588 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
592 static inline void gen_update_cc_op(DisasContext
*s
)
597 /* calculates cc into cc_op */
598 static void gen_op_calc_cc(DisasContext
*s
)
600 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
601 TCGv_i64 dummy
= tcg_const_i64(0);
608 /* s->cc_op is the cc value */
609 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
612 /* env->cc_op already is the cc value */
626 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
631 case CC_OP_LTUGTU_32
:
632 case CC_OP_LTUGTU_64
:
639 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
654 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
657 /* unknown operation - assume 3 arguments and cc_op in env */
658 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
664 tcg_temp_free_i32(local_cc_op
);
665 tcg_temp_free_i64(dummy
);
667 /* We now have cc in cc_op as constant */
671 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
675 *r1
= (insn
>> 4) & 0xf;
679 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
680 int *x2
, int *b2
, int *d2
)
684 *r1
= (insn
>> 20) & 0xf;
685 *x2
= (insn
>> 16) & 0xf;
686 *b2
= (insn
>> 12) & 0xf;
689 return get_address(s
, *x2
, *b2
, *d2
);
692 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
697 *r1
= (insn
>> 20) & 0xf;
699 *r3
= (insn
>> 16) & 0xf;
700 *b2
= (insn
>> 12) & 0xf;
704 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
709 *i2
= (insn
>> 16) & 0xff;
710 *b1
= (insn
>> 12) & 0xf;
713 return get_address(s
, 0, *b1
, *d1
);
716 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
718 /* NOTE: we handle the case where the TB spans two pages here */
719 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
720 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
721 && !s
->singlestep_enabled
722 && !(s
->tb
->cflags
& CF_LAST_IO
));
725 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
729 if (use_goto_tb(s
, pc
)) {
730 tcg_gen_goto_tb(tb_num
);
731 tcg_gen_movi_i64(psw_addr
, pc
);
732 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
734 /* jump to another page: currently not optimized */
735 tcg_gen_movi_i64(psw_addr
, pc
);
740 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
742 #ifdef DEBUG_INLINE_BRANCHES
743 inline_branch_miss
[cc_op
]++;
747 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
749 #ifdef DEBUG_INLINE_BRANCHES
750 inline_branch_hit
[cc_op
]++;
754 /* Table of mask values to comparison codes, given a comparison as input.
755 For a true comparison CC=3 will never be set, but we treat this
756 conservatively for possible use when CC=3 indicates overflow. */
757 static const TCGCond ltgt_cond
[16] = {
758 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
759 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
760 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
761 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
762 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
763 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
764 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
765 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
768 /* Table of mask values to comparison codes, given a logic op as input.
769 For such, only CC=0 and CC=1 should be possible. */
770 static const TCGCond nz_cond
[16] = {
772 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
774 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
776 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
777 /* EQ | NE | x | x */
778 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
781 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
782 details required to generate a TCG comparison. */
783 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
786 enum cc_op old_cc_op
= s
->cc_op
;
788 if (mask
== 15 || mask
== 0) {
789 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
792 c
->g1
= c
->g2
= true;
797 /* Find the TCG condition for the mask + cc op. */
803 cond
= ltgt_cond
[mask
];
804 if (cond
== TCG_COND_NEVER
) {
807 account_inline_branch(s
, old_cc_op
);
810 case CC_OP_LTUGTU_32
:
811 case CC_OP_LTUGTU_64
:
812 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
813 if (cond
== TCG_COND_NEVER
) {
816 account_inline_branch(s
, old_cc_op
);
820 cond
= nz_cond
[mask
];
821 if (cond
== TCG_COND_NEVER
) {
824 account_inline_branch(s
, old_cc_op
);
839 account_inline_branch(s
, old_cc_op
);
854 account_inline_branch(s
, old_cc_op
);
859 /* Calculate cc value. */
864 /* Jump based on CC. We'll load up the real cond below;
865 the assignment here merely avoids a compiler warning. */
866 account_noninline_branch(s
, old_cc_op
);
867 old_cc_op
= CC_OP_STATIC
;
868 cond
= TCG_COND_NEVER
;
872 /* Load up the arguments of the comparison. */
874 c
->g1
= c
->g2
= false;
878 c
->u
.s32
.a
= tcg_temp_new_i32();
879 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
880 c
->u
.s32
.b
= tcg_const_i32(0);
883 case CC_OP_LTUGTU_32
:
885 c
->u
.s32
.a
= tcg_temp_new_i32();
886 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
887 c
->u
.s32
.b
= tcg_temp_new_i32();
888 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
895 c
->u
.s64
.b
= tcg_const_i64(0);
899 case CC_OP_LTUGTU_64
:
902 c
->g1
= c
->g2
= true;
907 c
->u
.s64
.a
= tcg_temp_new_i64();
908 c
->u
.s64
.b
= tcg_const_i64(0);
909 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
917 case 0x8 | 0x4 | 0x2: /* cc != 3 */
919 c
->u
.s32
.b
= tcg_const_i32(3);
921 case 0x8 | 0x4 | 0x1: /* cc != 2 */
923 c
->u
.s32
.b
= tcg_const_i32(2);
925 case 0x8 | 0x2 | 0x1: /* cc != 1 */
927 c
->u
.s32
.b
= tcg_const_i32(1);
929 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
932 c
->u
.s32
.a
= tcg_temp_new_i32();
933 c
->u
.s32
.b
= tcg_const_i32(0);
934 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
936 case 0x8 | 0x4: /* cc < 2 */
938 c
->u
.s32
.b
= tcg_const_i32(2);
940 case 0x8: /* cc == 0 */
942 c
->u
.s32
.b
= tcg_const_i32(0);
944 case 0x4 | 0x2 | 0x1: /* cc != 0 */
946 c
->u
.s32
.b
= tcg_const_i32(0);
948 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
951 c
->u
.s32
.a
= tcg_temp_new_i32();
952 c
->u
.s32
.b
= tcg_const_i32(0);
953 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
955 case 0x4: /* cc == 1 */
957 c
->u
.s32
.b
= tcg_const_i32(1);
959 case 0x2 | 0x1: /* cc > 1 */
961 c
->u
.s32
.b
= tcg_const_i32(1);
963 case 0x2: /* cc == 2 */
965 c
->u
.s32
.b
= tcg_const_i32(2);
967 case 0x1: /* cc == 3 */
969 c
->u
.s32
.b
= tcg_const_i32(3);
972 /* CC is masked by something else: (8 >> cc) & mask. */
975 c
->u
.s32
.a
= tcg_const_i32(8);
976 c
->u
.s32
.b
= tcg_const_i32(0);
977 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
978 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
989 static void free_compare(DisasCompare
*c
)
993 tcg_temp_free_i64(c
->u
.s64
.a
);
995 tcg_temp_free_i32(c
->u
.s32
.a
);
1000 tcg_temp_free_i64(c
->u
.s64
.b
);
1002 tcg_temp_free_i32(c
->u
.s32
.b
);
1007 static void gen_op_mvc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1011 int l_memset
= gen_new_label();
1012 int l_out
= gen_new_label();
1013 TCGv_i64 dest
= tcg_temp_local_new_i64();
1014 TCGv_i64 src
= tcg_temp_local_new_i64();
1017 /* Find out if we should use the inline version of mvc */
1032 /* Fall back to helper */
1033 vl
= tcg_const_i32(l
);
1034 potential_page_fault(s
);
1035 gen_helper_mvc(cpu_env
, vl
, s1
, s2
);
1036 tcg_temp_free_i32(vl
);
1040 tcg_gen_mov_i64(dest
, s1
);
1041 tcg_gen_mov_i64(src
, s2
);
1043 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
1044 /* XXX what if we overflow while moving? */
1045 tcg_gen_andi_i64(dest
, dest
, 0x7fffffffUL
);
1046 tcg_gen_andi_i64(src
, src
, 0x7fffffffUL
);
1049 tmp
= tcg_temp_new_i64();
1050 tcg_gen_addi_i64(tmp
, src
, 1);
1051 tcg_gen_brcond_i64(TCG_COND_EQ
, dest
, tmp
, l_memset
);
1052 tcg_temp_free_i64(tmp
);
1056 tmp
= tcg_temp_new_i64();
1058 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1059 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1061 tcg_temp_free_i64(tmp
);
1064 tmp
= tcg_temp_new_i64();
1066 tcg_gen_qemu_ld16u(tmp
, src
, get_mem_index(s
));
1067 tcg_gen_qemu_st16(tmp
, dest
, get_mem_index(s
));
1069 tcg_temp_free_i64(tmp
);
1072 tmp
= tcg_temp_new_i64();
1074 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1075 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1077 tcg_temp_free_i64(tmp
);
1080 tmp
= tcg_temp_new_i64();
1081 tmp2
= tcg_temp_new_i64();
1083 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1084 tcg_gen_addi_i64(src
, src
, 4);
1085 tcg_gen_qemu_ld8u(tmp2
, src
, get_mem_index(s
));
1086 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1087 tcg_gen_addi_i64(dest
, dest
, 4);
1088 tcg_gen_qemu_st8(tmp2
, dest
, get_mem_index(s
));
1090 tcg_temp_free_i64(tmp
);
1091 tcg_temp_free_i64(tmp2
);
1094 tmp
= tcg_temp_new_i64();
1096 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1097 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1099 tcg_temp_free_i64(tmp
);
1102 /* The inline version can become too big for too uneven numbers, only
1103 use it on known good lengths */
1104 tmp
= tcg_temp_new_i64();
1105 tmp2
= tcg_const_i64(8);
1106 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1107 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1108 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1110 tcg_gen_add_i64(src
, src
, tmp2
);
1111 tcg_gen_add_i64(dest
, dest
, tmp2
);
1114 tcg_temp_free_i64(tmp2
);
1115 tmp2
= tcg_const_i64(1);
1117 for (; i
<= l
; i
++) {
1118 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1119 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1121 tcg_gen_add_i64(src
, src
, tmp2
);
1122 tcg_gen_add_i64(dest
, dest
, tmp2
);
1125 tcg_temp_free_i64(tmp2
);
1126 tcg_temp_free_i64(tmp
);
1132 gen_set_label(l_memset
);
1133 /* memset case (dest == (src + 1)) */
1135 tmp
= tcg_temp_new_i64();
1136 tmp2
= tcg_temp_new_i64();
1137 /* fill tmp with the byte */
1138 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1139 tcg_gen_shli_i64(tmp2
, tmp
, 8);
1140 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1141 tcg_gen_shli_i64(tmp2
, tmp
, 16);
1142 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1143 tcg_gen_shli_i64(tmp2
, tmp
, 32);
1144 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1145 tcg_temp_free_i64(tmp2
);
1147 tmp2
= tcg_const_i64(8);
1149 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1150 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1151 tcg_gen_addi_i64(dest
, dest
, 8);
1154 tcg_temp_free_i64(tmp2
);
1155 tmp2
= tcg_const_i64(1);
1157 for (; i
<= l
; i
++) {
1158 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1159 tcg_gen_addi_i64(dest
, dest
, 1);
1162 tcg_temp_free_i64(tmp2
);
1163 tcg_temp_free_i64(tmp
);
1165 gen_set_label(l_out
);
1167 tcg_temp_free(dest
);
1171 static void gen_op_clc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1177 /* check for simple 32bit or 64bit match */
1180 tmp
= tcg_temp_new_i64();
1181 tmp2
= tcg_temp_new_i64();
1183 tcg_gen_qemu_ld8u(tmp
, s1
, get_mem_index(s
));
1184 tcg_gen_qemu_ld8u(tmp2
, s2
, get_mem_index(s
));
1185 cmp_u64(s
, tmp
, tmp2
);
1187 tcg_temp_free_i64(tmp
);
1188 tcg_temp_free_i64(tmp2
);
1191 tmp
= tcg_temp_new_i64();
1192 tmp2
= tcg_temp_new_i64();
1194 tcg_gen_qemu_ld16u(tmp
, s1
, get_mem_index(s
));
1195 tcg_gen_qemu_ld16u(tmp2
, s2
, get_mem_index(s
));
1196 cmp_u64(s
, tmp
, tmp2
);
1198 tcg_temp_free_i64(tmp
);
1199 tcg_temp_free_i64(tmp2
);
1202 tmp
= tcg_temp_new_i64();
1203 tmp2
= tcg_temp_new_i64();
1205 tcg_gen_qemu_ld32u(tmp
, s1
, get_mem_index(s
));
1206 tcg_gen_qemu_ld32u(tmp2
, s2
, get_mem_index(s
));
1207 cmp_u64(s
, tmp
, tmp2
);
1209 tcg_temp_free_i64(tmp
);
1210 tcg_temp_free_i64(tmp2
);
1213 tmp
= tcg_temp_new_i64();
1214 tmp2
= tcg_temp_new_i64();
1216 tcg_gen_qemu_ld64(tmp
, s1
, get_mem_index(s
));
1217 tcg_gen_qemu_ld64(tmp2
, s2
, get_mem_index(s
));
1218 cmp_u64(s
, tmp
, tmp2
);
1220 tcg_temp_free_i64(tmp
);
1221 tcg_temp_free_i64(tmp2
);
1225 potential_page_fault(s
);
1226 vl
= tcg_const_i32(l
);
1227 gen_helper_clc(cc_op
, cpu_env
, vl
, s1
, s2
);
1228 tcg_temp_free_i32(vl
);
1232 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1233 int x2
, int b2
, int d2
)
1235 TCGv_i64 addr
, tmp2
, tmp3
;
1238 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1239 op
, r1
, x2
, b2
, d2
);
1240 addr
= get_address(s
, x2
, b2
, d2
);
1242 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1243 tmp2
= tcg_temp_new_i64();
1244 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1245 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1246 store_reg(r1
, tmp2
);
1247 tcg_temp_free_i64(tmp2
);
1249 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1250 tmp2
= tcg_temp_new_i64();
1251 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1252 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1253 store_reg(r1
, tmp2
);
1254 tcg_temp_free_i64(tmp2
);
1256 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1257 tmp2
= tcg_temp_new_i64();
1258 tmp32_1
= tcg_temp_new_i32();
1259 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1260 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1261 tcg_temp_free_i64(tmp2
);
1262 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1263 store_reg32(r1
, tmp32_1
);
1264 tcg_temp_free_i32(tmp32_1
);
1266 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1267 tmp2
= tcg_temp_new_i64();
1268 tmp32_1
= tcg_temp_new_i32();
1269 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1270 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1271 tcg_temp_free_i64(tmp2
);
1272 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1273 store_reg16(r1
, tmp32_1
);
1274 tcg_temp_free_i32(tmp32_1
);
1276 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1277 tmp32_1
= load_reg32(r1
);
1278 tmp2
= tcg_temp_new_i64();
1279 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1280 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1281 tcg_temp_free_i32(tmp32_1
);
1282 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1283 tcg_temp_free_i64(tmp2
);
1285 case 0x73: /* ICY R1,D2(X2,B2) [RXY] */
1286 tmp3
= tcg_temp_new_i64();
1287 tcg_gen_qemu_ld8u(tmp3
, addr
, get_mem_index(s
));
1288 store_reg8(r1
, tmp3
);
1289 tcg_temp_free_i64(tmp3
);
1292 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1293 gen_illegal_opcode(s
);
1296 tcg_temp_free_i64(addr
);
1299 #ifndef CONFIG_USER_ONLY
1300 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1303 int op
= (insn
>> 32) & 0xff;
1305 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1306 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1308 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1310 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1311 /* Test Protection */
1312 potential_page_fault(s
);
1313 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1317 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1318 gen_illegal_opcode(s
);
1322 tcg_temp_free_i64(tmp
);
1323 tcg_temp_free_i64(tmp2
);
1327 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1328 int r3
, int b2
, int d2
)
1330 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
1331 TCGv_i32 tmp32_1
, tmp32_2
;
1334 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1335 op
, r1
, r3
, b2
, d2
);
1337 case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
1338 case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
1339 case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
1340 case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
1341 case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
1343 tmp
= get_address(s
, 0, b2
, d2
);
1344 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1346 tmp
= tcg_const_i64(d2
& 0x3f);
1350 tcg_gen_shr_i64(regs
[r1
], regs
[r3
], tmp
);
1353 tcg_gen_shl_i64(regs
[r1
], regs
[r3
], tmp
);
1356 tcg_gen_sar_i64(regs
[r1
], regs
[r3
], tmp
);
1359 tmp2
= tcg_temp_new_i64();
1360 tmp3
= tcg_temp_new_i64();
1361 gen_op_update2_cc_i64(s
, CC_OP_SLAG
, regs
[r3
], tmp
);
1362 tcg_gen_shl_i64(tmp2
, regs
[r3
], tmp
);
1363 /* override sign bit with source sign */
1364 tcg_gen_andi_i64(tmp2
, tmp2
, ~0x8000000000000000ULL
);
1365 tcg_gen_andi_i64(tmp3
, regs
[r3
], 0x8000000000000000ULL
);
1366 tcg_gen_or_i64(regs
[r1
], tmp2
, tmp3
);
1367 tcg_temp_free_i64(tmp2
);
1368 tcg_temp_free_i64(tmp3
);
1371 tcg_gen_rotl_i64(regs
[r1
], regs
[r3
], tmp
);
1378 set_cc_s64(s
, regs
[r1
]);
1380 tcg_temp_free_i64(tmp
);
1382 case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
1384 tmp
= get_address(s
, 0, b2
, d2
);
1385 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1387 tmp
= tcg_const_i64(d2
& 0x3f);
1389 tmp32_1
= tcg_temp_new_i32();
1390 tmp32_2
= load_reg32(r3
);
1391 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
1394 tcg_gen_rotl_i32(tmp32_1
, tmp32_2
, tmp32_1
);
1400 store_reg32(r1
, tmp32_1
);
1401 tcg_temp_free_i64(tmp
);
1402 tcg_temp_free_i32(tmp32_1
);
1403 tcg_temp_free_i32(tmp32_2
);
1405 case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
1406 case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
1409 case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
1410 case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
1413 /* Apparently, unrolling lmg/stmg of any size gains performance -
1414 even for very long ones... */
1415 tmp
= get_address(s
, 0, b2
, d2
);
1416 tmp3
= tcg_const_i64(stm_len
);
1417 tmp4
= tcg_const_i64(op
== 0x26 ? 32 : 4);
1418 for (i
= r1
;; i
= (i
+ 1) % 16) {
1421 tcg_gen_qemu_ld64(regs
[i
], tmp
, get_mem_index(s
));
1424 tmp2
= tcg_temp_new_i64();
1425 #if HOST_LONG_BITS == 32
1426 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1427 tcg_gen_trunc_i64_i32(TCGV_HIGH(regs
[i
]), tmp2
);
1429 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1430 tcg_gen_shl_i64(tmp2
, tmp2
, tmp4
);
1431 tcg_gen_ext32u_i64(regs
[i
], regs
[i
]);
1432 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
1434 tcg_temp_free_i64(tmp2
);
1437 tcg_gen_qemu_st64(regs
[i
], tmp
, get_mem_index(s
));
1440 tmp2
= tcg_temp_new_i64();
1441 tcg_gen_shr_i64(tmp2
, regs
[i
], tmp4
);
1442 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1443 tcg_temp_free_i64(tmp2
);
1451 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
1453 tcg_temp_free_i64(tmp
);
1454 tcg_temp_free_i64(tmp3
);
1455 tcg_temp_free_i64(tmp4
);
1457 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1458 tmp
= get_address(s
, 0, b2
, d2
);
1459 tmp32_1
= tcg_const_i32(r1
);
1460 tmp32_2
= tcg_const_i32(r3
);
1461 potential_page_fault(s
);
1462 gen_helper_stcmh(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1463 tcg_temp_free_i64(tmp
);
1464 tcg_temp_free_i32(tmp32_1
);
1465 tcg_temp_free_i32(tmp32_2
);
1467 #ifndef CONFIG_USER_ONLY
1468 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1470 check_privileged(s
);
1471 tmp
= get_address(s
, 0, b2
, d2
);
1472 tmp32_1
= tcg_const_i32(r1
);
1473 tmp32_2
= tcg_const_i32(r3
);
1474 potential_page_fault(s
);
1475 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1476 tcg_temp_free_i64(tmp
);
1477 tcg_temp_free_i32(tmp32_1
);
1478 tcg_temp_free_i32(tmp32_2
);
1480 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1482 check_privileged(s
);
1483 tmp
= get_address(s
, 0, b2
, d2
);
1484 tmp32_1
= tcg_const_i32(r1
);
1485 tmp32_2
= tcg_const_i32(r3
);
1486 potential_page_fault(s
);
1487 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1488 tcg_temp_free_i64(tmp
);
1489 tcg_temp_free_i32(tmp32_1
);
1490 tcg_temp_free_i32(tmp32_2
);
1493 case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
1494 tmp
= get_address(s
, 0, b2
, d2
);
1495 tmp32_1
= tcg_const_i32(r1
);
1496 tmp32_2
= tcg_const_i32(r3
);
1497 potential_page_fault(s
);
1498 /* XXX rewrite in tcg */
1499 gen_helper_csg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1501 tcg_temp_free_i64(tmp
);
1502 tcg_temp_free_i32(tmp32_1
);
1503 tcg_temp_free_i32(tmp32_2
);
1505 case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
1506 tmp
= get_address(s
, 0, b2
, d2
);
1507 tmp32_1
= tcg_const_i32(r1
);
1508 tmp32_2
= tcg_const_i32(r3
);
1509 potential_page_fault(s
);
1510 /* XXX rewrite in tcg */
1511 gen_helper_cdsg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1513 tcg_temp_free_i64(tmp
);
1514 tcg_temp_free_i32(tmp32_1
);
1515 tcg_temp_free_i32(tmp32_2
);
1517 case 0x52: /* MVIY D1(B1),I2 [SIY] */
1518 tmp
= get_address(s
, 0, b2
, d2
); /* SIY -> this is the destination */
1519 tmp2
= tcg_const_i64((r1
<< 4) | r3
);
1520 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
1521 tcg_temp_free_i64(tmp
);
1522 tcg_temp_free_i64(tmp2
);
1524 case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */
1525 tmp
= get_address(s
, 0, b2
, d2
);
1526 tmp32_1
= tcg_const_i32(r1
);
1527 tmp32_2
= tcg_const_i32(r3
);
1528 potential_page_fault(s
);
1529 /* XXX split CC calculation out */
1530 gen_helper_icmh(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1532 tcg_temp_free_i64(tmp
);
1533 tcg_temp_free_i32(tmp32_1
);
1534 tcg_temp_free_i32(tmp32_2
);
1537 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1538 gen_illegal_opcode(s
);
1543 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1544 int x2
, int b2
, int d2
, int r1b
)
1546 TCGv_i32 tmp_r1
, tmp32
;
1548 addr
= get_address(s
, x2
, b2
, d2
);
1549 tmp_r1
= tcg_const_i32(r1
);
1551 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1552 potential_page_fault(s
);
1553 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1555 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1556 potential_page_fault(s
);
1557 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1559 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1560 tmp
= tcg_temp_new_i64();
1561 tmp32
= load_freg32(r1
);
1562 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1563 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1564 tcg_temp_free_i64(tmp
);
1565 tcg_temp_free_i32(tmp32
);
1567 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1568 tmp
= tcg_temp_new_i64();
1569 tmp32
= tcg_temp_new_i32();
1570 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1571 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1572 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1573 tcg_temp_free_i64(tmp
);
1574 tcg_temp_free_i32(tmp32
);
1576 tmp32
= load_freg32(r1
);
1577 gen_set_cc_nz_f32(s
, tmp32
);
1578 tcg_temp_free_i32(tmp32
);
1580 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1581 tmp
= tcg_temp_new_i64();
1582 tmp32
= tcg_temp_new_i32();
1583 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1584 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1585 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1586 tcg_temp_free_i64(tmp
);
1587 tcg_temp_free_i32(tmp32
);
1589 tmp32
= load_freg32(r1
);
1590 gen_set_cc_nz_f32(s
, tmp32
);
1591 tcg_temp_free_i32(tmp32
);
1593 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1594 tmp
= tcg_temp_new_i64();
1595 tmp32
= tcg_temp_new_i32();
1596 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1597 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1598 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1599 tcg_temp_free_i64(tmp
);
1600 tcg_temp_free_i32(tmp32
);
1602 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1603 potential_page_fault(s
);
1604 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1607 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1608 potential_page_fault(s
);
1609 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1612 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1613 potential_page_fault(s
);
1614 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1617 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1618 tmp
= tcg_temp_new_i64();
1619 tmp32
= tcg_temp_new_i32();
1620 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1621 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1622 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1623 tcg_temp_free_i64(tmp
);
1624 tcg_temp_free_i32(tmp32
);
1626 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1627 potential_page_fault(s
);
1628 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1631 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1632 potential_page_fault(s
);
1633 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1636 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1637 potential_page_fault(s
);
1638 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1641 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1642 potential_page_fault(s
);
1643 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1645 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1646 potential_page_fault(s
);
1647 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1649 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1650 /* for RXF insns, r1 is R3 and r1b is R1 */
1651 tmp32
= tcg_const_i32(r1b
);
1652 potential_page_fault(s
);
1653 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1654 tcg_temp_free_i32(tmp32
);
1657 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1658 gen_illegal_opcode(s
);
1661 tcg_temp_free_i32(tmp_r1
);
1662 tcg_temp_free_i64(addr
);
1665 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1668 TCGv_i64 tmp
, tmp2
, tmp3
;
1669 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1671 #ifndef CONFIG_USER_ONLY
1675 r1
= (insn
>> 4) & 0xf;
1678 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1681 case 0x22: /* IPM R1 [RRE] */
1682 tmp32_1
= tcg_const_i32(r1
);
1684 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1685 tcg_temp_free_i32(tmp32_1
);
1687 case 0x41: /* CKSM R1,R2 [RRE] */
1688 tmp32_1
= tcg_const_i32(r1
);
1689 tmp32_2
= tcg_const_i32(r2
);
1690 potential_page_fault(s
);
1691 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1692 tcg_temp_free_i32(tmp32_1
);
1693 tcg_temp_free_i32(tmp32_2
);
1694 gen_op_movi_cc(s
, 0);
1696 case 0x4e: /* SAR R1,R2 [RRE] */
1697 tmp32_1
= load_reg32(r2
);
1698 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1699 tcg_temp_free_i32(tmp32_1
);
1701 case 0x4f: /* EAR R1,R2 [RRE] */
1702 tmp32_1
= tcg_temp_new_i32();
1703 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1704 store_reg32(r1
, tmp32_1
);
1705 tcg_temp_free_i32(tmp32_1
);
1707 case 0x54: /* MVPG R1,R2 [RRE] */
1709 tmp2
= load_reg(r1
);
1710 tmp3
= load_reg(r2
);
1711 potential_page_fault(s
);
1712 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1713 tcg_temp_free_i64(tmp
);
1714 tcg_temp_free_i64(tmp2
);
1715 tcg_temp_free_i64(tmp3
);
1716 /* XXX check CCO bit and set CC accordingly */
1717 gen_op_movi_cc(s
, 0);
1719 case 0x55: /* MVST R1,R2 [RRE] */
1720 tmp32_1
= load_reg32(0);
1721 tmp32_2
= tcg_const_i32(r1
);
1722 tmp32_3
= tcg_const_i32(r2
);
1723 potential_page_fault(s
);
1724 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1725 tcg_temp_free_i32(tmp32_1
);
1726 tcg_temp_free_i32(tmp32_2
);
1727 tcg_temp_free_i32(tmp32_3
);
1728 gen_op_movi_cc(s
, 1);
1730 case 0x5d: /* CLST R1,R2 [RRE] */
1731 tmp32_1
= load_reg32(0);
1732 tmp32_2
= tcg_const_i32(r1
);
1733 tmp32_3
= tcg_const_i32(r2
);
1734 potential_page_fault(s
);
1735 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1737 tcg_temp_free_i32(tmp32_1
);
1738 tcg_temp_free_i32(tmp32_2
);
1739 tcg_temp_free_i32(tmp32_3
);
1741 case 0x5e: /* SRST R1,R2 [RRE] */
1742 tmp32_1
= load_reg32(0);
1743 tmp32_2
= tcg_const_i32(r1
);
1744 tmp32_3
= tcg_const_i32(r2
);
1745 potential_page_fault(s
);
1746 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1748 tcg_temp_free_i32(tmp32_1
);
1749 tcg_temp_free_i32(tmp32_2
);
1750 tcg_temp_free_i32(tmp32_3
);
1753 #ifndef CONFIG_USER_ONLY
1754 case 0x02: /* STIDP D2(B2) [S] */
1756 check_privileged(s
);
1757 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1758 tmp
= get_address(s
, 0, b2
, d2
);
1759 potential_page_fault(s
);
1760 gen_helper_stidp(cpu_env
, tmp
);
1761 tcg_temp_free_i64(tmp
);
1763 case 0x04: /* SCK D2(B2) [S] */
1765 check_privileged(s
);
1766 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1767 tmp
= get_address(s
, 0, b2
, d2
);
1768 potential_page_fault(s
);
1769 gen_helper_sck(cc_op
, tmp
);
1771 tcg_temp_free_i64(tmp
);
1773 case 0x05: /* STCK D2(B2) [S] */
1775 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1776 tmp
= get_address(s
, 0, b2
, d2
);
1777 potential_page_fault(s
);
1778 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1780 tcg_temp_free_i64(tmp
);
1782 case 0x06: /* SCKC D2(B2) [S] */
1783 /* Set Clock Comparator */
1784 check_privileged(s
);
1785 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1786 tmp
= get_address(s
, 0, b2
, d2
);
1787 potential_page_fault(s
);
1788 gen_helper_sckc(cpu_env
, tmp
);
1789 tcg_temp_free_i64(tmp
);
1791 case 0x07: /* STCKC D2(B2) [S] */
1792 /* Store Clock Comparator */
1793 check_privileged(s
);
1794 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1795 tmp
= get_address(s
, 0, b2
, d2
);
1796 potential_page_fault(s
);
1797 gen_helper_stckc(cpu_env
, tmp
);
1798 tcg_temp_free_i64(tmp
);
1800 case 0x08: /* SPT D2(B2) [S] */
1802 check_privileged(s
);
1803 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1804 tmp
= get_address(s
, 0, b2
, d2
);
1805 potential_page_fault(s
);
1806 gen_helper_spt(cpu_env
, tmp
);
1807 tcg_temp_free_i64(tmp
);
1809 case 0x09: /* STPT D2(B2) [S] */
1810 /* Store CPU Timer */
1811 check_privileged(s
);
1812 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1813 tmp
= get_address(s
, 0, b2
, d2
);
1814 potential_page_fault(s
);
1815 gen_helper_stpt(cpu_env
, tmp
);
1816 tcg_temp_free_i64(tmp
);
1818 case 0x0a: /* SPKA D2(B2) [S] */
1819 /* Set PSW Key from Address */
1820 check_privileged(s
);
1821 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1822 tmp
= get_address(s
, 0, b2
, d2
);
1823 tmp2
= tcg_temp_new_i64();
1824 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1825 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1826 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1827 tcg_temp_free_i64(tmp2
);
1828 tcg_temp_free_i64(tmp
);
1830 case 0x0d: /* PTLB [S] */
1832 check_privileged(s
);
1833 gen_helper_ptlb(cpu_env
);
1835 case 0x10: /* SPX D2(B2) [S] */
1836 /* Set Prefix Register */
1837 check_privileged(s
);
1838 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1839 tmp
= get_address(s
, 0, b2
, d2
);
1840 potential_page_fault(s
);
1841 gen_helper_spx(cpu_env
, tmp
);
1842 tcg_temp_free_i64(tmp
);
1844 case 0x11: /* STPX D2(B2) [S] */
1846 check_privileged(s
);
1847 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1848 tmp
= get_address(s
, 0, b2
, d2
);
1849 tmp2
= tcg_temp_new_i64();
1850 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1851 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1852 tcg_temp_free_i64(tmp
);
1853 tcg_temp_free_i64(tmp2
);
1855 case 0x12: /* STAP D2(B2) [S] */
1856 /* Store CPU Address */
1857 check_privileged(s
);
1858 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1859 tmp
= get_address(s
, 0, b2
, d2
);
1860 tmp2
= tcg_temp_new_i64();
1861 tmp32_1
= tcg_temp_new_i32();
1862 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1863 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1864 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1865 tcg_temp_free_i64(tmp
);
1866 tcg_temp_free_i64(tmp2
);
1867 tcg_temp_free_i32(tmp32_1
);
1869 case 0x21: /* IPTE R1,R2 [RRE] */
1870 /* Invalidate PTE */
1871 check_privileged(s
);
1872 r1
= (insn
>> 4) & 0xf;
1875 tmp2
= load_reg(r2
);
1876 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1877 tcg_temp_free_i64(tmp
);
1878 tcg_temp_free_i64(tmp2
);
1880 case 0x29: /* ISKE R1,R2 [RRE] */
1881 /* Insert Storage Key Extended */
1882 check_privileged(s
);
1883 r1
= (insn
>> 4) & 0xf;
1886 tmp2
= tcg_temp_new_i64();
1887 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1888 store_reg(r1
, tmp2
);
1889 tcg_temp_free_i64(tmp
);
1890 tcg_temp_free_i64(tmp2
);
1892 case 0x2a: /* RRBE R1,R2 [RRE] */
1893 /* Set Storage Key Extended */
1894 check_privileged(s
);
1895 r1
= (insn
>> 4) & 0xf;
1897 tmp32_1
= load_reg32(r1
);
1899 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1901 tcg_temp_free_i32(tmp32_1
);
1902 tcg_temp_free_i64(tmp
);
1904 case 0x2b: /* SSKE R1,R2 [RRE] */
1905 /* Set Storage Key Extended */
1906 check_privileged(s
);
1907 r1
= (insn
>> 4) & 0xf;
1909 tmp32_1
= load_reg32(r1
);
1911 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1912 tcg_temp_free_i32(tmp32_1
);
1913 tcg_temp_free_i64(tmp
);
1915 case 0x34: /* STCH ? */
1916 /* Store Subchannel */
1917 check_privileged(s
);
1918 gen_op_movi_cc(s
, 3);
1920 case 0x46: /* STURA R1,R2 [RRE] */
1921 /* Store Using Real Address */
1922 check_privileged(s
);
1923 r1
= (insn
>> 4) & 0xf;
1925 tmp32_1
= load_reg32(r1
);
1927 potential_page_fault(s
);
1928 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1929 tcg_temp_free_i32(tmp32_1
);
1930 tcg_temp_free_i64(tmp
);
1932 case 0x50: /* CSP R1,R2 [RRE] */
1933 /* Compare And Swap And Purge */
1934 check_privileged(s
);
1935 r1
= (insn
>> 4) & 0xf;
1937 tmp32_1
= tcg_const_i32(r1
);
1938 tmp32_2
= tcg_const_i32(r2
);
1939 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1941 tcg_temp_free_i32(tmp32_1
);
1942 tcg_temp_free_i32(tmp32_2
);
1944 case 0x5f: /* CHSC ? */
1945 /* Channel Subsystem Call */
1946 check_privileged(s
);
1947 gen_op_movi_cc(s
, 3);
1949 case 0x78: /* STCKE D2(B2) [S] */
1950 /* Store Clock Extended */
1951 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1952 tmp
= get_address(s
, 0, b2
, d2
);
1953 potential_page_fault(s
);
1954 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1956 tcg_temp_free_i64(tmp
);
1958 case 0x79: /* SACF D2(B2) [S] */
1959 /* Set Address Space Control Fast */
1960 check_privileged(s
);
1961 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1962 tmp
= get_address(s
, 0, b2
, d2
);
1963 potential_page_fault(s
);
1964 gen_helper_sacf(cpu_env
, tmp
);
1965 tcg_temp_free_i64(tmp
);
1966 /* addressing mode has changed, so end the block */
1969 s
->is_jmp
= DISAS_JUMP
;
1971 case 0x7d: /* STSI D2,(B2) [S] */
1972 check_privileged(s
);
1973 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1974 tmp
= get_address(s
, 0, b2
, d2
);
1975 tmp32_1
= load_reg32(0);
1976 tmp32_2
= load_reg32(1);
1977 potential_page_fault(s
);
1978 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1980 tcg_temp_free_i64(tmp
);
1981 tcg_temp_free_i32(tmp32_1
);
1982 tcg_temp_free_i32(tmp32_2
);
1984 case 0x9d: /* LFPC D2(B2) [S] */
1985 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1986 tmp
= get_address(s
, 0, b2
, d2
);
1987 tmp2
= tcg_temp_new_i64();
1988 tmp32_1
= tcg_temp_new_i32();
1989 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1990 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1991 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1992 tcg_temp_free_i64(tmp
);
1993 tcg_temp_free_i64(tmp2
);
1994 tcg_temp_free_i32(tmp32_1
);
1996 case 0xb1: /* STFL D2(B2) [S] */
1997 /* Store Facility List (CPU features) at 200 */
1998 check_privileged(s
);
1999 tmp2
= tcg_const_i64(0xc0000000);
2000 tmp
= tcg_const_i64(200);
2001 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
2002 tcg_temp_free_i64(tmp2
);
2003 tcg_temp_free_i64(tmp
);
2005 case 0xb2: /* LPSWE D2(B2) [S] */
2006 /* Load PSW Extended */
2007 check_privileged(s
);
2008 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2009 tmp
= get_address(s
, 0, b2
, d2
);
2010 tmp2
= tcg_temp_new_i64();
2011 tmp3
= tcg_temp_new_i64();
2012 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
2013 tcg_gen_addi_i64(tmp
, tmp
, 8);
2014 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
2015 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
2016 /* we need to keep cc_op intact */
2017 s
->is_jmp
= DISAS_JUMP
;
2018 tcg_temp_free_i64(tmp
);
2019 tcg_temp_free_i64(tmp2
);
2020 tcg_temp_free_i64(tmp3
);
2022 case 0x20: /* SERVC R1,R2 [RRE] */
2023 /* SCLP Service call (PV hypercall) */
2024 check_privileged(s
);
2025 potential_page_fault(s
);
2026 tmp32_1
= load_reg32(r2
);
2028 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
2030 tcg_temp_free_i32(tmp32_1
);
2031 tcg_temp_free_i64(tmp
);
2035 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
2036 gen_illegal_opcode(s
);
2041 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
2045 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
2046 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
2047 #define FP_HELPER(i) \
2048 tmp32_1 = tcg_const_i32(r1); \
2049 tmp32_2 = tcg_const_i32(r2); \
2050 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
2051 tcg_temp_free_i32(tmp32_1); \
2052 tcg_temp_free_i32(tmp32_2);
2054 #define FP_HELPER_CC(i) \
2055 tmp32_1 = tcg_const_i32(r1); \
2056 tmp32_2 = tcg_const_i32(r2); \
2057 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
2059 tcg_temp_free_i32(tmp32_1); \
2060 tcg_temp_free_i32(tmp32_2);
2063 case 0x0: /* LPEBR R1,R2 [RRE] */
2064 FP_HELPER_CC(lpebr
);
2066 case 0x2: /* LTEBR R1,R2 [RRE] */
2067 FP_HELPER_CC(ltebr
);
2069 case 0x3: /* LCEBR R1,R2 [RRE] */
2070 FP_HELPER_CC(lcebr
);
2072 case 0x4: /* LDEBR R1,R2 [RRE] */
2075 case 0x5: /* LXDBR R1,R2 [RRE] */
2078 case 0x9: /* CEBR R1,R2 [RRE] */
2081 case 0xa: /* AEBR R1,R2 [RRE] */
2084 case 0xb: /* SEBR R1,R2 [RRE] */
2087 case 0xd: /* DEBR R1,R2 [RRE] */
2090 case 0x10: /* LPDBR R1,R2 [RRE] */
2091 FP_HELPER_CC(lpdbr
);
2093 case 0x12: /* LTDBR R1,R2 [RRE] */
2094 FP_HELPER_CC(ltdbr
);
2096 case 0x13: /* LCDBR R1,R2 [RRE] */
2097 FP_HELPER_CC(lcdbr
);
2099 case 0x15: /* SQBDR R1,R2 [RRE] */
2102 case 0x17: /* MEEBR R1,R2 [RRE] */
2105 case 0x19: /* CDBR R1,R2 [RRE] */
2108 case 0x1a: /* ADBR R1,R2 [RRE] */
2111 case 0x1b: /* SDBR R1,R2 [RRE] */
2114 case 0x1c: /* MDBR R1,R2 [RRE] */
2117 case 0x1d: /* DDBR R1,R2 [RRE] */
2120 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
2121 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
2122 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
2123 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
2124 tmp32_1
= tcg_const_i32(m3
);
2125 tmp32_2
= tcg_const_i32(r2
);
2126 tmp32_3
= tcg_const_i32(r1
);
2129 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2132 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2135 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2140 tcg_temp_free_i32(tmp32_1
);
2141 tcg_temp_free_i32(tmp32_2
);
2142 tcg_temp_free_i32(tmp32_3
);
2144 case 0x40: /* LPXBR R1,R2 [RRE] */
2145 FP_HELPER_CC(lpxbr
);
2147 case 0x42: /* LTXBR R1,R2 [RRE] */
2148 FP_HELPER_CC(ltxbr
);
2150 case 0x43: /* LCXBR R1,R2 [RRE] */
2151 FP_HELPER_CC(lcxbr
);
2153 case 0x44: /* LEDBR R1,R2 [RRE] */
2156 case 0x45: /* LDXBR R1,R2 [RRE] */
2159 case 0x46: /* LEXBR R1,R2 [RRE] */
2162 case 0x49: /* CXBR R1,R2 [RRE] */
2165 case 0x4a: /* AXBR R1,R2 [RRE] */
2168 case 0x4b: /* SXBR R1,R2 [RRE] */
2171 case 0x4c: /* MXBR R1,R2 [RRE] */
2174 case 0x4d: /* DXBR R1,R2 [RRE] */
2177 case 0x65: /* LXR R1,R2 [RRE] */
2178 tmp
= load_freg(r2
);
2179 store_freg(r1
, tmp
);
2180 tcg_temp_free_i64(tmp
);
2181 tmp
= load_freg(r2
+ 2);
2182 store_freg(r1
+ 2, tmp
);
2183 tcg_temp_free_i64(tmp
);
2185 case 0x74: /* LZER R1 [RRE] */
2186 tmp32_1
= tcg_const_i32(r1
);
2187 gen_helper_lzer(cpu_env
, tmp32_1
);
2188 tcg_temp_free_i32(tmp32_1
);
2190 case 0x75: /* LZDR R1 [RRE] */
2191 tmp32_1
= tcg_const_i32(r1
);
2192 gen_helper_lzdr(cpu_env
, tmp32_1
);
2193 tcg_temp_free_i32(tmp32_1
);
2195 case 0x76: /* LZXR R1 [RRE] */
2196 tmp32_1
= tcg_const_i32(r1
);
2197 gen_helper_lzxr(cpu_env
, tmp32_1
);
2198 tcg_temp_free_i32(tmp32_1
);
2200 case 0x84: /* SFPC R1 [RRE] */
2201 tmp32_1
= load_reg32(r1
);
2202 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2203 tcg_temp_free_i32(tmp32_1
);
2205 case 0x8c: /* EFPC R1 [RRE] */
2206 tmp32_1
= tcg_temp_new_i32();
2207 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2208 store_reg32(r1
, tmp32_1
);
2209 tcg_temp_free_i32(tmp32_1
);
2211 case 0x94: /* CEFBR R1,R2 [RRE] */
2212 case 0x95: /* CDFBR R1,R2 [RRE] */
2213 case 0x96: /* CXFBR R1,R2 [RRE] */
2214 tmp32_1
= tcg_const_i32(r1
);
2215 tmp32_2
= load_reg32(r2
);
2218 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
2221 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
2224 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
2229 tcg_temp_free_i32(tmp32_1
);
2230 tcg_temp_free_i32(tmp32_2
);
2232 case 0x98: /* CFEBR R1,R2 [RRE] */
2233 case 0x99: /* CFDBR R1,R2 [RRE] */
2234 case 0x9a: /* CFXBR R1,R2 [RRE] */
2235 tmp32_1
= tcg_const_i32(r1
);
2236 tmp32_2
= tcg_const_i32(r2
);
2237 tmp32_3
= tcg_const_i32(m3
);
2240 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2243 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2246 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2252 tcg_temp_free_i32(tmp32_1
);
2253 tcg_temp_free_i32(tmp32_2
);
2254 tcg_temp_free_i32(tmp32_3
);
2256 case 0xa4: /* CEGBR R1,R2 [RRE] */
2257 case 0xa5: /* CDGBR R1,R2 [RRE] */
2258 tmp32_1
= tcg_const_i32(r1
);
2262 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
2265 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
2270 tcg_temp_free_i32(tmp32_1
);
2271 tcg_temp_free_i64(tmp
);
2273 case 0xa6: /* CXGBR R1,R2 [RRE] */
2274 tmp32_1
= tcg_const_i32(r1
);
2276 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
2277 tcg_temp_free_i32(tmp32_1
);
2278 tcg_temp_free_i64(tmp
);
2280 case 0xa8: /* CGEBR R1,R2 [RRE] */
2281 tmp32_1
= tcg_const_i32(r1
);
2282 tmp32_2
= tcg_const_i32(r2
);
2283 tmp32_3
= tcg_const_i32(m3
);
2284 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2286 tcg_temp_free_i32(tmp32_1
);
2287 tcg_temp_free_i32(tmp32_2
);
2288 tcg_temp_free_i32(tmp32_3
);
2290 case 0xa9: /* CGDBR R1,R2 [RRE] */
2291 tmp32_1
= tcg_const_i32(r1
);
2292 tmp32_2
= tcg_const_i32(r2
);
2293 tmp32_3
= tcg_const_i32(m3
);
2294 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2296 tcg_temp_free_i32(tmp32_1
);
2297 tcg_temp_free_i32(tmp32_2
);
2298 tcg_temp_free_i32(tmp32_3
);
2300 case 0xaa: /* CGXBR R1,R2 [RRE] */
2301 tmp32_1
= tcg_const_i32(r1
);
2302 tmp32_2
= tcg_const_i32(r2
);
2303 tmp32_3
= tcg_const_i32(m3
);
2304 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2306 tcg_temp_free_i32(tmp32_1
);
2307 tcg_temp_free_i32(tmp32_2
);
2308 tcg_temp_free_i32(tmp32_3
);
2311 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
2312 gen_illegal_opcode(s
);
2320 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
2326 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
2328 case 0x17: /* LLGTR R1,R2 [RRE] */
2329 tmp32_1
= load_reg32(r2
);
2330 tmp
= tcg_temp_new_i64();
2331 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, 0x7fffffffUL
);
2332 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2334 tcg_temp_free_i32(tmp32_1
);
2335 tcg_temp_free_i64(tmp
);
2337 case 0x0f: /* LRVGR R1,R2 [RRE] */
2338 tcg_gen_bswap64_i64(regs
[r1
], regs
[r2
]);
2340 case 0x1f: /* LRVR R1,R2 [RRE] */
2341 tmp32_1
= load_reg32(r2
);
2342 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
2343 store_reg32(r1
, tmp32_1
);
2344 tcg_temp_free_i32(tmp32_1
);
2346 case 0x83: /* FLOGR R1,R2 [RRE] */
2348 tmp32_1
= tcg_const_i32(r1
);
2349 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
2351 tcg_temp_free_i64(tmp
);
2352 tcg_temp_free_i32(tmp32_1
);
2355 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
2356 gen_illegal_opcode(s
);
2361 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
2363 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
2364 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
, tmp32_4
;
2367 int op
, r1
, r2
, r3
, d1
, d2
, x2
, b1
, b2
, i
, i2
, r1b
;
2370 opc
= cpu_ldub_code(env
, s
->pc
);
2371 LOG_DISAS("opc 0x%x\n", opc
);
2374 case 0xa: /* SVC I [RR] */
2375 insn
= ld_code2(env
, s
->pc
);
2380 tmp32_1
= tcg_const_i32(i
);
2381 tmp32_2
= tcg_const_i32(s
->next_pc
- s
->pc
);
2382 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
2383 tcg_gen_st_i32(tmp32_2
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
2384 gen_exception(EXCP_SVC
);
2385 s
->is_jmp
= DISAS_EXCP
;
2386 tcg_temp_free_i32(tmp32_1
);
2387 tcg_temp_free_i32(tmp32_2
);
2389 case 0xe: /* MVCL R1,R2 [RR] */
2390 insn
= ld_code2(env
, s
->pc
);
2391 decode_rr(s
, insn
, &r1
, &r2
);
2392 tmp32_1
= tcg_const_i32(r1
);
2393 tmp32_2
= tcg_const_i32(r2
);
2394 potential_page_fault(s
);
2395 gen_helper_mvcl(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
2397 tcg_temp_free_i32(tmp32_1
);
2398 tcg_temp_free_i32(tmp32_2
);
2400 case 0x28: /* LDR R1,R2 [RR] */
2401 insn
= ld_code2(env
, s
->pc
);
2402 decode_rr(s
, insn
, &r1
, &r2
);
2403 tmp
= load_freg(r2
);
2404 store_freg(r1
, tmp
);
2405 tcg_temp_free_i64(tmp
);
2407 case 0x38: /* LER R1,R2 [RR] */
2408 insn
= ld_code2(env
, s
->pc
);
2409 decode_rr(s
, insn
, &r1
, &r2
);
2410 tmp32_1
= load_freg32(r2
);
2411 store_freg32(r1
, tmp32_1
);
2412 tcg_temp_free_i32(tmp32_1
);
2414 case 0x43: /* IC R1,D2(X2,B2) [RX] */
2415 insn
= ld_code4(env
, s
->pc
);
2416 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2417 tmp2
= tcg_temp_new_i64();
2418 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2419 store_reg8(r1
, tmp2
);
2420 tcg_temp_free_i64(tmp
);
2421 tcg_temp_free_i64(tmp2
);
2423 case 0x44: /* EX R1,D2(X2,B2) [RX] */
2424 insn
= ld_code4(env
, s
->pc
);
2425 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2426 tmp2
= load_reg(r1
);
2427 tmp3
= tcg_const_i64(s
->pc
+ 4);
2430 gen_helper_ex(cc_op
, cpu_env
, cc_op
, tmp2
, tmp
, tmp3
);
2432 tcg_temp_free_i64(tmp
);
2433 tcg_temp_free_i64(tmp2
);
2434 tcg_temp_free_i64(tmp3
);
2436 case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
2437 insn
= ld_code4(env
, s
->pc
);
2438 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2439 tmp2
= tcg_temp_new_i64();
2440 tmp32_1
= tcg_temp_new_i32();
2441 tcg_gen_trunc_i64_i32(tmp32_1
, regs
[r1
]);
2442 gen_helper_cvd(tmp2
, tmp32_1
);
2443 tcg_gen_qemu_st64(tmp2
, tmp
, get_mem_index(s
));
2444 tcg_temp_free_i64(tmp
);
2445 tcg_temp_free_i64(tmp2
);
2446 tcg_temp_free_i32(tmp32_1
);
2448 case 0x60: /* STD R1,D2(X2,B2) [RX] */
2449 insn
= ld_code4(env
, s
->pc
);
2450 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2451 tmp2
= load_freg(r1
);
2452 tcg_gen_qemu_st64(tmp2
, tmp
, get_mem_index(s
));
2453 tcg_temp_free_i64(tmp
);
2454 tcg_temp_free_i64(tmp2
);
2456 case 0x68: /* LD R1,D2(X2,B2) [RX] */
2457 insn
= ld_code4(env
, s
->pc
);
2458 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2459 tmp2
= tcg_temp_new_i64();
2460 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
2461 store_freg(r1
, tmp2
);
2462 tcg_temp_free_i64(tmp
);
2463 tcg_temp_free_i64(tmp2
);
2465 case 0x70: /* STE R1,D2(X2,B2) [RX] */
2466 insn
= ld_code4(env
, s
->pc
);
2467 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2468 tmp2
= tcg_temp_new_i64();
2469 tmp32_1
= load_freg32(r1
);
2470 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
2471 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
2472 tcg_temp_free_i64(tmp
);
2473 tcg_temp_free_i64(tmp2
);
2474 tcg_temp_free_i32(tmp32_1
);
2476 case 0x78: /* LE R1,D2(X2,B2) [RX] */
2477 insn
= ld_code4(env
, s
->pc
);
2478 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2479 tmp2
= tcg_temp_new_i64();
2480 tmp32_1
= tcg_temp_new_i32();
2481 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2482 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
2483 store_freg32(r1
, tmp32_1
);
2484 tcg_temp_free_i64(tmp
);
2485 tcg_temp_free_i64(tmp2
);
2486 tcg_temp_free_i32(tmp32_1
);
2488 #ifndef CONFIG_USER_ONLY
2489 case 0x80: /* SSM D2(B2) [S] */
2490 /* Set System Mask */
2491 check_privileged(s
);
2492 insn
= ld_code4(env
, s
->pc
);
2493 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2494 tmp
= get_address(s
, 0, b2
, d2
);
2495 tmp2
= tcg_temp_new_i64();
2496 tmp3
= tcg_temp_new_i64();
2497 tcg_gen_andi_i64(tmp3
, psw_mask
, ~0xff00000000000000ULL
);
2498 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2499 tcg_gen_shli_i64(tmp2
, tmp2
, 56);
2500 tcg_gen_or_i64(psw_mask
, tmp3
, tmp2
);
2501 tcg_temp_free_i64(tmp
);
2502 tcg_temp_free_i64(tmp2
);
2503 tcg_temp_free_i64(tmp3
);
2505 case 0x82: /* LPSW D2(B2) [S] */
2507 check_privileged(s
);
2508 insn
= ld_code4(env
, s
->pc
);
2509 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2510 tmp
= get_address(s
, 0, b2
, d2
);
2511 tmp2
= tcg_temp_new_i64();
2512 tmp3
= tcg_temp_new_i64();
2513 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2514 tcg_gen_addi_i64(tmp
, tmp
, 4);
2515 tcg_gen_qemu_ld32u(tmp3
, tmp
, get_mem_index(s
));
2516 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2517 tcg_gen_shli_i64(tmp2
, tmp2
, 32);
2518 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
2519 tcg_temp_free_i64(tmp
);
2520 tcg_temp_free_i64(tmp2
);
2521 tcg_temp_free_i64(tmp3
);
2522 /* we need to keep cc_op intact */
2523 s
->is_jmp
= DISAS_JUMP
;
2525 case 0x83: /* DIAG R1,R3,D2 [RS] */
2526 /* Diagnose call (KVM hypercall) */
2527 check_privileged(s
);
2528 potential_page_fault(s
);
2529 insn
= ld_code4(env
, s
->pc
);
2530 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2531 tmp32_1
= tcg_const_i32(insn
& 0xfff);
2534 gen_helper_diag(tmp2
, cpu_env
, tmp32_1
, tmp2
, tmp3
);
2536 tcg_temp_free_i32(tmp32_1
);
2537 tcg_temp_free_i64(tmp2
);
2538 tcg_temp_free_i64(tmp3
);
2541 case 0x88: /* SRL R1,D2(B2) [RS] */
2542 case 0x89: /* SLL R1,D2(B2) [RS] */
2543 case 0x8a: /* SRA R1,D2(B2) [RS] */
2544 insn
= ld_code4(env
, s
->pc
);
2545 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2546 tmp
= get_address(s
, 0, b2
, d2
);
2547 tmp32_1
= load_reg32(r1
);
2548 tmp32_2
= tcg_temp_new_i32();
2549 tcg_gen_trunc_i64_i32(tmp32_2
, tmp
);
2550 tcg_gen_andi_i32(tmp32_2
, tmp32_2
, 0x3f);
2553 tcg_gen_shr_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2556 tcg_gen_shl_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2559 tcg_gen_sar_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2560 set_cc_s32(s
, tmp32_1
);
2565 store_reg32(r1
, tmp32_1
);
2566 tcg_temp_free_i64(tmp
);
2567 tcg_temp_free_i32(tmp32_1
);
2568 tcg_temp_free_i32(tmp32_2
);
2570 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2571 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2572 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2573 insn
= ld_code4(env
, s
->pc
);
2574 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2575 tmp
= get_address(s
, 0, b2
, d2
); /* shift */
2576 tmp2
= tcg_temp_new_i64();
2577 tmp32_1
= load_reg32(r1
);
2578 tmp32_2
= load_reg32(r1
+ 1);
2579 tcg_gen_concat_i32_i64(tmp2
, tmp32_2
, tmp32_1
); /* operand */
2582 tcg_gen_shr_i64(tmp2
, tmp2
, tmp
);
2585 tcg_gen_shl_i64(tmp2
, tmp2
, tmp
);
2588 tcg_gen_sar_i64(tmp2
, tmp2
, tmp
);
2589 set_cc_s64(s
, tmp2
);
2592 tcg_gen_shri_i64(tmp
, tmp2
, 32);
2593 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
2594 store_reg32(r1
, tmp32_1
);
2595 tcg_gen_trunc_i64_i32(tmp32_2
, tmp2
);
2596 store_reg32(r1
+ 1, tmp32_2
);
2597 tcg_temp_free_i64(tmp
);
2598 tcg_temp_free_i64(tmp2
);
2600 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2601 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2602 insn
= ld_code4(env
, s
->pc
);
2603 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2605 tmp
= get_address(s
, 0, b2
, d2
);
2606 tmp2
= tcg_temp_new_i64();
2607 tmp3
= tcg_const_i64(4);
2608 tmp4
= tcg_const_i64(0xffffffff00000000ULL
);
2609 for (i
= r1
;; i
= (i
+ 1) % 16) {
2611 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2612 tcg_gen_and_i64(regs
[i
], regs
[i
], tmp4
);
2613 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
2615 tcg_gen_qemu_st32(regs
[i
], tmp
, get_mem_index(s
));
2620 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
2622 tcg_temp_free_i64(tmp
);
2623 tcg_temp_free_i64(tmp2
);
2624 tcg_temp_free_i64(tmp3
);
2625 tcg_temp_free_i64(tmp4
);
2627 case 0x92: /* MVI D1(B1),I2 [SI] */
2628 insn
= ld_code4(env
, s
->pc
);
2629 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2630 tmp2
= tcg_const_i64(i2
);
2631 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2632 tcg_temp_free_i64(tmp
);
2633 tcg_temp_free_i64(tmp2
);
2635 case 0x94: /* NI D1(B1),I2 [SI] */
2636 case 0x96: /* OI D1(B1),I2 [SI] */
2637 case 0x97: /* XI D1(B1),I2 [SI] */
2638 insn
= ld_code4(env
, s
->pc
);
2639 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2640 tmp2
= tcg_temp_new_i64();
2641 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2644 tcg_gen_andi_i64(tmp2
, tmp2
, i2
);
2647 tcg_gen_ori_i64(tmp2
, tmp2
, i2
);
2650 tcg_gen_xori_i64(tmp2
, tmp2
, i2
);
2655 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2656 set_cc_nz_u64(s
, tmp2
);
2657 tcg_temp_free_i64(tmp
);
2658 tcg_temp_free_i64(tmp2
);
2660 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2661 insn
= ld_code4(env
, s
->pc
);
2662 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2663 tmp
= get_address(s
, 0, b2
, d2
);
2664 tmp32_1
= tcg_const_i32(r1
);
2665 tmp32_2
= tcg_const_i32(r3
);
2666 potential_page_fault(s
);
2667 gen_helper_lam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2668 tcg_temp_free_i64(tmp
);
2669 tcg_temp_free_i32(tmp32_1
);
2670 tcg_temp_free_i32(tmp32_2
);
2672 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2673 insn
= ld_code4(env
, s
->pc
);
2674 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2675 tmp
= get_address(s
, 0, b2
, d2
);
2676 tmp32_1
= tcg_const_i32(r1
);
2677 tmp32_2
= tcg_const_i32(r3
);
2678 potential_page_fault(s
);
2679 gen_helper_stam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2680 tcg_temp_free_i64(tmp
);
2681 tcg_temp_free_i32(tmp32_1
);
2682 tcg_temp_free_i32(tmp32_2
);
2684 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2685 insn
= ld_code4(env
, s
->pc
);
2686 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2687 tmp
= get_address(s
, 0, b2
, d2
);
2688 tmp32_1
= tcg_const_i32(r1
);
2689 tmp32_2
= tcg_const_i32(r3
);
2690 potential_page_fault(s
);
2691 gen_helper_mvcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2693 tcg_temp_free_i64(tmp
);
2694 tcg_temp_free_i32(tmp32_1
);
2695 tcg_temp_free_i32(tmp32_2
);
2697 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2698 insn
= ld_code4(env
, s
->pc
);
2699 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2700 tmp
= get_address(s
, 0, b2
, d2
);
2701 tmp32_1
= tcg_const_i32(r1
);
2702 tmp32_2
= tcg_const_i32(r3
);
2703 potential_page_fault(s
);
2704 gen_helper_clcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2706 tcg_temp_free_i64(tmp
);
2707 tcg_temp_free_i32(tmp32_1
);
2708 tcg_temp_free_i32(tmp32_2
);
2710 #ifndef CONFIG_USER_ONLY
2711 case 0xac: /* STNSM D1(B1),I2 [SI] */
2712 case 0xad: /* STOSM D1(B1),I2 [SI] */
2713 check_privileged(s
);
2714 insn
= ld_code4(env
, s
->pc
);
2715 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2716 tmp2
= tcg_temp_new_i64();
2717 tcg_gen_shri_i64(tmp2
, psw_mask
, 56);
2718 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2720 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2721 ((uint64_t)i2
<< 56) | 0x00ffffffffffffffULL
);
2723 tcg_gen_ori_i64(psw_mask
, psw_mask
, (uint64_t)i2
<< 56);
2725 tcg_temp_free_i64(tmp
);
2726 tcg_temp_free_i64(tmp2
);
2728 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2729 check_privileged(s
);
2730 insn
= ld_code4(env
, s
->pc
);
2731 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2732 tmp
= get_address(s
, 0, b2
, d2
);
2733 tmp2
= load_reg(r3
);
2734 tmp32_1
= tcg_const_i32(r1
);
2735 potential_page_fault(s
);
2736 gen_helper_sigp(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp2
);
2738 tcg_temp_free_i64(tmp
);
2739 tcg_temp_free_i64(tmp2
);
2740 tcg_temp_free_i32(tmp32_1
);
2742 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2743 check_privileged(s
);
2744 insn
= ld_code4(env
, s
->pc
);
2745 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2746 tmp32_1
= tcg_const_i32(r1
);
2747 potential_page_fault(s
);
2748 gen_helper_lra(cc_op
, cpu_env
, tmp
, tmp32_1
);
2750 tcg_temp_free_i64(tmp
);
2751 tcg_temp_free_i32(tmp32_1
);
2755 insn
= ld_code4(env
, s
->pc
);
2756 op
= (insn
>> 16) & 0xff;
2758 case 0x9c: /* STFPC D2(B2) [S] */
2760 b2
= (insn
>> 12) & 0xf;
2761 tmp32_1
= tcg_temp_new_i32();
2762 tmp
= tcg_temp_new_i64();
2763 tmp2
= get_address(s
, 0, b2
, d2
);
2764 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2765 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2766 tcg_gen_qemu_st32(tmp
, tmp2
, get_mem_index(s
));
2767 tcg_temp_free_i32(tmp32_1
);
2768 tcg_temp_free_i64(tmp
);
2769 tcg_temp_free_i64(tmp2
);
2772 disas_b2(env
, s
, op
, insn
);
2777 insn
= ld_code4(env
, s
->pc
);
2778 op
= (insn
>> 16) & 0xff;
2779 r3
= (insn
>> 12) & 0xf; /* aka m3 */
2780 r1
= (insn
>> 4) & 0xf;
2782 disas_b3(env
, s
, op
, r3
, r1
, r2
);
2784 #ifndef CONFIG_USER_ONLY
2785 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2787 check_privileged(s
);
2788 insn
= ld_code4(env
, s
->pc
);
2789 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2790 tmp
= get_address(s
, 0, b2
, d2
);
2791 tmp32_1
= tcg_const_i32(r1
);
2792 tmp32_2
= tcg_const_i32(r3
);
2793 potential_page_fault(s
);
2794 gen_helper_stctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2795 tcg_temp_free_i64(tmp
);
2796 tcg_temp_free_i32(tmp32_1
);
2797 tcg_temp_free_i32(tmp32_2
);
2799 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2801 check_privileged(s
);
2802 insn
= ld_code4(env
, s
->pc
);
2803 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2804 tmp
= get_address(s
, 0, b2
, d2
);
2805 tmp32_1
= tcg_const_i32(r1
);
2806 tmp32_2
= tcg_const_i32(r3
);
2807 potential_page_fault(s
);
2808 gen_helper_lctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2809 tcg_temp_free_i64(tmp
);
2810 tcg_temp_free_i32(tmp32_1
);
2811 tcg_temp_free_i32(tmp32_2
);
2815 insn
= ld_code4(env
, s
->pc
);
2816 r1
= (insn
>> 4) & 0xf;
2818 op
= (insn
>> 16) & 0xff;
2819 disas_b9(env
, s
, op
, r1
, r2
);
2821 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2822 insn
= ld_code4(env
, s
->pc
);
2823 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2824 tmp
= get_address(s
, 0, b2
, d2
);
2825 tmp32_1
= tcg_const_i32(r1
);
2826 tmp32_2
= tcg_const_i32(r3
);
2827 potential_page_fault(s
);
2828 gen_helper_cs(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2830 tcg_temp_free_i64(tmp
);
2831 tcg_temp_free_i32(tmp32_1
);
2832 tcg_temp_free_i32(tmp32_2
);
2834 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2835 insn
= ld_code4(env
, s
->pc
);
2836 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2837 tmp
= get_address(s
, 0, b2
, d2
);
2838 tmp32_1
= load_reg32(r1
);
2839 tmp32_2
= tcg_const_i32(r3
);
2840 potential_page_fault(s
);
2841 gen_helper_clm(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2843 tcg_temp_free_i64(tmp
);
2844 tcg_temp_free_i32(tmp32_1
);
2845 tcg_temp_free_i32(tmp32_2
);
2847 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2848 insn
= ld_code4(env
, s
->pc
);
2849 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2850 tmp
= get_address(s
, 0, b2
, d2
);
2851 tmp32_1
= load_reg32(r1
);
2852 tmp32_2
= tcg_const_i32(r3
);
2853 potential_page_fault(s
);
2854 gen_helper_stcm(cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2855 tcg_temp_free_i64(tmp
);
2856 tcg_temp_free_i32(tmp32_1
);
2857 tcg_temp_free_i32(tmp32_2
);
2859 case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
2860 insn
= ld_code4(env
, s
->pc
);
2861 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2863 /* effectively a 32-bit load */
2864 tmp
= get_address(s
, 0, b2
, d2
);
2865 tmp32_1
= tcg_temp_new_i32();
2866 tmp32_2
= tcg_const_i32(r3
);
2867 tcg_gen_qemu_ld32u(tmp
, tmp
, get_mem_index(s
));
2868 store_reg32_i64(r1
, tmp
);
2869 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
2870 set_cc_icm(s
, tmp32_2
, tmp32_1
);
2871 tcg_temp_free_i64(tmp
);
2872 tcg_temp_free_i32(tmp32_1
);
2873 tcg_temp_free_i32(tmp32_2
);
2875 uint32_t mask
= 0x00ffffffUL
;
2876 uint32_t shift
= 24;
2878 tmp
= get_address(s
, 0, b2
, d2
);
2879 tmp2
= tcg_temp_new_i64();
2880 tmp32_1
= load_reg32(r1
);
2881 tmp32_2
= tcg_temp_new_i32();
2882 tmp32_3
= tcg_const_i32(r3
);
2883 tmp32_4
= tcg_const_i32(0);
2886 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2887 tcg_gen_trunc_i64_i32(tmp32_2
, tmp2
);
2889 tcg_gen_shli_i32(tmp32_2
, tmp32_2
, shift
);
2891 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, mask
);
2892 tcg_gen_or_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2893 tcg_gen_or_i32(tmp32_4
, tmp32_4
, tmp32_2
);
2894 tcg_gen_addi_i64(tmp
, tmp
, 1);
2896 m3
= (m3
<< 1) & 0xf;
2897 mask
= (mask
>> 8) | 0xff000000UL
;
2900 store_reg32(r1
, tmp32_1
);
2901 set_cc_icm(s
, tmp32_3
, tmp32_4
);
2902 tcg_temp_free_i64(tmp
);
2903 tcg_temp_free_i64(tmp2
);
2904 tcg_temp_free_i32(tmp32_1
);
2905 tcg_temp_free_i32(tmp32_2
);
2906 tcg_temp_free_i32(tmp32_3
);
2907 tcg_temp_free_i32(tmp32_4
);
2909 /* i.e. env->cc = 0 */
2910 gen_op_movi_cc(s
, 0);
2913 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2914 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2915 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2916 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2917 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2918 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2919 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2920 insn
= ld_code6(env
, s
->pc
);
2921 vl
= tcg_const_i32((insn
>> 32) & 0xff);
2922 b1
= (insn
>> 28) & 0xf;
2923 b2
= (insn
>> 12) & 0xf;
2924 d1
= (insn
>> 16) & 0xfff;
2926 tmp
= get_address(s
, 0, b1
, d1
);
2927 tmp2
= get_address(s
, 0, b2
, d2
);
2930 gen_op_mvc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
2933 potential_page_fault(s
);
2934 gen_helper_nc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2938 gen_op_clc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
2941 potential_page_fault(s
);
2942 gen_helper_oc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2946 potential_page_fault(s
);
2947 gen_helper_xc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2951 potential_page_fault(s
);
2952 gen_helper_tr(cpu_env
, vl
, tmp
, tmp2
);
2956 potential_page_fault(s
);
2957 gen_helper_unpk(cpu_env
, vl
, tmp
, tmp2
);
2962 tcg_temp_free_i64(tmp
);
2963 tcg_temp_free_i64(tmp2
);
2965 #ifndef CONFIG_USER_ONLY
2966 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2967 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2968 check_privileged(s
);
2969 potential_page_fault(s
);
2970 insn
= ld_code6(env
, s
->pc
);
2971 r1
= (insn
>> 36) & 0xf;
2972 r3
= (insn
>> 32) & 0xf;
2973 b1
= (insn
>> 28) & 0xf;
2974 d1
= (insn
>> 16) & 0xfff;
2975 b2
= (insn
>> 12) & 0xf;
2979 tmp2
= get_address(s
, 0, b1
, d1
);
2980 tmp3
= get_address(s
, 0, b2
, d2
);
2982 gen_helper_mvcp(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
2984 gen_helper_mvcs(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
2987 tcg_temp_free_i64(tmp
);
2988 tcg_temp_free_i64(tmp2
);
2989 tcg_temp_free_i64(tmp3
);
2993 insn
= ld_code6(env
, s
->pc
);
2996 r1
= (insn
>> 36) & 0xf;
2997 x2
= (insn
>> 32) & 0xf;
2998 b2
= (insn
>> 28) & 0xf;
2999 d2
= ((int)((((insn
>> 16) & 0xfff)
3000 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
3001 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
3003 #ifndef CONFIG_USER_ONLY
3005 /* Test Protection */
3006 check_privileged(s
);
3007 insn
= ld_code6(env
, s
->pc
);
3009 disas_e5(env
, s
, insn
);
3013 insn
= ld_code6(env
, s
->pc
);
3016 r1
= (insn
>> 36) & 0xf;
3017 r3
= (insn
>> 32) & 0xf;
3018 b2
= (insn
>> 28) & 0xf;
3019 d2
= ((int)((((insn
>> 16) & 0xfff)
3020 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
3021 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
3024 insn
= ld_code6(env
, s
->pc
);
3027 r1
= (insn
>> 36) & 0xf;
3028 x2
= (insn
>> 32) & 0xf;
3029 b2
= (insn
>> 28) & 0xf;
3030 d2
= (short)((insn
>> 16) & 0xfff);
3031 r1b
= (insn
>> 12) & 0xf;
3032 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
3035 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
3036 gen_illegal_opcode(s
);
3041 /* ====================================================================== */
3042 /* Define the insn format enumeration. */
3043 #define F0(N) FMT_##N,
3044 #define F1(N, X1) F0(N)
3045 #define F2(N, X1, X2) F0(N)
3046 #define F3(N, X1, X2, X3) F0(N)
3047 #define F4(N, X1, X2, X3, X4) F0(N)
3048 #define F5(N, X1, X2, X3, X4, X5) F0(N)
3051 #include "insn-format.def"
3061 /* Define a structure to hold the decoded fields. We'll store each inside
3062 an array indexed by an enum. In order to conserve memory, we'll arrange
3063 for fields that do not exist at the same time to overlap, thus the "C"
3064 for compact. For checking purposes there is an "O" for original index
3065 as well that will be applied to availability bitmaps. */
3067 enum DisasFieldIndexO
{
3090 enum DisasFieldIndexC
{
3121 struct DisasFields
{
3124 unsigned presentC
:16;
3125 unsigned int presentO
;
3129 /* This is the way fields are to be accessed out of DisasFields. */
3130 #define have_field(S, F) have_field1((S), FLD_O_##F)
3131 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* have_field1: test whether decoded field `c` (an original-index enum value)
   is present in this DisasFields, by probing bit `c` of the presentO
   availability bitmap.  NOTE(review): this span is extraction-damaged —
   brace lines from the original function were lost; only visible tokens
   are documented. */
3133 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
3135 return (f
->presentO
>> c
) & 1;
/* get_field1: fetch a decoded field value.  `o` is the original-index enum
   used for the availability assert; `c` is the compact index presumably used
   to index the field storage.  NOTE(review): extraction-damaged — the
   function's return statement is missing from this view, so the actual
   field access cannot be confirmed here. */
3138 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
3139 enum DisasFieldIndexC c
)
3141 assert(have_field1(f
, o
));
3145 /* Describe the layout of each field in each format. */
3146 typedef struct DisasField
{
3148 unsigned int size
:8;
3149 unsigned int type
:2;
3150 unsigned int indexC
:6;
3151 enum DisasFieldIndexO indexO
:8;
3154 typedef struct DisasFormatInfo
{
3155 DisasField op
[NUM_C_FIELD
];
3158 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
3159 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
3160 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3161 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
3162 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3163 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3164 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
3165 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3166 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3167 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3168 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3169 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3170 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
3171 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
3173 #define F0(N) { { } },
3174 #define F1(N, X1) { { X1 } },
3175 #define F2(N, X1, X2) { { X1, X2 } },
3176 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
3177 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
3178 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
3180 static const DisasFormatInfo format_info
[] = {
3181 #include "insn-format.def"
3199 /* Generally, we'll extract operands into this structures, operate upon
3200 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3201 of routines below for more details. */
3203 bool g_out
, g_out2
, g_in1
, g_in2
;
3204 TCGv_i64 out
, out2
, in1
, in2
;
3208 /* Return values from translate_one, indicating the state of the TB. */
3210 /* Continue the TB. */
3212 /* We have emitted one or more goto_tb. No fixup required. */
3214 /* We are not using a goto_tb (for whatever reason), but have updated
3215 the PC (for whatever reason), so there's no need to do it again on
3218 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3219 updated the PC for the next instruction to be executed. */
3221 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3222 No following code will be executed. */
3226 typedef enum DisasFacility
{
3227 FAC_Z
, /* zarch (default) */
3228 FAC_CASS
, /* compare and swap and store */
3229 FAC_CASS2
, /* compare and swap and store 2*/
3230 FAC_DFP
, /* decimal floating point */
3231 FAC_DFPR
, /* decimal floating point rounding */
3232 FAC_DO
, /* distinct operands */
3233 FAC_EE
, /* execute extensions */
3234 FAC_EI
, /* extended immediate */
3235 FAC_FPE
, /* floating point extension */
3236 FAC_FPSSH
, /* floating point support sign handling */
3237 FAC_FPRGR
, /* FPR-GR transfer */
3238 FAC_GIE
, /* general instructions extension */
3239 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
3240 FAC_HW
, /* high-word */
3241 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
3242 FAC_LOC
, /* load/store on condition */
3243 FAC_LD
, /* long displacement */
3244 FAC_PC
, /* population count */
3245 FAC_SCF
, /* store clock fast */
3246 FAC_SFLE
, /* store facility list extended */
3252 DisasFacility fac
:6;
3256 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
3257 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
3258 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
3259 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
3260 void (*help_cout
)(DisasContext
*, DisasOps
*);
3261 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
3266 /* ====================================================================== */
3267 /* Miscelaneous helpers, used by several operations. */
/* help_goto_direct: branch to a compile-time-known destination `dest`.
   - If `dest` is the sequential next PC, fall through (body of that `if`
     is not visible in this extraction-damaged span).
   - If a direct TB link is allowed (use_goto_tb), flush cc state, set
     psw_addr and chain via tcg_gen_exit_tb(tb) -> EXIT_GOTO_TB.
   - Otherwise just update psw_addr and report EXIT_PC_UPDATED so the
     main loop re-dispatches.
   NOTE(review): brace/goto_tb lines were lost by the extraction; only
   visible tokens are documented. */
3269 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
3271 if (dest
== s
->next_pc
) {
3274 if (use_goto_tb(s
, dest
)) {
3275 gen_update_cc_op(s
);
/* chained exit: TB pointer encodes the link slot */
3277 tcg_gen_movi_i64(psw_addr
, dest
);
3278 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
3279 return EXIT_GOTO_TB
;
/* non-linkable path: update PC only */
3281 tcg_gen_movi_i64(psw_addr
, dest
);
3282 return EXIT_PC_UPDATED
;
3286 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
3287 bool is_imm
, int imm
, TCGv_i64 cdest
)
3290 uint64_t dest
= s
->pc
+ 2 * imm
;
3293 /* Take care of the special cases first. */
3294 if (c
->cond
== TCG_COND_NEVER
) {
3299 if (dest
== s
->next_pc
) {
3300 /* Branch to next. */
3304 if (c
->cond
== TCG_COND_ALWAYS
) {
3305 ret
= help_goto_direct(s
, dest
);
3309 if (TCGV_IS_UNUSED_I64(cdest
)) {
3310 /* E.g. bcr %r0 -> no branch. */
3314 if (c
->cond
== TCG_COND_ALWAYS
) {
3315 tcg_gen_mov_i64(psw_addr
, cdest
);
3316 ret
= EXIT_PC_UPDATED
;
3321 if (use_goto_tb(s
, s
->next_pc
)) {
3322 if (is_imm
&& use_goto_tb(s
, dest
)) {
3323 /* Both exits can use goto_tb. */
3324 gen_update_cc_op(s
);
3326 lab
= gen_new_label();
3328 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3330 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3333 /* Branch not taken. */
3335 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3336 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3341 tcg_gen_movi_i64(psw_addr
, dest
);
3342 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
3346 /* Fallthru can use goto_tb, but taken branch cannot. */
3347 /* Store taken branch destination before the brcond. This
3348 avoids having to allocate a new local temp to hold it.
3349 We'll overwrite this in the not taken case anyway. */
3351 tcg_gen_mov_i64(psw_addr
, cdest
);
3354 lab
= gen_new_label();
3356 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3358 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3361 /* Branch not taken. */
3362 gen_update_cc_op(s
);
3364 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3365 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3369 tcg_gen_movi_i64(psw_addr
, dest
);
3371 ret
= EXIT_PC_UPDATED
;
3374 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3375 Most commonly we're single-stepping or some other condition that
3376 disables all use of goto_tb. Just update the PC and exit. */
3378 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
3380 cdest
= tcg_const_i64(dest
);
3384 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
3387 TCGv_i32 t0
= tcg_temp_new_i32();
3388 TCGv_i64 t1
= tcg_temp_new_i64();
3389 TCGv_i64 z
= tcg_const_i64(0);
3390 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
3391 tcg_gen_extu_i32_i64(t1
, t0
);
3392 tcg_temp_free_i32(t0
);
3393 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
3394 tcg_temp_free_i64(t1
);
3395 tcg_temp_free_i64(z
);
3399 tcg_temp_free_i64(cdest
);
3401 tcg_temp_free_i64(next
);
3403 ret
= EXIT_PC_UPDATED
;
3411 /* ====================================================================== */
3412 /* The operations. These perform the bulk of the work for any insn,
3413 usually after the operands have been loaded and output initialized. */
/* op_abs: emit code computing the absolute value of in2 into out via the
   abs_i64 helper.  NOTE(review): extraction-damaged span — braces and the
   function's return statement are missing from view. */
3415 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
3417 gen_helper_abs_i64(o
->out
, o
->in2
);
/* op_add: out = in1 + in2 (64-bit add).  Condition-code computation is
   handled by the separate cout_* generators, not here.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3421 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
3423 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
/* op_addc: add with carry.  out = in1 + in2 + carry, where the carry is
   extracted from the current condition code: cc_op is zero-extended to
   64 bits and shifted right by 1 (for the ADDU cc values, bit 1 is the
   carry bit).  NOTE(review): extraction-damaged — the cc-materialization
   call between the add and the extu, plus braces/return, are missing
   from this view. */
3427 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
3431 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3433 /* XXX possible optimization point */
3435 cc
= tcg_temp_new_i64();
3436 tcg_gen_extu_i32_i64(cc
, cc_op
);
/* isolate the carry bit from the cc value */
3437 tcg_gen_shri_i64(cc
, cc
, 1);
3439 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3440 tcg_temp_free_i64(cc
);
/* op_and: out = in1 & in2 (64-bit bitwise AND); cc handled by cout_*.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3444 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
3446 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
/* op_andi: AND-immediate into a sub-field of a register (NI-family).
   insn->data packs the field position: low byte = bit shift, high bits =
   field width.  The immediate in in2 is shifted into place and the bits
   OUTSIDE the field are forced to 1 (ori with ~mask) so the AND leaves
   them unchanged; cc is then derived from only the manipulated bits.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3450 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
3452 int shift
= s
->insn
->data
& 0xff;
3453 int size
= s
->insn
->data
>> 8;
3454 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3457 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
/* keep all bits outside the target field */
3458 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3459 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
3461 /* Produce the CC from only the bits manipulated. */
3462 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3463 set_cc_nz_u64(s
, cc_dst
);
/* op_bas: branch-and-save.  Stores the link information for the next
   sequential instruction into out (the R1 register), then, if the branch
   target in2 is a live TCG value (r2 != 0 case), loads it into psw_addr
   and reports EXIT_PC_UPDATED.  When in2 is unused (bas to r0) the
   no-branch path falls through (not visible here).
   NOTE(review): extraction-damaged — braces and the fallthrough return
   are missing from view. */
3467 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
3469 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3470 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
3471 tcg_gen_mov_i64(psw_addr
, o
->in2
);
3472 return EXIT_PC_UPDATED
;
/* op_basi: branch-relative-and-save (immediate form).  Saves the link
   info for the next PC into out, then branches to pc + 2*i2 (halfword
   displacement) via help_goto_direct.
   NOTE(review): extraction-damaged — braces are missing from view. */
3478 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
3480 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3481 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
/* op_bc: conditional branch (BC/BCR/BRC family).  m1 is the condition
   mask; the branch target is either the immediate i2 (relative,
   halfwords) when present, or the register value in in2.  disas_jcc
   converts the mask into a DisasCompare, and help_branch emits the
   actual control flow.
   NOTE(review): extraction-damaged — braces and the DisasCompare
   declaration are missing from view. */
3484 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
3486 int m1
= get_field(s
->fields
, m1
);
3487 bool is_imm
= have_field(s
->fields
, i2
);
3488 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3491 disas_jcc(s
, &c
, m1
);
3492 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
/* op_bct32: 32-bit branch-on-count (BCT/BCTR).  Decrements the low half
   of r1 and branches if the decremented 32-bit value is non-zero
   (TCG_COND_NE against 0).  The full 64-bit decrement is computed in a
   temp, the low 32 bits are stored back to r1, and the comparison
   operands are set up as 32-bit values in c.u.s32.
   NOTE(review): extraction-damaged — braces, the DisasCompare/temp
   declarations, and the g1/g2 flag setup lines are missing from view. */
3495 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
3497 int r1
= get_field(s
->fields
, r1
);
3498 bool is_imm
= have_field(s
->fields
, i2
);
3499 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3503 c
.cond
= TCG_COND_NE
;
3508 t
= tcg_temp_new_i64();
3509 tcg_gen_subi_i64(t
, regs
[r1
], 1);
/* write back only the low 32 bits of the decremented count */
3510 store_reg32_i64(r1
, t
);
3511 c
.u
.s32
.a
= tcg_temp_new_i32();
3512 c
.u
.s32
.b
= tcg_const_i32(0);
3513 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
3514 tcg_temp_free_i64(t
);
3516 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
/* op_bct64: 64-bit branch-on-count (BCTG/BCTGR).  Decrements the full
   r1 register in place and branches while it is non-zero; the live
   register itself is used as comparison operand a (c.u.s64.a = regs[r1]).
   NOTE(review): extraction-damaged — braces, the DisasCompare
   declaration, and the g1/g2 flag setup are missing from view. */
3519 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
3521 int r1
= get_field(s
->fields
, r1
);
3522 bool is_imm
= have_field(s
->fields
, i2
);
3523 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3526 c
.cond
= TCG_COND_NE
;
3531 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
3532 c
.u
.s64
.a
= regs
[r1
];
3533 c
.u
.s64
.b
= tcg_const_i64(0);
3535 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
/* op_divs32: 32-bit signed divide via helper.  The helper returns the
   remainder through out2 and the quotient via the CPU's low-128 return
   slot, retrieved with return_low128 into out.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3538 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
3540 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3541 return_low128(o
->out
);
3545 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
3547 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3548 return_low128(o
->out
);
3552 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
3554 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3555 return_low128(o
->out
);
3559 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
3561 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3562 return_low128(o
->out
);
/* op_insi: insert-immediate into a bit field of a register (IIxx family).
   insn->data packs position: low byte = shift, high bits = width; the
   deposit copies `size` bits of in2 into in1 at `shift`, producing out.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3566 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
3568 int shift
= s
->insn
->data
& 0xff;
3569 int size
= s
->insn
->data
>> 8;
3570 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
3574 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
3576 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
3580 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
3582 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
3586 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
3588 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
3592 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
3594 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
3598 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
3600 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
3604 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
3606 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
3610 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
3612 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
/* op_mov2: move in2 into out by transferring ownership of the TCG value
   rather than copying: the g_out "is a global" flag is taken from g_in2,
   and in2 is marked unused so the generic cleanup code does not free it.
   NOTE(review): extraction-damaged — the actual `o->out = o->in2`
   assignment and braces/return were lost from this view; presumably the
   value itself is also transferred — confirm against upstream. */
3616 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
3619 o
->g_out
= o
->g_in2
;
3620 TCGV_UNUSED_I64(o
->in2
);
3625 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3627 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3631 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3633 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3634 return_low128(o
->out2
);
3638 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3640 gen_helper_nabs_i64(o
->out
, o
->in2
);
3644 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3646 tcg_gen_neg_i64(o
->out
, o
->in2
);
3650 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3652 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3656 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3658 int shift
= s
->insn
->data
& 0xff;
3659 int size
= s
->insn
->data
>> 8;
3660 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3663 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3664 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3666 /* Produce the CC from only the bits manipulated. */
3667 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3668 set_cc_nz_u64(s
, cc_dst
);
3672 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3674 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3678 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3680 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3684 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3686 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3690 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3692 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3696 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3698 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
/* op_subb: subtract with borrow.  Computes in1 - in2 - !carry using the
   identity a - b = a + ~b + 1: in2 is complemented in place, added to
   in1, then the borrow (bit 1 of the current cc value, zero-extended
   from cc_op and shifted right by 1) is added on top.
   NOTE(review): extraction-damaged — the cc-materialization call,
   braces, and return are missing from this view. */
3702 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3707 tcg_gen_not_i64(o
->in2
, o
->in2
);
3708 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3710 /* XXX possible optimization point */
3712 cc
= tcg_temp_new_i64();
3713 tcg_gen_extu_i32_i64(cc
, cc_op
);
/* isolate the borrow bit from the cc value */
3714 tcg_gen_shri_i64(cc
, cc
, 1);
3715 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3716 tcg_temp_free_i64(cc
);
3720 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3722 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
/* op_xori: XOR-immediate into a sub-field of a register (XI-family).
   insn->data packs the field position: low byte = shift, high bits =
   width.  The immediate is shifted into place and XORed (bits outside
   the field are zero in in2, so they pass through unchanged); cc is
   derived from only the manipulated bits, mirroring op_andi/op_ori.
   NOTE(review): extraction-damaged — braces/return are missing from view. */
3726 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3728 int shift
= s
->insn
->data
& 0xff;
3729 int size
= s
->insn
->data
>> 8;
3730 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3733 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3734 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3736 /* Produce the CC from only the bits manipulated. */
3737 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3738 set_cc_nz_u64(s
, cc_dst
);
3742 /* ====================================================================== */
3743 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3744 the original inputs), update the various cc data structures in order to
3745 be able to compute the new condition code. */
3747 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3749 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3752 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3754 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3757 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3759 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3762 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3764 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3767 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3769 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3772 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3774 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3777 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3779 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3782 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3784 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3787 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3789 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3792 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3794 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3797 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3799 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3802 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3804 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3807 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3809 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3812 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3814 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3817 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3819 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3822 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3824 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3827 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3829 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3830 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3833 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3835 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3838 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3840 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3843 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3845 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3848 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3850 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3853 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3855 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3858 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3860 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3863 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3865 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3868 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3870 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3873 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3875 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3878 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3880 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3883 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3885 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3888 /* ====================================================================== */
3889 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3890 with the TCG register to which we will write. Used in combination with
3891 the "wout" generators, in some cases we need a new temporary, and in
3892 some cases we can write to a TCG global. */
3894 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 o
->out
= tcg_temp_new_i64();
3899 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3901 o
->out
= tcg_temp_new_i64();
3902 o
->out2
= tcg_temp_new_i64();
3905 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3907 o
->out
= regs
[get_field(f
, r1
)];
3911 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3913 /* ??? Specification exception: r1 must be even. */
3914 int r1
= get_field(f
, r1
);
3916 o
->out2
= regs
[(r1
+ 1) & 15];
3917 o
->g_out
= o
->g_out2
= true;
3920 /* ====================================================================== */
3921 /* The "Write OUTput" generators. These generally perform some non-trivial
3922 copy of data to TCG globals, or to main memory. The trivial cases are
3923 generally handled by having a "prep" generator install the TCG global
3924 as the destination of the operation. */
/* Write the full 64-bit output to general register r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
/* Write the low 32 bits of the output to register r1, leaving the
   register's high half untouched (store_reg32_i64 semantics). */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
/* Write the two 32-bit outputs to the r1/r1+1 register pair:
   OUT goes to r1, OUT2 to the following register (mod 16). */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}
3944 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3946 /* ??? Specification exception: r1 must be even. */
3947 int r1
= get_field(f
, r1
);
3948 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3949 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3950 store_reg32_i64(r1
, o
->out
);
3953 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3955 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3956 store_reg32_i64(get_field(f
, r1
), o
->out
);
/* Store the low 32 bits of the output to guest memory at addr1
   (addr1 was set up earlier by an in1_la* generator). */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
/* Store the full 64-bit output to guest memory at addr1. */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
3970 /* ====================================================================== */
3971 /* The "INput 1" generators. These load the first operand to an insn. */
/* First input: the 64-bit value of register r1 (loaded into a temp
   by load_reg, so IN1 is safe to modify). */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
3978 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3980 o
->in1
= regs
[get_field(f
, r1
)];
/* First input: the odd register of the r1/r1+1 pair (mod 16). */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}
/* First input: the low 32 bits of register r1+1, sign-extended to 64. */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}
/* First input: the low 32 bits of register r1+1, zero-extended to 64. */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even. */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
4007 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4009 /* ??? Specification exception: r1 must be even. */
4010 int r1
= get_field(f
, r1
);
4011 o
->in1
= tcg_temp_new_i64();
4012 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
/* First input: the 64-bit value of register r2. */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
/* First input: the 64-bit value of register r3. */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
/* First "input": compute the b1/d1 effective address into addr1
   (no index register for this format, hence the 0). */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
4030 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4033 o
->in1
= tcg_temp_new_i64();
4034 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4037 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4040 o
->in1
= tcg_temp_new_i64();
4041 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4044 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4047 o
->in1
= tcg_temp_new_i64();
4048 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4051 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4054 o
->in1
= tcg_temp_new_i64();
4055 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4058 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4061 o
->in1
= tcg_temp_new_i64();
4062 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4065 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4068 o
->in1
= tcg_temp_new_i64();
4069 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4072 /* ====================================================================== */
4073 /* The "INput 2" generators. These load the second operand to an insn. */
/* Second input: the 64-bit value of register r2. */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
4080 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4082 o
->in2
= regs
[get_field(f
, r2
)];
4086 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4088 int r2
= get_field(f
, r2
);
4090 o
->in2
= load_reg(r2
);
/* Second input: the low 8 bits of register r2, sign-extended to 64. */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
/* Second input: the low 8 bits of register r2, zero-extended to 64. */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
/* Second input: the low 16 bits of register r2, sign-extended to 64. */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
/* Second input: the low 16 bits of register r2, zero-extended to 64. */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
/* Second input: the 64-bit value of register r3. */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
/* Second input: the low 32 bits of register r2, sign-extended to 64. */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
/* Second input: the low 32 bits of register r2, zero-extended to 64. */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
4135 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4137 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4138 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
/* Second input: a PC-relative address -- the current insn address plus
   the signed i2 immediate counted in halfwords (hence the * 2). */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
4146 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4149 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4152 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4155 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4158 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4161 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4164 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4167 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4170 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4173 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4176 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4179 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4182 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4185 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4188 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4191 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
/* Second input: the i2 immediate as-is (sign comes from the field
   definition itself). */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
/* Second input: the i2 immediate truncated to 8 bits and zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
/* Second input: the i2 immediate truncated to 16 bits and zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
/* Second input: the i2 immediate truncated to 32 bits and zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
/* Second input: zero-extended 16-bit i2 immediate shifted left by the
   per-insn amount recorded in the insn table's data field. */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
/* Second input: zero-extended 32-bit i2 immediate shifted left by the
   per-insn amount recorded in the insn table's data field. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4226 /* ====================================================================== */
4228 /* Find opc within the table of insns. This is formulated as a switch
4229 statement so that (1) we get compile-time notice of cut-paste errors
4230 for duplicated opcodes, and (2) the compiler generates the binary
4231 search tree, rather than us having to post-process the table. */
4233 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4234 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4236 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4238 enum DisasInsnEnum
{
4239 #include "insn-data.def"
4243 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4248 .help_in1 = in1_##I1, \
4249 .help_in2 = in2_##I2, \
4250 .help_prep = prep_##P, \
4251 .help_wout = wout_##W, \
4252 .help_cout = cout_##CC, \
4253 .help_op = op_##OP, \
4257 /* Allow 0 to be used for NULL in the table below. */
4265 static const DisasInsn insn_info
[] = {
4266 #include "insn-data.def"
4270 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4271 case OPC: return &insn_info[insn_ ## NM];
4273 static const DisasInsn
*lookup_opc(uint16_t opc
)
4276 #include "insn-data.def"
4285 /* Extract a field from the insn. The INSN should be left-aligned in
4286 the uint64_t so that we can more easily utilize the big-bit-endian
4287 definitions we extract from the Principals of Operation. */
4289 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4297 /* Zero extract the field from the insn. */
4298 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4300 /* Sign-extend, or un-swap the field as necessary. */
4302 case 0: /* unsigned */
4304 case 1: /* signed */
4305 assert(f
->size
<= 32);
4306 m
= 1u << (f
->size
- 1);
4309 case 2: /* dl+dh split, signed 20 bit. */
4310 r
= ((int8_t)r
<< 12) | (r
>> 8);
4316 /* Validate that the "compressed" encoding we selected above is valid.
4317 I.e. we havn't make two different original fields overlap. */
4318 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4319 o
->presentC
|= 1 << f
->indexC
;
4320 o
->presentO
|= 1 << f
->indexO
;
4322 o
->c
[f
->indexC
] = r
;
4325 /* Lookup the insn at the current PC, extracting the operands into O and
4326 returning the info struct for the insn. Returns NULL for invalid insn. */
4328 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4331 uint64_t insn
, pc
= s
->pc
;
4333 const DisasInsn
*info
;
4335 insn
= ld_code2(env
, pc
);
4336 op
= (insn
>> 8) & 0xff;
4337 ilen
= get_ilen(op
);
4338 s
->next_pc
= s
->pc
+ ilen
;
4345 insn
= ld_code4(env
, pc
) << 32;
4348 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4354 /* We can't actually determine the insn format until we've looked up
4355 the full insn opcode. Which we can't do without locating the
4356 secondary opcode. Assume by default that OP2 is at bit 40; for
4357 those smaller insns that don't actually have a secondary opcode
4358 this will correctly result in OP2 = 0. */
4364 case 0xb2: /* S, RRF, RRE */
4365 case 0xb3: /* RRE, RRD, RRF */
4366 case 0xb9: /* RRE, RRF */
4367 case 0xe5: /* SSE, SIL */
4368 op2
= (insn
<< 8) >> 56;
4372 case 0xc0: /* RIL */
4373 case 0xc2: /* RIL */
4374 case 0xc4: /* RIL */
4375 case 0xc6: /* RIL */
4376 case 0xc8: /* SSF */
4377 case 0xcc: /* RIL */
4378 op2
= (insn
<< 12) >> 60;
4380 case 0xd0 ... 0xdf: /* SS */
4386 case 0xee ... 0xf3: /* SS */
4387 case 0xf8 ... 0xfd: /* SS */
4391 op2
= (insn
<< 40) >> 56;
4395 memset(f
, 0, sizeof(*f
));
4399 /* Lookup the instruction. */
4400 info
= lookup_opc(op
<< 8 | op2
);
4402 /* If we found it, extract the operands. */
4404 DisasFormat fmt
= info
->fmt
;
4407 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4408 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4414 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4416 const DisasInsn
*insn
;
4417 ExitStatus ret
= NO_EXIT
;
4421 insn
= extract_insn(env
, s
, &f
);
4423 /* If not found, try the old interpreter. This includes ILLOPC. */
4425 disas_s390_insn(env
, s
);
4426 switch (s
->is_jmp
) {
4434 ret
= EXIT_PC_UPDATED
;
4437 ret
= EXIT_NORETURN
;
4447 /* Set up the strutures we use to communicate with the helpers. */
4450 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4451 TCGV_UNUSED_I64(o
.out
);
4452 TCGV_UNUSED_I64(o
.out2
);
4453 TCGV_UNUSED_I64(o
.in1
);
4454 TCGV_UNUSED_I64(o
.in2
);
4455 TCGV_UNUSED_I64(o
.addr1
);
4457 /* Implement the instruction. */
4458 if (insn
->help_in1
) {
4459 insn
->help_in1(s
, &f
, &o
);
4461 if (insn
->help_in2
) {
4462 insn
->help_in2(s
, &f
, &o
);
4464 if (insn
->help_prep
) {
4465 insn
->help_prep(s
, &f
, &o
);
4467 if (insn
->help_op
) {
4468 ret
= insn
->help_op(s
, &o
);
4470 if (insn
->help_wout
) {
4471 insn
->help_wout(s
, &f
, &o
);
4473 if (insn
->help_cout
) {
4474 insn
->help_cout(s
, &o
);
4477 /* Free any temporaries created by the helpers. */
4478 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4479 tcg_temp_free_i64(o
.out
);
4481 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4482 tcg_temp_free_i64(o
.out2
);
4484 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4485 tcg_temp_free_i64(o
.in1
);
4487 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4488 tcg_temp_free_i64(o
.in2
);
4490 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4491 tcg_temp_free_i64(o
.addr1
);
4494 /* Advance to the next instruction. */
4499 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4500 TranslationBlock
*tb
,
4504 target_ulong pc_start
;
4505 uint64_t next_page_start
;
4506 uint16_t *gen_opc_end
;
4508 int num_insns
, max_insns
;
4516 if (!(tb
->flags
& FLAG_MASK_64
)) {
4517 pc_start
&= 0x7fffffff;
4522 dc
.cc_op
= CC_OP_DYNAMIC
;
4523 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4524 dc
.is_jmp
= DISAS_NEXT
;
4526 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4528 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4531 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4532 if (max_insns
== 0) {
4533 max_insns
= CF_COUNT_MASK
;
4540 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4544 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4547 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4548 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4549 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4550 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4552 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4556 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4557 tcg_gen_debug_insn_start(dc
.pc
);
4561 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4562 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4563 if (bp
->pc
== dc
.pc
) {
4564 status
= EXIT_PC_STALE
;
4570 if (status
== NO_EXIT
) {
4571 status
= translate_one(env
, &dc
);
4574 /* If we reach a page boundary, are single stepping,
4575 or exhaust instruction count, stop generation. */
4576 if (status
== NO_EXIT
4577 && (dc
.pc
>= next_page_start
4578 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4579 || num_insns
>= max_insns
4581 || env
->singlestep_enabled
)) {
4582 status
= EXIT_PC_STALE
;
4584 } while (status
== NO_EXIT
);
4586 if (tb
->cflags
& CF_LAST_IO
) {
4595 update_psw_addr(&dc
);
4597 case EXIT_PC_UPDATED
:
4598 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4599 gen_op_calc_cc(&dc
);
4601 /* Next TB starts off with CC_OP_DYNAMIC,
4602 so make sure the cc op type is in env */
4603 gen_op_set_cc_op(&dc
);
4606 gen_exception(EXCP_DEBUG
);
4608 /* Generate the return instruction */
4616 gen_icount_end(tb
, num_insns
);
4617 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4619 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4622 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4625 tb
->size
= dc
.pc
- pc_start
;
4626 tb
->icount
= num_insns
;
4629 #if defined(S390X_DEBUG_DISAS)
4630 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4631 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4632 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
/* Public entry point: translate one TB.  The trailing argument is 0
   here and 1 in gen_intermediate_code_pc -- presumably selecting
   whether per-insn PC bookkeeping is recorded; confirm against the
   internal helper's third parameter. */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Variant of gen_intermediate_code used when the caller needs the
   opcode/PC correspondence (trailing argument 1). */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4648 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4651 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4652 cc_op
= gen_opc_cc_op
[pc_pos
];
4653 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {