4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static inline void debug_insn(uint64_t insn
)
84 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_reg16(int reg
, TCGv_i32 v
)
283 /* 16 bit register writes keep the upper bytes */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
287 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
292 static inline void store_freg32(int reg
, TCGv_i32 v
)
294 /* 32 bit register writes keep the lower half */
295 #if HOST_LONG_BITS == 32
296 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
298 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
299 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
303 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
305 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
308 static inline void return_low128(TCGv_i64 dest
)
310 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
313 static inline void update_psw_addr(DisasContext
*s
)
316 tcg_gen_movi_i64(psw_addr
, s
->pc
);
319 static inline void potential_page_fault(DisasContext
*s
)
321 #ifndef CONFIG_USER_ONLY
327 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
329 return (uint64_t)cpu_lduw_code(env
, pc
);
332 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
334 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
337 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
339 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
342 static inline int get_mem_index(DisasContext
*s
)
344 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
345 case PSW_ASC_PRIMARY
>> 32:
347 case PSW_ASC_SECONDARY
>> 32:
349 case PSW_ASC_HOME
>> 32:
357 static void gen_exception(int excp
)
359 TCGv_i32 tmp
= tcg_const_i32(excp
);
360 gen_helper_exception(cpu_env
, tmp
);
361 tcg_temp_free_i32(tmp
);
364 static void gen_program_exception(DisasContext
*s
, int code
)
368 /* Remember what pgm exception this was. */
369 tmp
= tcg_const_i32(code
);
370 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
371 tcg_temp_free_i32(tmp
);
373 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
374 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
375 tcg_temp_free_i32(tmp
);
377 /* Advance past instruction. */
384 /* Trigger exception. */
385 gen_exception(EXCP_PGM
);
388 s
->is_jmp
= DISAS_EXCP
;
391 static inline void gen_illegal_opcode(DisasContext
*s
)
393 gen_program_exception(s
, PGM_SPECIFICATION
);
396 static inline void check_privileged(DisasContext
*s
)
398 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
399 gen_program_exception(s
, PGM_PRIVILEGED
);
403 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
407 /* 31-bitify the immediate part; register contents are dealt with below */
408 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
414 tmp
= tcg_const_i64(d2
);
415 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
420 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
424 tmp
= tcg_const_i64(d2
);
425 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
430 tmp
= tcg_const_i64(d2
);
433 /* 31-bit mode mask if there are values loaded from registers */
434 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
435 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
441 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
443 s
->cc_op
= CC_OP_CONST0
+ val
;
446 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
448 tcg_gen_discard_i64(cc_src
);
449 tcg_gen_mov_i64(cc_dst
, dst
);
450 tcg_gen_discard_i64(cc_vr
);
454 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
456 tcg_gen_discard_i64(cc_src
);
457 tcg_gen_extu_i32_i64(cc_dst
, dst
);
458 tcg_gen_discard_i64(cc_vr
);
462 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
465 tcg_gen_mov_i64(cc_src
, src
);
466 tcg_gen_mov_i64(cc_dst
, dst
);
467 tcg_gen_discard_i64(cc_vr
);
471 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
474 tcg_gen_extu_i32_i64(cc_src
, src
);
475 tcg_gen_extu_i32_i64(cc_dst
, dst
);
476 tcg_gen_discard_i64(cc_vr
);
480 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
481 TCGv_i64 dst
, TCGv_i64 vr
)
483 tcg_gen_mov_i64(cc_src
, src
);
484 tcg_gen_mov_i64(cc_dst
, dst
);
485 tcg_gen_mov_i64(cc_vr
, vr
);
489 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
491 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
494 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
496 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
499 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
502 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
505 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
508 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
511 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
513 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
516 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
518 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
521 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
523 /* XXX optimize for the constant? put it in s? */
524 TCGv_i32 tmp
= tcg_const_i32(v2
);
525 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
526 tcg_temp_free_i32(tmp
);
529 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
531 TCGv_i32 tmp
= tcg_const_i32(v2
);
532 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
533 tcg_temp_free_i32(tmp
);
536 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
538 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
541 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
543 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
546 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
548 TCGv_i64 tmp
= tcg_const_i64(v2
);
550 tcg_temp_free_i64(tmp
);
553 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
555 TCGv_i64 tmp
= tcg_const_i64(v2
);
557 tcg_temp_free_i64(tmp
);
560 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
562 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
565 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
567 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
570 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
572 tcg_gen_extu_i32_i64(cc_src
, v1
);
573 tcg_gen_mov_i64(cc_dst
, v2
);
574 tcg_gen_discard_i64(cc_vr
);
575 s
->cc_op
= CC_OP_LTGT_F32
;
578 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
580 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
583 /* CC value is in env->cc_op */
584 static inline void set_cc_static(DisasContext
*s
)
586 tcg_gen_discard_i64(cc_src
);
587 tcg_gen_discard_i64(cc_dst
);
588 tcg_gen_discard_i64(cc_vr
);
589 s
->cc_op
= CC_OP_STATIC
;
592 static inline void gen_op_set_cc_op(DisasContext
*s
)
594 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
595 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
599 static inline void gen_update_cc_op(DisasContext
*s
)
604 /* calculates cc into cc_op */
605 static void gen_op_calc_cc(DisasContext
*s
)
607 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
608 TCGv_i64 dummy
= tcg_const_i64(0);
615 /* s->cc_op is the cc value */
616 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
619 /* env->cc_op already is the cc value */
633 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
638 case CC_OP_LTUGTU_32
:
639 case CC_OP_LTUGTU_64
:
647 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
662 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
665 /* unknown operation - assume 3 arguments and cc_op in env */
666 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
672 tcg_temp_free_i32(local_cc_op
);
673 tcg_temp_free_i64(dummy
);
675 /* We now have cc in cc_op as constant */
679 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
683 *r1
= (insn
>> 4) & 0xf;
687 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
688 int *x2
, int *b2
, int *d2
)
692 *r1
= (insn
>> 20) & 0xf;
693 *x2
= (insn
>> 16) & 0xf;
694 *b2
= (insn
>> 12) & 0xf;
697 return get_address(s
, *x2
, *b2
, *d2
);
700 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
705 *r1
= (insn
>> 20) & 0xf;
707 *r3
= (insn
>> 16) & 0xf;
708 *b2
= (insn
>> 12) & 0xf;
712 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
717 *i2
= (insn
>> 16) & 0xff;
718 *b1
= (insn
>> 12) & 0xf;
721 return get_address(s
, 0, *b1
, *d1
);
724 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
726 /* NOTE: we handle the case where the TB spans two pages here */
727 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
728 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
729 && !s
->singlestep_enabled
730 && !(s
->tb
->cflags
& CF_LAST_IO
));
733 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
737 if (use_goto_tb(s
, pc
)) {
738 tcg_gen_goto_tb(tb_num
);
739 tcg_gen_movi_i64(psw_addr
, pc
);
740 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
742 /* jump to another page: currently not optimized */
743 tcg_gen_movi_i64(psw_addr
, pc
);
748 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
750 #ifdef DEBUG_INLINE_BRANCHES
751 inline_branch_miss
[cc_op
]++;
755 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
757 #ifdef DEBUG_INLINE_BRANCHES
758 inline_branch_hit
[cc_op
]++;
762 /* Table of mask values to comparison codes, given a comparison as input.
763 For a true comparison CC=3 will never be set, but we treat this
764 conservatively for possible use when CC=3 indicates overflow. */
765 static const TCGCond ltgt_cond
[16] = {
766 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
767 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
768 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
769 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
770 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
771 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
772 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
773 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
776 /* Table of mask values to comparison codes, given a logic op as input.
777 For such, only CC=0 and CC=1 should be possible. */
778 static const TCGCond nz_cond
[16] = {
780 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
782 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
784 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
785 /* EQ | NE | x | x */
786 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
789 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
790 details required to generate a TCG comparison. */
791 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
794 enum cc_op old_cc_op
= s
->cc_op
;
796 if (mask
== 15 || mask
== 0) {
797 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
800 c
->g1
= c
->g2
= true;
805 /* Find the TCG condition for the mask + cc op. */
811 cond
= ltgt_cond
[mask
];
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
818 case CC_OP_LTUGTU_32
:
819 case CC_OP_LTUGTU_64
:
820 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
821 if (cond
== TCG_COND_NEVER
) {
824 account_inline_branch(s
, old_cc_op
);
828 cond
= nz_cond
[mask
];
829 if (cond
== TCG_COND_NEVER
) {
832 account_inline_branch(s
, old_cc_op
);
847 account_inline_branch(s
, old_cc_op
);
862 account_inline_branch(s
, old_cc_op
);
867 /* Calculate cc value. */
872 /* Jump based on CC. We'll load up the real cond below;
873 the assignment here merely avoids a compiler warning. */
874 account_noninline_branch(s
, old_cc_op
);
875 old_cc_op
= CC_OP_STATIC
;
876 cond
= TCG_COND_NEVER
;
880 /* Load up the arguments of the comparison. */
882 c
->g1
= c
->g2
= false;
886 c
->u
.s32
.a
= tcg_temp_new_i32();
887 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
888 c
->u
.s32
.b
= tcg_const_i32(0);
891 case CC_OP_LTUGTU_32
:
893 c
->u
.s32
.a
= tcg_temp_new_i32();
894 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
895 c
->u
.s32
.b
= tcg_temp_new_i32();
896 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
902 c
->u
.s64
.b
= tcg_const_i64(0);
906 case CC_OP_LTUGTU_64
:
909 c
->g1
= c
->g2
= true;
915 c
->u
.s64
.a
= tcg_temp_new_i64();
916 c
->u
.s64
.b
= tcg_const_i64(0);
917 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
925 case 0x8 | 0x4 | 0x2: /* cc != 3 */
927 c
->u
.s32
.b
= tcg_const_i32(3);
929 case 0x8 | 0x4 | 0x1: /* cc != 2 */
931 c
->u
.s32
.b
= tcg_const_i32(2);
933 case 0x8 | 0x2 | 0x1: /* cc != 1 */
935 c
->u
.s32
.b
= tcg_const_i32(1);
937 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
940 c
->u
.s32
.a
= tcg_temp_new_i32();
941 c
->u
.s32
.b
= tcg_const_i32(0);
942 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
944 case 0x8 | 0x4: /* cc < 2 */
946 c
->u
.s32
.b
= tcg_const_i32(2);
948 case 0x8: /* cc == 0 */
950 c
->u
.s32
.b
= tcg_const_i32(0);
952 case 0x4 | 0x2 | 0x1: /* cc != 0 */
954 c
->u
.s32
.b
= tcg_const_i32(0);
956 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
959 c
->u
.s32
.a
= tcg_temp_new_i32();
960 c
->u
.s32
.b
= tcg_const_i32(0);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
963 case 0x4: /* cc == 1 */
965 c
->u
.s32
.b
= tcg_const_i32(1);
967 case 0x2 | 0x1: /* cc > 1 */
969 c
->u
.s32
.b
= tcg_const_i32(1);
971 case 0x2: /* cc == 2 */
973 c
->u
.s32
.b
= tcg_const_i32(2);
975 case 0x1: /* cc == 3 */
977 c
->u
.s32
.b
= tcg_const_i32(3);
980 /* CC is masked by something else: (8 >> cc) & mask. */
983 c
->u
.s32
.a
= tcg_const_i32(8);
984 c
->u
.s32
.b
= tcg_const_i32(0);
985 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
986 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
997 static void free_compare(DisasCompare
*c
)
1001 tcg_temp_free_i64(c
->u
.s64
.a
);
1003 tcg_temp_free_i32(c
->u
.s32
.a
);
1008 tcg_temp_free_i64(c
->u
.s64
.b
);
1010 tcg_temp_free_i32(c
->u
.s32
.b
);
1015 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1016 int x2
, int b2
, int d2
)
1018 TCGv_i64 addr
, tmp2
;
1021 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1022 op
, r1
, x2
, b2
, d2
);
1023 addr
= get_address(s
, x2
, b2
, d2
);
1025 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1026 tmp2
= tcg_temp_new_i64();
1027 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1028 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1029 store_reg(r1
, tmp2
);
1030 tcg_temp_free_i64(tmp2
);
1032 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1033 tmp2
= tcg_temp_new_i64();
1034 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1035 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1036 store_reg(r1
, tmp2
);
1037 tcg_temp_free_i64(tmp2
);
1039 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1040 tmp2
= tcg_temp_new_i64();
1041 tmp32_1
= tcg_temp_new_i32();
1042 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1043 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1044 tcg_temp_free_i64(tmp2
);
1045 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1046 store_reg32(r1
, tmp32_1
);
1047 tcg_temp_free_i32(tmp32_1
);
1049 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1050 tmp2
= tcg_temp_new_i64();
1051 tmp32_1
= tcg_temp_new_i32();
1052 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1053 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1054 tcg_temp_free_i64(tmp2
);
1055 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1056 store_reg16(r1
, tmp32_1
);
1057 tcg_temp_free_i32(tmp32_1
);
1059 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1060 tmp32_1
= load_reg32(r1
);
1061 tmp2
= tcg_temp_new_i64();
1062 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1063 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1064 tcg_temp_free_i32(tmp32_1
);
1065 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1066 tcg_temp_free_i64(tmp2
);
1069 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1070 gen_illegal_opcode(s
);
1073 tcg_temp_free_i64(addr
);
1076 #ifndef CONFIG_USER_ONLY
1077 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1080 int op
= (insn
>> 32) & 0xff;
1082 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1083 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1085 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1087 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1088 /* Test Protection */
1089 potential_page_fault(s
);
1090 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1094 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1095 gen_illegal_opcode(s
);
1099 tcg_temp_free_i64(tmp
);
1100 tcg_temp_free_i64(tmp2
);
1104 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1105 int r3
, int b2
, int d2
)
1108 TCGv_i32 tmp32_1
, tmp32_2
;
1110 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1111 op
, r1
, r3
, b2
, d2
);
1113 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1114 tmp
= get_address(s
, 0, b2
, d2
);
1115 tmp32_1
= tcg_const_i32(r1
);
1116 tmp32_2
= tcg_const_i32(r3
);
1117 potential_page_fault(s
);
1118 gen_helper_stcmh(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1119 tcg_temp_free_i64(tmp
);
1120 tcg_temp_free_i32(tmp32_1
);
1121 tcg_temp_free_i32(tmp32_2
);
1123 #ifndef CONFIG_USER_ONLY
1124 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1126 check_privileged(s
);
1127 tmp
= get_address(s
, 0, b2
, d2
);
1128 tmp32_1
= tcg_const_i32(r1
);
1129 tmp32_2
= tcg_const_i32(r3
);
1130 potential_page_fault(s
);
1131 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1132 tcg_temp_free_i64(tmp
);
1133 tcg_temp_free_i32(tmp32_1
);
1134 tcg_temp_free_i32(tmp32_2
);
1136 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1138 check_privileged(s
);
1139 tmp
= get_address(s
, 0, b2
, d2
);
1140 tmp32_1
= tcg_const_i32(r1
);
1141 tmp32_2
= tcg_const_i32(r3
);
1142 potential_page_fault(s
);
1143 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1144 tcg_temp_free_i64(tmp
);
1145 tcg_temp_free_i32(tmp32_1
);
1146 tcg_temp_free_i32(tmp32_2
);
1150 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1151 gen_illegal_opcode(s
);
1156 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1157 int x2
, int b2
, int d2
, int r1b
)
1159 TCGv_i32 tmp_r1
, tmp32
;
1161 addr
= get_address(s
, x2
, b2
, d2
);
1162 tmp_r1
= tcg_const_i32(r1
);
1164 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1165 potential_page_fault(s
);
1166 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1168 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1169 potential_page_fault(s
);
1170 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1172 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1173 tmp
= tcg_temp_new_i64();
1174 tmp32
= load_freg32(r1
);
1175 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1176 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1177 tcg_temp_free_i64(tmp
);
1178 tcg_temp_free_i32(tmp32
);
1180 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1181 tmp
= tcg_temp_new_i64();
1182 tmp32
= tcg_temp_new_i32();
1183 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1184 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1185 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1186 tcg_temp_free_i64(tmp
);
1187 tcg_temp_free_i32(tmp32
);
1189 tmp32
= load_freg32(r1
);
1190 gen_set_cc_nz_f32(s
, tmp32
);
1191 tcg_temp_free_i32(tmp32
);
1193 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1194 tmp
= tcg_temp_new_i64();
1195 tmp32
= tcg_temp_new_i32();
1196 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1197 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1198 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1199 tcg_temp_free_i64(tmp
);
1200 tcg_temp_free_i32(tmp32
);
1202 tmp32
= load_freg32(r1
);
1203 gen_set_cc_nz_f32(s
, tmp32
);
1204 tcg_temp_free_i32(tmp32
);
1206 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1207 tmp
= tcg_temp_new_i64();
1208 tmp32
= tcg_temp_new_i32();
1209 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1210 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1211 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1212 tcg_temp_free_i64(tmp
);
1213 tcg_temp_free_i32(tmp32
);
1215 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1216 potential_page_fault(s
);
1217 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1220 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1221 potential_page_fault(s
);
1222 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1225 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1226 potential_page_fault(s
);
1227 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1230 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1231 tmp
= tcg_temp_new_i64();
1232 tmp32
= tcg_temp_new_i32();
1233 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1234 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1235 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1236 tcg_temp_free_i64(tmp
);
1237 tcg_temp_free_i32(tmp32
);
1239 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1240 potential_page_fault(s
);
1241 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1244 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1245 potential_page_fault(s
);
1246 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1249 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1250 potential_page_fault(s
);
1251 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1254 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1255 potential_page_fault(s
);
1256 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1258 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1259 potential_page_fault(s
);
1260 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1262 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1263 /* for RXF insns, r1 is R3 and r1b is R1 */
1264 tmp32
= tcg_const_i32(r1b
);
1265 potential_page_fault(s
);
1266 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1267 tcg_temp_free_i32(tmp32
);
1270 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1271 gen_illegal_opcode(s
);
1274 tcg_temp_free_i32(tmp_r1
);
1275 tcg_temp_free_i64(addr
);
1278 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1281 TCGv_i64 tmp
, tmp2
, tmp3
;
1282 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1284 #ifndef CONFIG_USER_ONLY
1288 r1
= (insn
>> 4) & 0xf;
1291 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1294 case 0x22: /* IPM R1 [RRE] */
1295 tmp32_1
= tcg_const_i32(r1
);
1297 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1298 tcg_temp_free_i32(tmp32_1
);
1300 case 0x41: /* CKSM R1,R2 [RRE] */
1301 tmp32_1
= tcg_const_i32(r1
);
1302 tmp32_2
= tcg_const_i32(r2
);
1303 potential_page_fault(s
);
1304 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1305 tcg_temp_free_i32(tmp32_1
);
1306 tcg_temp_free_i32(tmp32_2
);
1307 gen_op_movi_cc(s
, 0);
1309 case 0x4e: /* SAR R1,R2 [RRE] */
1310 tmp32_1
= load_reg32(r2
);
1311 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1312 tcg_temp_free_i32(tmp32_1
);
1314 case 0x4f: /* EAR R1,R2 [RRE] */
1315 tmp32_1
= tcg_temp_new_i32();
1316 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1317 store_reg32(r1
, tmp32_1
);
1318 tcg_temp_free_i32(tmp32_1
);
1320 case 0x54: /* MVPG R1,R2 [RRE] */
1322 tmp2
= load_reg(r1
);
1323 tmp3
= load_reg(r2
);
1324 potential_page_fault(s
);
1325 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1326 tcg_temp_free_i64(tmp
);
1327 tcg_temp_free_i64(tmp2
);
1328 tcg_temp_free_i64(tmp3
);
1329 /* XXX check CCO bit and set CC accordingly */
1330 gen_op_movi_cc(s
, 0);
1332 case 0x55: /* MVST R1,R2 [RRE] */
1333 tmp32_1
= load_reg32(0);
1334 tmp32_2
= tcg_const_i32(r1
);
1335 tmp32_3
= tcg_const_i32(r2
);
1336 potential_page_fault(s
);
1337 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1338 tcg_temp_free_i32(tmp32_1
);
1339 tcg_temp_free_i32(tmp32_2
);
1340 tcg_temp_free_i32(tmp32_3
);
1341 gen_op_movi_cc(s
, 1);
1343 case 0x5d: /* CLST R1,R2 [RRE] */
1344 tmp32_1
= load_reg32(0);
1345 tmp32_2
= tcg_const_i32(r1
);
1346 tmp32_3
= tcg_const_i32(r2
);
1347 potential_page_fault(s
);
1348 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1350 tcg_temp_free_i32(tmp32_1
);
1351 tcg_temp_free_i32(tmp32_2
);
1352 tcg_temp_free_i32(tmp32_3
);
1354 case 0x5e: /* SRST R1,R2 [RRE] */
1355 tmp32_1
= load_reg32(0);
1356 tmp32_2
= tcg_const_i32(r1
);
1357 tmp32_3
= tcg_const_i32(r2
);
1358 potential_page_fault(s
);
1359 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1361 tcg_temp_free_i32(tmp32_1
);
1362 tcg_temp_free_i32(tmp32_2
);
1363 tcg_temp_free_i32(tmp32_3
);
1366 #ifndef CONFIG_USER_ONLY
1367 case 0x02: /* STIDP D2(B2) [S] */
1369 check_privileged(s
);
1370 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1371 tmp
= get_address(s
, 0, b2
, d2
);
1372 potential_page_fault(s
);
1373 gen_helper_stidp(cpu_env
, tmp
);
1374 tcg_temp_free_i64(tmp
);
1376 case 0x04: /* SCK D2(B2) [S] */
1378 check_privileged(s
);
1379 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1380 tmp
= get_address(s
, 0, b2
, d2
);
1381 potential_page_fault(s
);
1382 gen_helper_sck(cc_op
, tmp
);
1384 tcg_temp_free_i64(tmp
);
1386 case 0x05: /* STCK D2(B2) [S] */
1388 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1389 tmp
= get_address(s
, 0, b2
, d2
);
1390 potential_page_fault(s
);
1391 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1393 tcg_temp_free_i64(tmp
);
1395 case 0x06: /* SCKC D2(B2) [S] */
1396 /* Set Clock Comparator */
1397 check_privileged(s
);
1398 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1399 tmp
= get_address(s
, 0, b2
, d2
);
1400 potential_page_fault(s
);
1401 gen_helper_sckc(cpu_env
, tmp
);
1402 tcg_temp_free_i64(tmp
);
1404 case 0x07: /* STCKC D2(B2) [S] */
1405 /* Store Clock Comparator */
1406 check_privileged(s
);
1407 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1408 tmp
= get_address(s
, 0, b2
, d2
);
1409 potential_page_fault(s
);
1410 gen_helper_stckc(cpu_env
, tmp
);
1411 tcg_temp_free_i64(tmp
);
1413 case 0x08: /* SPT D2(B2) [S] */
1415 check_privileged(s
);
1416 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1417 tmp
= get_address(s
, 0, b2
, d2
);
1418 potential_page_fault(s
);
1419 gen_helper_spt(cpu_env
, tmp
);
1420 tcg_temp_free_i64(tmp
);
1422 case 0x09: /* STPT D2(B2) [S] */
1423 /* Store CPU Timer */
1424 check_privileged(s
);
1425 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1426 tmp
= get_address(s
, 0, b2
, d2
);
1427 potential_page_fault(s
);
1428 gen_helper_stpt(cpu_env
, tmp
);
1429 tcg_temp_free_i64(tmp
);
1431 case 0x0a: /* SPKA D2(B2) [S] */
1432 /* Set PSW Key from Address */
1433 check_privileged(s
);
1434 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1435 tmp
= get_address(s
, 0, b2
, d2
);
1436 tmp2
= tcg_temp_new_i64();
1437 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1438 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1439 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1440 tcg_temp_free_i64(tmp2
);
1441 tcg_temp_free_i64(tmp
);
1443 case 0x0d: /* PTLB [S] */
1445 check_privileged(s
);
1446 gen_helper_ptlb(cpu_env
);
1448 case 0x10: /* SPX D2(B2) [S] */
1449 /* Set Prefix Register */
1450 check_privileged(s
);
1451 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1452 tmp
= get_address(s
, 0, b2
, d2
);
1453 potential_page_fault(s
);
1454 gen_helper_spx(cpu_env
, tmp
);
1455 tcg_temp_free_i64(tmp
);
1457 case 0x11: /* STPX D2(B2) [S] */
1459 check_privileged(s
);
1460 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1461 tmp
= get_address(s
, 0, b2
, d2
);
1462 tmp2
= tcg_temp_new_i64();
1463 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1464 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1465 tcg_temp_free_i64(tmp
);
1466 tcg_temp_free_i64(tmp2
);
1468 case 0x12: /* STAP D2(B2) [S] */
1469 /* Store CPU Address */
1470 check_privileged(s
);
1471 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1472 tmp
= get_address(s
, 0, b2
, d2
);
1473 tmp2
= tcg_temp_new_i64();
1474 tmp32_1
= tcg_temp_new_i32();
1475 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1476 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1477 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1478 tcg_temp_free_i64(tmp
);
1479 tcg_temp_free_i64(tmp2
);
1480 tcg_temp_free_i32(tmp32_1
);
1482 case 0x21: /* IPTE R1,R2 [RRE] */
1483 /* Invalidate PTE */
1484 check_privileged(s
);
1485 r1
= (insn
>> 4) & 0xf;
1488 tmp2
= load_reg(r2
);
1489 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1490 tcg_temp_free_i64(tmp
);
1491 tcg_temp_free_i64(tmp2
);
1493 case 0x29: /* ISKE R1,R2 [RRE] */
1494 /* Insert Storage Key Extended */
1495 check_privileged(s
);
1496 r1
= (insn
>> 4) & 0xf;
1499 tmp2
= tcg_temp_new_i64();
1500 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1501 store_reg(r1
, tmp2
);
1502 tcg_temp_free_i64(tmp
);
1503 tcg_temp_free_i64(tmp2
);
1505 case 0x2a: /* RRBE R1,R2 [RRE] */
1506 /* Set Storage Key Extended */
1507 check_privileged(s
);
1508 r1
= (insn
>> 4) & 0xf;
1510 tmp32_1
= load_reg32(r1
);
1512 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1514 tcg_temp_free_i32(tmp32_1
);
1515 tcg_temp_free_i64(tmp
);
1517 case 0x2b: /* SSKE R1,R2 [RRE] */
1518 /* Set Storage Key Extended */
1519 check_privileged(s
);
1520 r1
= (insn
>> 4) & 0xf;
1522 tmp32_1
= load_reg32(r1
);
1524 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1525 tcg_temp_free_i32(tmp32_1
);
1526 tcg_temp_free_i64(tmp
);
1528 case 0x34: /* STCH ? */
1529 /* Store Subchannel */
1530 check_privileged(s
);
1531 gen_op_movi_cc(s
, 3);
1533 case 0x46: /* STURA R1,R2 [RRE] */
1534 /* Store Using Real Address */
1535 check_privileged(s
);
1536 r1
= (insn
>> 4) & 0xf;
1538 tmp32_1
= load_reg32(r1
);
1540 potential_page_fault(s
);
1541 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1542 tcg_temp_free_i32(tmp32_1
);
1543 tcg_temp_free_i64(tmp
);
1545 case 0x50: /* CSP R1,R2 [RRE] */
1546 /* Compare And Swap And Purge */
1547 check_privileged(s
);
1548 r1
= (insn
>> 4) & 0xf;
1550 tmp32_1
= tcg_const_i32(r1
);
1551 tmp32_2
= tcg_const_i32(r2
);
1552 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1554 tcg_temp_free_i32(tmp32_1
);
1555 tcg_temp_free_i32(tmp32_2
);
1557 case 0x5f: /* CHSC ? */
1558 /* Channel Subsystem Call */
1559 check_privileged(s
);
1560 gen_op_movi_cc(s
, 3);
1562 case 0x78: /* STCKE D2(B2) [S] */
1563 /* Store Clock Extended */
1564 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1565 tmp
= get_address(s
, 0, b2
, d2
);
1566 potential_page_fault(s
);
1567 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1569 tcg_temp_free_i64(tmp
);
1571 case 0x79: /* SACF D2(B2) [S] */
1572 /* Set Address Space Control Fast */
1573 check_privileged(s
);
1574 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1575 tmp
= get_address(s
, 0, b2
, d2
);
1576 potential_page_fault(s
);
1577 gen_helper_sacf(cpu_env
, tmp
);
1578 tcg_temp_free_i64(tmp
);
1579 /* addressing mode has changed, so end the block */
1582 s
->is_jmp
= DISAS_JUMP
;
1584 case 0x7d: /* STSI D2,(B2) [S] */
1585 check_privileged(s
);
1586 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1587 tmp
= get_address(s
, 0, b2
, d2
);
1588 tmp32_1
= load_reg32(0);
1589 tmp32_2
= load_reg32(1);
1590 potential_page_fault(s
);
1591 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1593 tcg_temp_free_i64(tmp
);
1594 tcg_temp_free_i32(tmp32_1
);
1595 tcg_temp_free_i32(tmp32_2
);
1597 case 0x9d: /* LFPC D2(B2) [S] */
1598 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1599 tmp
= get_address(s
, 0, b2
, d2
);
1600 tmp2
= tcg_temp_new_i64();
1601 tmp32_1
= tcg_temp_new_i32();
1602 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1603 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1604 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1605 tcg_temp_free_i64(tmp
);
1606 tcg_temp_free_i64(tmp2
);
1607 tcg_temp_free_i32(tmp32_1
);
1609 case 0xb1: /* STFL D2(B2) [S] */
1610 /* Store Facility List (CPU features) at 200 */
1611 check_privileged(s
);
1612 tmp2
= tcg_const_i64(0xc0000000);
1613 tmp
= tcg_const_i64(200);
1614 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1615 tcg_temp_free_i64(tmp2
);
1616 tcg_temp_free_i64(tmp
);
1618 case 0xb2: /* LPSWE D2(B2) [S] */
1619 /* Load PSW Extended */
1620 check_privileged(s
);
1621 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1622 tmp
= get_address(s
, 0, b2
, d2
);
1623 tmp2
= tcg_temp_new_i64();
1624 tmp3
= tcg_temp_new_i64();
1625 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1626 tcg_gen_addi_i64(tmp
, tmp
, 8);
1627 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1628 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1629 /* we need to keep cc_op intact */
1630 s
->is_jmp
= DISAS_JUMP
;
1631 tcg_temp_free_i64(tmp
);
1632 tcg_temp_free_i64(tmp2
);
1633 tcg_temp_free_i64(tmp3
);
1635 case 0x20: /* SERVC R1,R2 [RRE] */
1636 /* SCLP Service call (PV hypercall) */
1637 check_privileged(s
);
1638 potential_page_fault(s
);
1639 tmp32_1
= load_reg32(r2
);
1641 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1643 tcg_temp_free_i32(tmp32_1
);
1644 tcg_temp_free_i64(tmp
);
1648 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1649 gen_illegal_opcode(s
);
1654 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1658 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1659 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1660 #define FP_HELPER(i) \
1661 tmp32_1 = tcg_const_i32(r1); \
1662 tmp32_2 = tcg_const_i32(r2); \
1663 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1664 tcg_temp_free_i32(tmp32_1); \
1665 tcg_temp_free_i32(tmp32_2);
1667 #define FP_HELPER_CC(i) \
1668 tmp32_1 = tcg_const_i32(r1); \
1669 tmp32_2 = tcg_const_i32(r2); \
1670 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1672 tcg_temp_free_i32(tmp32_1); \
1673 tcg_temp_free_i32(tmp32_2);
1676 case 0x0: /* LPEBR R1,R2 [RRE] */
1677 FP_HELPER_CC(lpebr
);
1679 case 0x2: /* LTEBR R1,R2 [RRE] */
1680 FP_HELPER_CC(ltebr
);
1682 case 0x3: /* LCEBR R1,R2 [RRE] */
1683 FP_HELPER_CC(lcebr
);
1685 case 0x4: /* LDEBR R1,R2 [RRE] */
1688 case 0x5: /* LXDBR R1,R2 [RRE] */
1691 case 0x9: /* CEBR R1,R2 [RRE] */
1694 case 0xa: /* AEBR R1,R2 [RRE] */
1697 case 0xb: /* SEBR R1,R2 [RRE] */
1700 case 0xd: /* DEBR R1,R2 [RRE] */
1703 case 0x10: /* LPDBR R1,R2 [RRE] */
1704 FP_HELPER_CC(lpdbr
);
1706 case 0x12: /* LTDBR R1,R2 [RRE] */
1707 FP_HELPER_CC(ltdbr
);
1709 case 0x13: /* LCDBR R1,R2 [RRE] */
1710 FP_HELPER_CC(lcdbr
);
1712 case 0x15: /* SQBDR R1,R2 [RRE] */
1715 case 0x17: /* MEEBR R1,R2 [RRE] */
1718 case 0x19: /* CDBR R1,R2 [RRE] */
1721 case 0x1a: /* ADBR R1,R2 [RRE] */
1724 case 0x1b: /* SDBR R1,R2 [RRE] */
1727 case 0x1c: /* MDBR R1,R2 [RRE] */
1730 case 0x1d: /* DDBR R1,R2 [RRE] */
1733 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
1734 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
1735 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
1736 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
1737 tmp32_1
= tcg_const_i32(m3
);
1738 tmp32_2
= tcg_const_i32(r2
);
1739 tmp32_3
= tcg_const_i32(r1
);
1742 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1745 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1748 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1753 tcg_temp_free_i32(tmp32_1
);
1754 tcg_temp_free_i32(tmp32_2
);
1755 tcg_temp_free_i32(tmp32_3
);
1757 case 0x40: /* LPXBR R1,R2 [RRE] */
1758 FP_HELPER_CC(lpxbr
);
1760 case 0x42: /* LTXBR R1,R2 [RRE] */
1761 FP_HELPER_CC(ltxbr
);
1763 case 0x43: /* LCXBR R1,R2 [RRE] */
1764 FP_HELPER_CC(lcxbr
);
1766 case 0x44: /* LEDBR R1,R2 [RRE] */
1769 case 0x45: /* LDXBR R1,R2 [RRE] */
1772 case 0x46: /* LEXBR R1,R2 [RRE] */
1775 case 0x49: /* CXBR R1,R2 [RRE] */
1778 case 0x4a: /* AXBR R1,R2 [RRE] */
1781 case 0x4b: /* SXBR R1,R2 [RRE] */
1784 case 0x4c: /* MXBR R1,R2 [RRE] */
1787 case 0x4d: /* DXBR R1,R2 [RRE] */
1790 case 0x65: /* LXR R1,R2 [RRE] */
1791 tmp
= load_freg(r2
);
1792 store_freg(r1
, tmp
);
1793 tcg_temp_free_i64(tmp
);
1794 tmp
= load_freg(r2
+ 2);
1795 store_freg(r1
+ 2, tmp
);
1796 tcg_temp_free_i64(tmp
);
1798 case 0x74: /* LZER R1 [RRE] */
1799 tmp32_1
= tcg_const_i32(r1
);
1800 gen_helper_lzer(cpu_env
, tmp32_1
);
1801 tcg_temp_free_i32(tmp32_1
);
1803 case 0x75: /* LZDR R1 [RRE] */
1804 tmp32_1
= tcg_const_i32(r1
);
1805 gen_helper_lzdr(cpu_env
, tmp32_1
);
1806 tcg_temp_free_i32(tmp32_1
);
1808 case 0x76: /* LZXR R1 [RRE] */
1809 tmp32_1
= tcg_const_i32(r1
);
1810 gen_helper_lzxr(cpu_env
, tmp32_1
);
1811 tcg_temp_free_i32(tmp32_1
);
1813 case 0x84: /* SFPC R1 [RRE] */
1814 tmp32_1
= load_reg32(r1
);
1815 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1816 tcg_temp_free_i32(tmp32_1
);
1818 case 0x94: /* CEFBR R1,R2 [RRE] */
1819 case 0x95: /* CDFBR R1,R2 [RRE] */
1820 case 0x96: /* CXFBR R1,R2 [RRE] */
1821 tmp32_1
= tcg_const_i32(r1
);
1822 tmp32_2
= load_reg32(r2
);
1825 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1828 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1831 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1836 tcg_temp_free_i32(tmp32_1
);
1837 tcg_temp_free_i32(tmp32_2
);
1839 case 0x98: /* CFEBR R1,R2 [RRE] */
1840 case 0x99: /* CFDBR R1,R2 [RRE] */
1841 case 0x9a: /* CFXBR R1,R2 [RRE] */
1842 tmp32_1
= tcg_const_i32(r1
);
1843 tmp32_2
= tcg_const_i32(r2
);
1844 tmp32_3
= tcg_const_i32(m3
);
1847 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1850 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1853 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1859 tcg_temp_free_i32(tmp32_1
);
1860 tcg_temp_free_i32(tmp32_2
);
1861 tcg_temp_free_i32(tmp32_3
);
1863 case 0xa4: /* CEGBR R1,R2 [RRE] */
1864 case 0xa5: /* CDGBR R1,R2 [RRE] */
1865 tmp32_1
= tcg_const_i32(r1
);
1869 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1872 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1877 tcg_temp_free_i32(tmp32_1
);
1878 tcg_temp_free_i64(tmp
);
1880 case 0xa6: /* CXGBR R1,R2 [RRE] */
1881 tmp32_1
= tcg_const_i32(r1
);
1883 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1884 tcg_temp_free_i32(tmp32_1
);
1885 tcg_temp_free_i64(tmp
);
1887 case 0xa8: /* CGEBR R1,R2 [RRE] */
1888 tmp32_1
= tcg_const_i32(r1
);
1889 tmp32_2
= tcg_const_i32(r2
);
1890 tmp32_3
= tcg_const_i32(m3
);
1891 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1893 tcg_temp_free_i32(tmp32_1
);
1894 tcg_temp_free_i32(tmp32_2
);
1895 tcg_temp_free_i32(tmp32_3
);
1897 case 0xa9: /* CGDBR R1,R2 [RRE] */
1898 tmp32_1
= tcg_const_i32(r1
);
1899 tmp32_2
= tcg_const_i32(r2
);
1900 tmp32_3
= tcg_const_i32(m3
);
1901 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1903 tcg_temp_free_i32(tmp32_1
);
1904 tcg_temp_free_i32(tmp32_2
);
1905 tcg_temp_free_i32(tmp32_3
);
1907 case 0xaa: /* CGXBR R1,R2 [RRE] */
1908 tmp32_1
= tcg_const_i32(r1
);
1909 tmp32_2
= tcg_const_i32(r2
);
1910 tmp32_3
= tcg_const_i32(m3
);
1911 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1913 tcg_temp_free_i32(tmp32_1
);
1914 tcg_temp_free_i32(tmp32_2
);
1915 tcg_temp_free_i32(tmp32_3
);
1918 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1919 gen_illegal_opcode(s
);
1927 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1933 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1935 case 0x17: /* LLGTR R1,R2 [RRE] */
1936 tmp32_1
= load_reg32(r2
);
1937 tmp
= tcg_temp_new_i64();
1938 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, 0x7fffffffUL
);
1939 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
1941 tcg_temp_free_i32(tmp32_1
);
1942 tcg_temp_free_i64(tmp
);
1944 case 0x0f: /* LRVGR R1,R2 [RRE] */
1945 tcg_gen_bswap64_i64(regs
[r1
], regs
[r2
]);
1947 case 0x1f: /* LRVR R1,R2 [RRE] */
1948 tmp32_1
= load_reg32(r2
);
1949 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1950 store_reg32(r1
, tmp32_1
);
1951 tcg_temp_free_i32(tmp32_1
);
1953 case 0x83: /* FLOGR R1,R2 [RRE] */
1955 tmp32_1
= tcg_const_i32(r1
);
1956 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1958 tcg_temp_free_i64(tmp
);
1959 tcg_temp_free_i32(tmp32_1
);
1962 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1963 gen_illegal_opcode(s
);
1968 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1971 TCGv_i32 tmp32_1
, tmp32_2
;
1974 int op
, r1
, r2
, r3
, d2
, x2
, b2
, r1b
;
1976 opc
= cpu_ldub_code(env
, s
->pc
);
1977 LOG_DISAS("opc 0x%x\n", opc
);
1981 insn
= ld_code4(env
, s
->pc
);
1982 op
= (insn
>> 16) & 0xff;
1983 disas_b2(env
, s
, op
, insn
);
1986 insn
= ld_code4(env
, s
->pc
);
1987 op
= (insn
>> 16) & 0xff;
1988 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1989 r1
= (insn
>> 4) & 0xf;
1991 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1994 insn
= ld_code4(env
, s
->pc
);
1995 r1
= (insn
>> 4) & 0xf;
1997 op
= (insn
>> 16) & 0xff;
1998 disas_b9(env
, s
, op
, r1
, r2
);
2000 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2001 insn
= ld_code4(env
, s
->pc
);
2002 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2003 tmp
= get_address(s
, 0, b2
, d2
);
2004 tmp32_1
= load_reg32(r1
);
2005 tmp32_2
= tcg_const_i32(r3
);
2006 potential_page_fault(s
);
2007 gen_helper_stcm(cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2008 tcg_temp_free_i64(tmp
);
2009 tcg_temp_free_i32(tmp32_1
);
2010 tcg_temp_free_i32(tmp32_2
);
2013 insn
= ld_code6(env
, s
->pc
);
2016 r1
= (insn
>> 36) & 0xf;
2017 x2
= (insn
>> 32) & 0xf;
2018 b2
= (insn
>> 28) & 0xf;
2019 d2
= ((int)((((insn
>> 16) & 0xfff)
2020 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2021 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
2023 #ifndef CONFIG_USER_ONLY
2025 /* Test Protection */
2026 check_privileged(s
);
2027 insn
= ld_code6(env
, s
->pc
);
2029 disas_e5(env
, s
, insn
);
2033 insn
= ld_code6(env
, s
->pc
);
2036 r1
= (insn
>> 36) & 0xf;
2037 r3
= (insn
>> 32) & 0xf;
2038 b2
= (insn
>> 28) & 0xf;
2039 d2
= ((int)((((insn
>> 16) & 0xfff)
2040 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2041 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
2044 insn
= ld_code6(env
, s
->pc
);
2047 r1
= (insn
>> 36) & 0xf;
2048 x2
= (insn
>> 32) & 0xf;
2049 b2
= (insn
>> 28) & 0xf;
2050 d2
= (short)((insn
>> 16) & 0xfff);
2051 r1b
= (insn
>> 12) & 0xf;
2052 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
2055 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
2056 gen_illegal_opcode(s
);
2061 /* ====================================================================== */
2062 /* Define the insn format enumeration. */
2063 #define F0(N) FMT_##N,
2064 #define F1(N, X1) F0(N)
2065 #define F2(N, X1, X2) F0(N)
2066 #define F3(N, X1, X2, X3) F0(N)
2067 #define F4(N, X1, X2, X3, X4) F0(N)
2068 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2071 #include "insn-format.def"
2081 /* Define a structure to hold the decoded fields. We'll store each inside
2082 an array indexed by an enum. In order to conserve memory, we'll arrange
2083 for fields that do not exist at the same time to overlap, thus the "C"
2084 for compact. For checking purposes there is an "O" for original index
2085 as well that will be applied to availability bitmaps. */
2087 enum DisasFieldIndexO
{
2110 enum DisasFieldIndexC
{
2141 struct DisasFields
{
2144 unsigned presentC
:16;
2145 unsigned int presentO
;
2149 /* This is the way fields are to be accessed out of DisasFields. */
2150 #define have_field(S, F) have_field1((S), FLD_O_##F)
2151 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2153 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
2155 return (f
->presentO
>> c
) & 1;
2158 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
2159 enum DisasFieldIndexC c
)
2161 assert(have_field1(f
, o
));
2165 /* Describe the layout of each field in each format. */
2166 typedef struct DisasField
{
2168 unsigned int size
:8;
2169 unsigned int type
:2;
2170 unsigned int indexC
:6;
2171 enum DisasFieldIndexO indexO
:8;
2174 typedef struct DisasFormatInfo
{
2175 DisasField op
[NUM_C_FIELD
];
2178 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2179 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2180 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2181 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2182 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2183 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2184 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2185 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2186 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2187 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2188 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2189 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2190 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2191 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2193 #define F0(N) { { } },
2194 #define F1(N, X1) { { X1 } },
2195 #define F2(N, X1, X2) { { X1, X2 } },
2196 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2197 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2198 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2200 static const DisasFormatInfo format_info
[] = {
2201 #include "insn-format.def"
2219 /* Generally, we'll extract operands into this structures, operate upon
2220 them, and store them back. See the "in1", "in2", "prep", "wout" sets
2221 of routines below for more details. */
2223 bool g_out
, g_out2
, g_in1
, g_in2
;
2224 TCGv_i64 out
, out2
, in1
, in2
;
2228 /* Return values from translate_one, indicating the state of the TB. */
2230 /* Continue the TB. */
2232 /* We have emitted one or more goto_tb. No fixup required. */
2234 /* We are not using a goto_tb (for whatever reason), but have updated
2235 the PC (for whatever reason), so there's no need to do it again on
2238 /* We are exiting the TB, but have neither emitted a goto_tb, nor
2239 updated the PC for the next instruction to be executed. */
2241 /* We are ending the TB with a noreturn function call, e.g. longjmp.
2242 No following code will be executed. */
2246 typedef enum DisasFacility
{
2247 FAC_Z
, /* zarch (default) */
2248 FAC_CASS
, /* compare and swap and store */
2249 FAC_CASS2
, /* compare and swap and store 2*/
2250 FAC_DFP
, /* decimal floating point */
2251 FAC_DFPR
, /* decimal floating point rounding */
2252 FAC_DO
, /* distinct operands */
2253 FAC_EE
, /* execute extensions */
2254 FAC_EI
, /* extended immediate */
2255 FAC_FPE
, /* floating point extension */
2256 FAC_FPSSH
, /* floating point support sign handling */
2257 FAC_FPRGR
, /* FPR-GR transfer */
2258 FAC_GIE
, /* general instructions extension */
2259 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
2260 FAC_HW
, /* high-word */
2261 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
2262 FAC_LOC
, /* load/store on condition */
2263 FAC_LD
, /* long displacement */
2264 FAC_PC
, /* population count */
2265 FAC_SCF
, /* store clock fast */
2266 FAC_SFLE
, /* store facility list extended */
2272 DisasFacility fac
:6;
2276 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
2277 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
2278 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
2279 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
2280 void (*help_cout
)(DisasContext
*, DisasOps
*);
2281 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
2286 /* ====================================================================== */
2287 /* Miscelaneous helpers, used by several operations. */
2289 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
2290 DisasOps
*o
, int mask
)
2292 int b2
= get_field(f
, b2
);
2293 int d2
= get_field(f
, d2
);
2296 o
->in2
= tcg_const_i64(d2
& mask
);
2298 o
->in2
= get_address(s
, 0, b2
, d2
);
2299 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2303 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
2305 if (dest
== s
->next_pc
) {
2308 if (use_goto_tb(s
, dest
)) {
2309 gen_update_cc_op(s
);
2311 tcg_gen_movi_i64(psw_addr
, dest
);
2312 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
2313 return EXIT_GOTO_TB
;
2315 tcg_gen_movi_i64(psw_addr
, dest
);
2316 return EXIT_PC_UPDATED
;
2320 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
2321 bool is_imm
, int imm
, TCGv_i64 cdest
)
2324 uint64_t dest
= s
->pc
+ 2 * imm
;
2327 /* Take care of the special cases first. */
2328 if (c
->cond
== TCG_COND_NEVER
) {
2333 if (dest
== s
->next_pc
) {
2334 /* Branch to next. */
2338 if (c
->cond
== TCG_COND_ALWAYS
) {
2339 ret
= help_goto_direct(s
, dest
);
2343 if (TCGV_IS_UNUSED_I64(cdest
)) {
2344 /* E.g. bcr %r0 -> no branch. */
2348 if (c
->cond
== TCG_COND_ALWAYS
) {
2349 tcg_gen_mov_i64(psw_addr
, cdest
);
2350 ret
= EXIT_PC_UPDATED
;
2355 if (use_goto_tb(s
, s
->next_pc
)) {
2356 if (is_imm
&& use_goto_tb(s
, dest
)) {
2357 /* Both exits can use goto_tb. */
2358 gen_update_cc_op(s
);
2360 lab
= gen_new_label();
2362 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2364 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2367 /* Branch not taken. */
2369 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2370 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2375 tcg_gen_movi_i64(psw_addr
, dest
);
2376 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
2380 /* Fallthru can use goto_tb, but taken branch cannot. */
2381 /* Store taken branch destination before the brcond. This
2382 avoids having to allocate a new local temp to hold it.
2383 We'll overwrite this in the not taken case anyway. */
2385 tcg_gen_mov_i64(psw_addr
, cdest
);
2388 lab
= gen_new_label();
2390 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2392 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2395 /* Branch not taken. */
2396 gen_update_cc_op(s
);
2398 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2399 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2403 tcg_gen_movi_i64(psw_addr
, dest
);
2405 ret
= EXIT_PC_UPDATED
;
2408 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
2409 Most commonly we're single-stepping or some other condition that
2410 disables all use of goto_tb. Just update the PC and exit. */
2412 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
2414 cdest
= tcg_const_i64(dest
);
2418 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
2421 TCGv_i32 t0
= tcg_temp_new_i32();
2422 TCGv_i64 t1
= tcg_temp_new_i64();
2423 TCGv_i64 z
= tcg_const_i64(0);
2424 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
2425 tcg_gen_extu_i32_i64(t1
, t0
);
2426 tcg_temp_free_i32(t0
);
2427 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
2428 tcg_temp_free_i64(t1
);
2429 tcg_temp_free_i64(z
);
2433 tcg_temp_free_i64(cdest
);
2435 tcg_temp_free_i64(next
);
2437 ret
= EXIT_PC_UPDATED
;
2445 /* ====================================================================== */
2446 /* The operations. These perform the bulk of the work for any insn,
2447 usually after the operands have been loaded and output initialized. */
2449 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
2451 gen_helper_abs_i64(o
->out
, o
->in2
);
2455 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
2457 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2461 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2465 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2467 /* XXX possible optimization point */
2469 cc
= tcg_temp_new_i64();
2470 tcg_gen_extu_i32_i64(cc
, cc_op
);
2471 tcg_gen_shri_i64(cc
, cc
, 1);
2473 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2474 tcg_temp_free_i64(cc
);
2478 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2480 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2484 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2486 int shift
= s
->insn
->data
& 0xff;
2487 int size
= s
->insn
->data
>> 8;
2488 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2491 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2492 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2493 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2495 /* Produce the CC from only the bits manipulated. */
2496 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2497 set_cc_nz_u64(s
, cc_dst
);
2501 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2503 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2504 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2505 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2506 return EXIT_PC_UPDATED
;
2512 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2514 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2515 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2518 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2520 int m1
= get_field(s
->fields
, m1
);
2521 bool is_imm
= have_field(s
->fields
, i2
);
2522 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2525 disas_jcc(s
, &c
, m1
);
2526 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2529 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2531 int r1
= get_field(s
->fields
, r1
);
2532 bool is_imm
= have_field(s
->fields
, i2
);
2533 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2537 c
.cond
= TCG_COND_NE
;
2542 t
= tcg_temp_new_i64();
2543 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2544 store_reg32_i64(r1
, t
);
2545 c
.u
.s32
.a
= tcg_temp_new_i32();
2546 c
.u
.s32
.b
= tcg_const_i32(0);
2547 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2548 tcg_temp_free_i64(t
);
2550 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2553 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2555 int r1
= get_field(s
->fields
, r1
);
2556 bool is_imm
= have_field(s
->fields
, i2
);
2557 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2560 c
.cond
= TCG_COND_NE
;
2565 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2566 c
.u
.s64
.a
= regs
[r1
];
2567 c
.u
.s64
.b
= tcg_const_i64(0);
2569 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2572 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2574 int l
= get_field(s
->fields
, l1
);
2579 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2580 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2583 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2584 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2587 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2588 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2591 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2592 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2595 potential_page_fault(s
);
2596 vl
= tcg_const_i32(l
);
2597 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2598 tcg_temp_free_i32(vl
);
2602 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2606 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2608 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2609 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2610 potential_page_fault(s
);
2611 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2612 tcg_temp_free_i32(r1
);
2613 tcg_temp_free_i32(r3
);
2618 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2620 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2621 TCGv_i32 t1
= tcg_temp_new_i32();
2622 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2623 potential_page_fault(s
);
2624 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2626 tcg_temp_free_i32(t1
);
2627 tcg_temp_free_i32(m3
);
2631 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2633 int r3
= get_field(s
->fields
, r3
);
2634 potential_page_fault(s
);
2635 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2640 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2642 int r3
= get_field(s
->fields
, r3
);
2643 potential_page_fault(s
);
2644 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2649 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2651 int r3
= get_field(s
->fields
, r3
);
2652 TCGv_i64 in3
= tcg_temp_new_i64();
2653 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2654 potential_page_fault(s
);
2655 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2656 tcg_temp_free_i64(in3
);
2661 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2663 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2664 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2665 potential_page_fault(s
);
2666 /* XXX rewrite in tcg */
2667 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2672 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2674 TCGv_i64 t1
= tcg_temp_new_i64();
2675 TCGv_i32 t2
= tcg_temp_new_i32();
2676 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2677 gen_helper_cvd(t1
, t2
);
2678 tcg_temp_free_i32(t2
);
2679 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2680 tcg_temp_free_i64(t1
);
2684 #ifndef CONFIG_USER_ONLY
2685 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2689 check_privileged(s
);
2690 potential_page_fault(s
);
2692 /* We pretend the format is RX_a so that D2 is the field we want. */
2693 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2694 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2695 tcg_temp_free_i32(tmp
);
2700 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2702 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2703 return_low128(o
->out
);
2707 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2709 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2710 return_low128(o
->out
);
2714 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2716 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2717 return_low128(o
->out
);
2721 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2723 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2724 return_low128(o
->out
);
2728 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2730 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2734 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2736 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2737 tb->flags, (ab)use the tb->cs_base field as the address of
2738 the template in memory, and grab 8 bits of tb->flags/cflags for
2739 the contents of the register. We would then recognize all this
2740 in gen_intermediate_code_internal, generating code for exactly
2741 one instruction. This new TB then gets executed normally.
2743 On the other hand, this seems to be mostly used for modifying
2744 MVC inside of memcpy, which needs a helper call anyway. So
2745 perhaps this doesn't bear thinking about any further. */
2752 tmp
= tcg_const_i64(s
->next_pc
);
2753 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2754 tcg_temp_free_i64(tmp
);
2760 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2762 int m3
= get_field(s
->fields
, m3
);
2763 int pos
, len
, base
= s
->insn
->data
;
2764 TCGv_i64 tmp
= tcg_temp_new_i64();
2769 /* Effectively a 32-bit load. */
2770 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2777 /* Effectively a 16-bit load. */
2778 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2786 /* Effectively an 8-bit load. */
2787 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2792 pos
= base
+ ctz32(m3
) * 8;
2793 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2794 ccm
= ((1ull << len
) - 1) << pos
;
2798 /* This is going to be a sequence of loads and inserts. */
2799 pos
= base
+ 32 - 8;
2803 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2804 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2805 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2808 m3
= (m3
<< 1) & 0xf;
2814 tcg_gen_movi_i64(tmp
, ccm
);
2815 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2816 tcg_temp_free_i64(tmp
);
2820 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2822 int shift
= s
->insn
->data
& 0xff;
2823 int size
= s
->insn
->data
>> 8;
2824 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2828 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2830 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2834 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2836 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2840 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2842 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2846 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2848 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2852 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2854 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2858 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2860 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2864 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2866 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2870 #ifndef CONFIG_USER_ONLY
2871 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2873 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2874 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2875 check_privileged(s
);
2876 potential_page_fault(s
);
2877 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2878 tcg_temp_free_i32(r1
);
2879 tcg_temp_free_i32(r3
);
2883 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2885 check_privileged(s
);
2886 potential_page_fault(s
);
2887 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2892 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2896 check_privileged(s
);
2898 t1
= tcg_temp_new_i64();
2899 t2
= tcg_temp_new_i64();
2900 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2901 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2902 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2903 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2904 tcg_gen_shli_i64(t1
, t1
, 32);
2905 gen_helper_load_psw(cpu_env
, t1
, t2
);
2906 tcg_temp_free_i64(t1
);
2907 tcg_temp_free_i64(t2
);
2908 return EXIT_NORETURN
;
2912 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2914 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2915 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2916 potential_page_fault(s
);
2917 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2918 tcg_temp_free_i32(r1
);
2919 tcg_temp_free_i32(r3
);
2923 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2925 int r1
= get_field(s
->fields
, r1
);
2926 int r3
= get_field(s
->fields
, r3
);
2927 TCGv_i64 t
= tcg_temp_new_i64();
2928 TCGv_i64 t4
= tcg_const_i64(4);
2931 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2932 store_reg32_i64(r1
, t
);
2936 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2940 tcg_temp_free_i64(t
);
2941 tcg_temp_free_i64(t4
);
2945 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2947 int r1
= get_field(s
->fields
, r1
);
2948 int r3
= get_field(s
->fields
, r3
);
2949 TCGv_i64 t
= tcg_temp_new_i64();
2950 TCGv_i64 t4
= tcg_const_i64(4);
2953 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2954 store_reg32h_i64(r1
, t
);
2958 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2962 tcg_temp_free_i64(t
);
2963 tcg_temp_free_i64(t4
);
2967 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2969 int r1
= get_field(s
->fields
, r1
);
2970 int r3
= get_field(s
->fields
, r3
);
2971 TCGv_i64 t8
= tcg_const_i64(8);
2974 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2978 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2982 tcg_temp_free_i64(t8
);
2986 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2989 o
->g_out
= o
->g_in2
;
2990 TCGV_UNUSED_I64(o
->in2
);
2995 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2999 o
->g_out
= o
->g_in1
;
3000 o
->g_out2
= o
->g_in2
;
3001 TCGV_UNUSED_I64(o
->in1
);
3002 TCGV_UNUSED_I64(o
->in2
);
3003 o
->g_in1
= o
->g_in2
= false;
3007 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
3009 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3010 potential_page_fault(s
);
3011 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3012 tcg_temp_free_i32(l
);
3016 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3018 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3019 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
3020 potential_page_fault(s
);
3021 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
3022 tcg_temp_free_i32(r1
);
3023 tcg_temp_free_i32(r2
);
3028 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3030 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3031 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3032 potential_page_fault(s
);
3033 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
3034 tcg_temp_free_i32(r1
);
3035 tcg_temp_free_i32(r3
);
3040 #ifndef CONFIG_USER_ONLY
3041 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3043 int r1
= get_field(s
->fields
, l1
);
3044 check_privileged(s
);
3045 potential_page_fault(s
);
3046 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3051 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3053 int r1
= get_field(s
->fields
, l1
);
3054 check_privileged(s
);
3055 potential_page_fault(s
);
3056 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3062 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3064 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3068 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3070 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3071 return_low128(o
->out2
);
3075 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3077 gen_helper_nabs_i64(o
->out
, o
->in2
);
3081 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3083 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3084 potential_page_fault(s
);
3085 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3086 tcg_temp_free_i32(l
);
3091 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3093 tcg_gen_neg_i64(o
->out
, o
->in2
);
3097 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3099 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3100 potential_page_fault(s
);
3101 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3102 tcg_temp_free_i32(l
);
3107 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3109 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3113 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3115 int shift
= s
->insn
->data
& 0xff;
3116 int size
= s
->insn
->data
>> 8;
3117 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3120 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3121 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3123 /* Produce the CC from only the bits manipulated. */
3124 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3125 set_cc_nz_u64(s
, cc_dst
);
3129 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3131 TCGv_i32 t1
= tcg_temp_new_i32();
3132 TCGv_i32 t2
= tcg_temp_new_i32();
3133 TCGv_i32 to
= tcg_temp_new_i32();
3134 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
3135 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
3136 tcg_gen_rotl_i32(to
, t1
, t2
);
3137 tcg_gen_extu_i32_i64(o
->out
, to
);
3138 tcg_temp_free_i32(t1
);
3139 tcg_temp_free_i32(t2
);
3140 tcg_temp_free_i32(to
);
3144 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3146 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3150 #ifndef CONFIG_USER_ONLY
3151 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3153 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3154 check_privileged(s
);
3155 potential_page_fault(s
);
3156 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3157 tcg_temp_free_i32(r1
);
3162 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3164 uint64_t sign
= 1ull << s
->insn
->data
;
3165 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3166 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3167 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3168 /* The arithmetic left shift is curious in that it does not affect
3169 the sign bit. Copy that over from the source unchanged. */
3170 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3171 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3172 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3176 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3178 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3182 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3184 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3188 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3190 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3194 #ifndef CONFIG_USER_ONLY
3195 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3197 check_privileged(s
);
3198 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3202 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3204 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3205 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3206 check_privileged(s
);
3207 potential_page_fault(s
);
3208 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3209 tcg_temp_free_i32(r1
);
3210 tcg_temp_free_i32(r3
);
3214 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3216 uint64_t i2
= get_field(s
->fields
, i2
);
3219 check_privileged(s
);
3221 /* It is important to do what the instruction name says: STORE THEN.
3222 If we let the output hook perform the store then if we fault and
3223 restart, we'll have the wrong SYSTEM MASK in place. */
3224 t
= tcg_temp_new_i64();
3225 tcg_gen_shri_i64(t
, psw_mask
, 56);
3226 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3227 tcg_temp_free_i64(t
);
3229 if (s
->fields
->op
== 0xac) {
3230 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3231 (i2
<< 56) | 0x00ffffffffffffffull
);
3233 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3239 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3241 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3245 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3247 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3251 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3253 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3257 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3259 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3263 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3265 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3266 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3267 potential_page_fault(s
);
3268 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3269 tcg_temp_free_i32(r1
);
3270 tcg_temp_free_i32(r3
);
3274 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3276 int r1
= get_field(s
->fields
, r1
);
3277 int r3
= get_field(s
->fields
, r3
);
3278 int size
= s
->insn
->data
;
3279 TCGv_i64 tsize
= tcg_const_i64(size
);
3283 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3285 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3290 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3294 tcg_temp_free_i64(tsize
);
3298 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3300 int r1
= get_field(s
->fields
, r1
);
3301 int r3
= get_field(s
->fields
, r3
);
3302 TCGv_i64 t
= tcg_temp_new_i64();
3303 TCGv_i64 t4
= tcg_const_i64(4);
3304 TCGv_i64 t32
= tcg_const_i64(32);
3307 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3308 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3312 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3316 tcg_temp_free_i64(t
);
3317 tcg_temp_free_i64(t4
);
3318 tcg_temp_free_i64(t32
);
3322 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3324 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3328 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3333 tcg_gen_not_i64(o
->in2
, o
->in2
);
3334 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3336 /* XXX possible optimization point */
3338 cc
= tcg_temp_new_i64();
3339 tcg_gen_extu_i32_i64(cc
, cc_op
);
3340 tcg_gen_shri_i64(cc
, cc
, 1);
3341 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3342 tcg_temp_free_i64(cc
);
3346 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3353 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3354 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3355 tcg_temp_free_i32(t
);
3357 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3358 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3359 tcg_temp_free_i32(t
);
3361 gen_exception(EXCP_SVC
);
3362 return EXIT_NORETURN
;
3365 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3367 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3368 potential_page_fault(s
);
3369 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3370 tcg_temp_free_i32(l
);
3375 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3377 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3378 potential_page_fault(s
);
3379 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3380 tcg_temp_free_i32(l
);
3384 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3386 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3387 potential_page_fault(s
);
3388 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3389 tcg_temp_free_i32(l
);
3394 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3396 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3400 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3402 int shift
= s
->insn
->data
& 0xff;
3403 int size
= s
->insn
->data
>> 8;
3404 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3407 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3408 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3410 /* Produce the CC from only the bits manipulated. */
3411 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3412 set_cc_nz_u64(s
, cc_dst
);
3416 /* ====================================================================== */
3417 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3418 the original inputs), update the various cc data structures in order to
3419 be able to compute the new condition code. */
3421 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3423 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3426 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3428 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3431 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3433 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3436 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3438 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3441 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3443 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3446 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3448 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3451 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3453 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3456 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3458 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3461 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3463 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3466 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3468 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3471 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3473 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3476 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3478 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3481 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3483 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3486 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3488 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3491 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3493 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3496 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3498 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3501 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3503 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3504 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3507 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3509 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3512 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3514 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3517 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3519 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3522 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3524 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3527 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3529 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3532 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3534 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3537 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3539 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3542 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3544 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3547 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3549 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3552 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3554 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3557 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3559 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3562 /* ====================================================================== */
3563 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3564 with the TCG register to which we will write. Used in combination with
3565 the "wout" generators, in some cases we need a new temporary, and in
3566 some cases we can write to a TCG global. */
3568 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3570 o
->out
= tcg_temp_new_i64();
3573 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3575 o
->out
= tcg_temp_new_i64();
3576 o
->out2
= tcg_temp_new_i64();
3579 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3581 o
->out
= regs
[get_field(f
, r1
)];
3585 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3587 /* ??? Specification exception: r1 must be even. */
3588 int r1
= get_field(f
, r1
);
3590 o
->out2
= regs
[(r1
+ 1) & 15];
3591 o
->g_out
= o
->g_out2
= true;
3594 /* ====================================================================== */
3595 /* The "Write OUTput" generators. These generally perform some non-trivial
3596 copy of data to TCG globals, or to main memory. The trivial cases are
3597 generally handled by having a "prep" generator install the TCG global
3598 as the destination of the operation. */
3600 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3602 store_reg(get_field(f
, r1
), o
->out
);
3605 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3607 int r1
= get_field(f
, r1
);
3608 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3611 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3613 store_reg32_i64(get_field(f
, r1
), o
->out
);
3616 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3618 /* ??? Specification exception: r1 must be even. */
3619 int r1
= get_field(f
, r1
);
3620 store_reg32_i64(r1
, o
->out
);
3621 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3624 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3626 /* ??? Specification exception: r1 must be even. */
3627 int r1
= get_field(f
, r1
);
3628 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3629 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3630 store_reg32_i64(r1
, o
->out
);
3633 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3635 store_freg32_i64(get_field(f
, r1
), o
->out
);
3638 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3640 store_freg(get_field(f
, r1
), o
->out
);
3643 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3645 int f1
= get_field(s
->fields
, r1
);
3646 store_freg(f1
, o
->out
);
3647 store_freg((f1
+ 2) & 15, o
->out2
);
3650 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3652 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3653 store_reg32_i64(get_field(f
, r1
), o
->out
);
3657 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3659 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3660 store_freg32_i64(get_field(f
, r1
), o
->out
);
3664 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3666 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3669 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3671 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3674 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3676 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3679 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3681 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3684 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3686 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3689 /* ====================================================================== */
3690 /* The "INput 1" generators. These load the first operand to an insn. */
3692 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3694 o
->in1
= load_reg(get_field(f
, r1
));
3697 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3699 o
->in1
= regs
[get_field(f
, r1
)];
3703 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3705 o
->in1
= tcg_temp_new_i64();
3706 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3709 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3711 o
->in1
= tcg_temp_new_i64();
3712 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3715 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3717 o
->in1
= tcg_temp_new_i64();
3718 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3721 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3723 /* ??? Specification exception: r1 must be even. */
3724 int r1
= get_field(f
, r1
);
3725 o
->in1
= load_reg((r1
+ 1) & 15);
3728 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3730 /* ??? Specification exception: r1 must be even. */
3731 int r1
= get_field(f
, r1
);
3732 o
->in1
= tcg_temp_new_i64();
3733 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3736 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3738 /* ??? Specification exception: r1 must be even. */
3739 int r1
= get_field(f
, r1
);
3740 o
->in1
= tcg_temp_new_i64();
3741 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3744 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3746 /* ??? Specification exception: r1 must be even. */
3747 int r1
= get_field(f
, r1
);
3748 o
->in1
= tcg_temp_new_i64();
3749 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3752 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3754 o
->in1
= load_reg(get_field(f
, r2
));
3757 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3759 o
->in1
= load_reg(get_field(f
, r3
));
3762 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3764 o
->in1
= regs
[get_field(f
, r3
)];
3768 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3770 o
->in1
= tcg_temp_new_i64();
3771 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3774 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3776 o
->in1
= tcg_temp_new_i64();
3777 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3780 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3785 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3787 o
->in1
= fregs
[get_field(f
, r1
)];
3791 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3793 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3796 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3799 o
->in1
= tcg_temp_new_i64();
3800 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3803 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3806 o
->in1
= tcg_temp_new_i64();
3807 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3810 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3813 o
->in1
= tcg_temp_new_i64();
3814 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3817 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 o
->in1
= tcg_temp_new_i64();
3821 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3824 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3827 o
->in1
= tcg_temp_new_i64();
3828 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3831 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3834 o
->in1
= tcg_temp_new_i64();
3835 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3838 /* ====================================================================== */
3839 /* The "INput 2" generators. These load the second operand to an insn. */
3841 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3843 o
->in2
= load_reg(get_field(f
, r2
));
3846 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3848 o
->in2
= regs
[get_field(f
, r2
)];
3852 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 int r2
= get_field(f
, r2
);
3856 o
->in2
= load_reg(r2
);
3860 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3862 o
->in2
= tcg_temp_new_i64();
3863 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3866 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 o
->in2
= tcg_temp_new_i64();
3869 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3872 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3874 o
->in2
= tcg_temp_new_i64();
3875 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3878 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3880 o
->in2
= tcg_temp_new_i64();
3881 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3884 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3886 o
->in2
= load_reg(get_field(f
, r3
));
3889 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3891 o
->in2
= tcg_temp_new_i64();
3892 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3895 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3897 o
->in2
= tcg_temp_new_i64();
3898 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3901 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3903 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3906 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3908 o
->in2
= fregs
[get_field(f
, r2
)];
3912 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3914 int f2
= get_field(f
, r2
);
3916 o
->in2
= fregs
[(f2
+ 2) & 15];
3917 o
->g_in1
= o
->g_in2
= true;
3920 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3922 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3923 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3926 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3928 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3931 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3933 help_l2_shift(s
, f
, o
, 31);
3936 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3938 help_l2_shift(s
, f
, o
, 63);
3941 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3944 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3947 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3950 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3953 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3956 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3959 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3962 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3965 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3968 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3971 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3974 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3977 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3980 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3983 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3986 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3989 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3992 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3995 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3997 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4000 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4002 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4005 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4007 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4010 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4012 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4015 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4017 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4018 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4021 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4023 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4024 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
/* Instruction-table machinery: "insn-data.def" is included three times
   under different definitions of the D() macro -- once to build the
   insn_<NM> enumerators, once to build the insn_info[] descriptor table
   (format, operand helpers, op/cout implementations), and once inside
   lookup_opc() to build a switch mapping opcodes to table entries.
   NOTE(review): the #undef lines, several descriptor initializers, and
   the closing braces of the enum/table/switch are not visible here --
   apparently dropped by the extraction; consult the full file before
   modifying this region. */
4027 /* ====================================================================== */
4029 /* Find opc within the table of insns. This is formulated as a switch
4030 statement so that (1) we get compile-time notice of cut-paste errors
4031 for duplicated opcodes, and (2) the compiler generates the binary
4032 search tree, rather than us having to post-process the table. */
4034 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4035 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4037 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4039 enum DisasInsnEnum
{
4040 #include "insn-data.def"
4044 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4049 .help_in1 = in1_##I1, \
4050 .help_in2 = in2_##I2, \
4051 .help_prep = prep_##P, \
4052 .help_wout = wout_##W, \
4053 .help_cout = cout_##CC, \
4054 .help_op = op_##OP, \
4058 /* Allow 0 to be used for NULL in the table below. */
4066 static const DisasInsn insn_info
[] = {
4067 #include "insn-data.def"
4071 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4072 case OPC: return &insn_info[insn_ ## NM];
4074 static const DisasInsn
*lookup_opc(uint16_t opc
)
4077 #include "insn-data.def"
/* NOTE(review): several lines of this function (the switch header on
   f->type, the sign-extension arithmetic for case 1, break statements,
   and the braces) are missing from this extraction; the comments below
   describe only what is visible. */
4086 /* Extract a field from the insn. The INSN should be left-aligned in
4087 the uint64_t so that we can more easily utilize the big-bit-endian
4088 definitions we extract from the Principles of Operation. */
4090 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4098 /* Zero extract the field from the insn. */
4099 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4101 /* Sign-extend, or un-swap the field as necessary. */
4103 case 0: /* unsigned */
4105 case 1: /* signed */
4106 assert(f
->size
<= 32);
4107 m
= 1u << (f
->size
- 1);
4110 case 2: /* dl+dh split, signed 20 bit. */
4111 r
= ((int8_t)r
<< 12) | (r
>> 8);
4117 /* Validate that the "compressed" encoding we selected above is valid.
4118 I.e. we haven't made two different original fields overlap. */
4119 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4120 o
->presentC
|= 1 << f
->indexC
;
4121 o
->presentO
|= 1 << f
->indexO
;
4123 o
->c
[f
->indexC
] = r
;
/* NOTE(review): this function is incomplete in this extraction -- the
   declarations of op/op2/ilen/i, the switch on ilen that chooses
   between the 2-, 4- and 6-byte fetch paths, the switch header on op,
   break statements, and the closing braces/return are all missing.
   The visible flow: fetch the first halfword, derive the primary
   opcode and instruction length, fetch the remaining bytes, locate the
   secondary opcode by primary-opcode class, look the insn up, and (if
   found) extract each compressed operand field. */
4126 /* Lookup the insn at the current PC, extracting the operands into O and
4127 returning the info struct for the insn. Returns NULL for invalid insn. */
4129 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4132 uint64_t insn
, pc
= s
->pc
;
4134 const DisasInsn
*info
;
4136 insn
= ld_code2(env
, pc
);
4137 op
= (insn
>> 8) & 0xff;
4138 ilen
= get_ilen(op
);
4139 s
->next_pc
= s
->pc
+ ilen
;
4146 insn
= ld_code4(env
, pc
) << 32;
4149 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4155 /* We can't actually determine the insn format until we've looked up
4156 the full insn opcode. Which we can't do without locating the
4157 secondary opcode. Assume by default that OP2 is at bit 40; for
4158 those smaller insns that don't actually have a secondary opcode
4159 this will correctly result in OP2 = 0. */
4165 case 0xb2: /* S, RRF, RRE */
4166 case 0xb3: /* RRE, RRD, RRF */
4167 case 0xb9: /* RRE, RRF */
4168 case 0xe5: /* SSE, SIL */
4169 op2
= (insn
<< 8) >> 56;
4173 case 0xc0: /* RIL */
4174 case 0xc2: /* RIL */
4175 case 0xc4: /* RIL */
4176 case 0xc6: /* RIL */
4177 case 0xc8: /* SSF */
4178 case 0xcc: /* RIL */
4179 op2
= (insn
<< 12) >> 60;
4181 case 0xd0 ... 0xdf: /* SS */
4187 case 0xee ... 0xf3: /* SS */
4188 case 0xf8 ... 0xfd: /* SS */
4192 op2
= (insn
<< 40) >> 56;
4196 memset(f
, 0, sizeof(*f
));
4200 /* Lookup the instruction. */
4201 info
= lookup_opc(op
<< 8 | op2
);
4203 /* If we found it, extract the operands. */
4205 DisasFormat fmt
= info
->fmt
;
4208 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4209 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction. Visible flow: decode via
   extract_insn(); if the insn is not in the table, fall back to the
   legacy interpreter (disas_s390_insn) and map its is_jmp result to an
   ExitStatus; otherwise run the decomposed helper pipeline
   (in1 -> in2 -> prep -> op -> wout -> cout) and free any non-global
   temporaries afterwards.
   NOTE(review): this extraction is missing the declarations of f and o,
   the NULL-check branch structure, several switch cases, returns, and
   the closing brace -- consult the full source before editing. */
4215 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4217 const DisasInsn
*insn
;
4218 ExitStatus ret
= NO_EXIT
;
4222 insn
= extract_insn(env
, s
, &f
);
4224 /* If not found, try the old interpreter. This includes ILLOPC. */
4226 disas_s390_insn(env
, s
);
4227 switch (s
->is_jmp
) {
4235 ret
= EXIT_PC_UPDATED
;
4238 ret
= EXIT_NORETURN
;
4248 /* Set up the structures we use to communicate with the helpers. */
4251 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4252 TCGV_UNUSED_I64(o
.out
);
4253 TCGV_UNUSED_I64(o
.out2
);
4254 TCGV_UNUSED_I64(o
.in1
);
4255 TCGV_UNUSED_I64(o
.in2
);
4256 TCGV_UNUSED_I64(o
.addr1
);
4258 /* Implement the instruction. */
4259 if (insn
->help_in1
) {
4260 insn
->help_in1(s
, &f
, &o
);
4262 if (insn
->help_in2
) {
4263 insn
->help_in2(s
, &f
, &o
);
4265 if (insn
->help_prep
) {
4266 insn
->help_prep(s
, &f
, &o
);
4268 if (insn
->help_op
) {
4269 ret
= insn
->help_op(s
, &o
);
4271 if (insn
->help_wout
) {
4272 insn
->help_wout(s
, &f
, &o
);
4274 if (insn
->help_cout
) {
4275 insn
->help_cout(s
, &o
);
4278 /* Free any temporaries created by the helpers. */
4279 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4280 tcg_temp_free_i64(o
.out
);
4282 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4283 tcg_temp_free_i64(o
.out2
);
4285 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4286 tcg_temp_free_i64(o
.in1
);
4288 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4289 tcg_temp_free_i64(o
.in2
);
4291 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4292 tcg_temp_free_i64(o
.addr1
);
4295 /* Advance to the next instruction. */
/* Shared TB-translation worker. Visible flow: initialize the
   DisasContext (dynamic cc, single-step flag, DISAS_NEXT), compute the
   page boundary and max insn count, then loop calling translate_one()
   -- recording gen_opc_pc / gen_opc_cc_op / gen_opc_instr_start /
   gen_opc_icount bookkeeping, honoring breakpoints and the CF_LAST_IO
   icount handoff -- until an exit status or a stop condition (page
   boundary, opcode-buffer exhaustion, insn budget, single-stepping).
   Afterwards: flush psw.addr / cc state per exit status, emit the TB
   epilogue, and fill in tb->size / tb->icount (plus optional
   disassembly logging under S390X_DEBUG_DISAS).
   NOTE(review): this extraction drops many lines (the search_pc
   parameter, dc/j/lj/bp/status declarations, several if/switch headers,
   exit_tb, and most braces); treat the text below as a partial record
   and consult the full source before editing. */
4300 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4301 TranslationBlock
*tb
,
4305 target_ulong pc_start
;
4306 uint64_t next_page_start
;
4307 uint16_t *gen_opc_end
;
4309 int num_insns
, max_insns
;
4317 if (!(tb
->flags
& FLAG_MASK_64
)) {
4318 pc_start
&= 0x7fffffff;
4323 dc
.cc_op
= CC_OP_DYNAMIC
;
4324 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4325 dc
.is_jmp
= DISAS_NEXT
;
4327 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4329 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4332 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4333 if (max_insns
== 0) {
4334 max_insns
= CF_COUNT_MASK
;
4341 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4345 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4348 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4349 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4350 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4351 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4353 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4357 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4358 tcg_gen_debug_insn_start(dc
.pc
);
4362 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4363 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4364 if (bp
->pc
== dc
.pc
) {
4365 status
= EXIT_PC_STALE
;
4371 if (status
== NO_EXIT
) {
4372 status
= translate_one(env
, &dc
);
4375 /* If we reach a page boundary, are single stepping,
4376 or exhaust instruction count, stop generation. */
4377 if (status
== NO_EXIT
4378 && (dc
.pc
>= next_page_start
4379 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4380 || num_insns
>= max_insns
4382 || env
->singlestep_enabled
)) {
4383 status
= EXIT_PC_STALE
;
4385 } while (status
== NO_EXIT
);
4387 if (tb
->cflags
& CF_LAST_IO
) {
4396 update_psw_addr(&dc
);
4398 case EXIT_PC_UPDATED
:
4399 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4400 gen_op_calc_cc(&dc
);
4402 /* Next TB starts off with CC_OP_DYNAMIC,
4403 so make sure the cc op type is in env */
4404 gen_op_set_cc_op(&dc
);
4407 gen_exception(EXCP_DEBUG
);
4409 /* Generate the return instruction */
4417 gen_icount_end(tb
, num_insns
);
4418 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4420 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4423 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4426 tb
->size
= dc
.pc
- pc_start
;
4427 tb
->icount
= num_insns
;
4430 #if defined(S390X_DEBUG_DISAS)
4431 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4432 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4433 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4439 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4441 gen_intermediate_code_internal(env
, tb
, 0);
4444 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4446 gen_intermediate_code_internal(env
, tb
, 1);
4449 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4452 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4453 cc_op
= gen_opc_cc_op
[pc_pos
];
4454 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {