/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static inline void debug_insn(uint64_t insn
)
84 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_reg16(int reg
, TCGv_i32 v
)
283 /* 16 bit register writes keep the upper bytes */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
287 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
292 static inline void store_freg32(int reg
, TCGv_i32 v
)
294 /* 32 bit register writes keep the lower half */
295 #if HOST_LONG_BITS == 32
296 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
298 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
299 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
303 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
305 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
308 static inline void return_low128(TCGv_i64 dest
)
310 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
313 static inline void update_psw_addr(DisasContext
*s
)
316 tcg_gen_movi_i64(psw_addr
, s
->pc
);
319 static inline void potential_page_fault(DisasContext
*s
)
321 #ifndef CONFIG_USER_ONLY
327 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
329 return (uint64_t)cpu_lduw_code(env
, pc
);
332 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
334 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
337 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
339 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
342 static inline int get_mem_index(DisasContext
*s
)
344 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
345 case PSW_ASC_PRIMARY
>> 32:
347 case PSW_ASC_SECONDARY
>> 32:
349 case PSW_ASC_HOME
>> 32:
357 static void gen_exception(int excp
)
359 TCGv_i32 tmp
= tcg_const_i32(excp
);
360 gen_helper_exception(cpu_env
, tmp
);
361 tcg_temp_free_i32(tmp
);
364 static void gen_program_exception(DisasContext
*s
, int code
)
368 /* Remember what pgm exeption this was. */
369 tmp
= tcg_const_i32(code
);
370 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
371 tcg_temp_free_i32(tmp
);
373 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
374 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
375 tcg_temp_free_i32(tmp
);
377 /* Advance past instruction. */
384 /* Trigger exception. */
385 gen_exception(EXCP_PGM
);
388 s
->is_jmp
= DISAS_EXCP
;
391 static inline void gen_illegal_opcode(DisasContext
*s
)
393 gen_program_exception(s
, PGM_SPECIFICATION
);
396 static inline void check_privileged(DisasContext
*s
)
398 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
399 gen_program_exception(s
, PGM_PRIVILEGED
);
403 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
407 /* 31-bitify the immediate part; register contents are dealt with below */
408 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
414 tmp
= tcg_const_i64(d2
);
415 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
420 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
424 tmp
= tcg_const_i64(d2
);
425 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
430 tmp
= tcg_const_i64(d2
);
433 /* 31-bit mode mask if there are values loaded from registers */
434 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
435 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
441 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
443 s
->cc_op
= CC_OP_CONST0
+ val
;
446 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
448 tcg_gen_discard_i64(cc_src
);
449 tcg_gen_mov_i64(cc_dst
, dst
);
450 tcg_gen_discard_i64(cc_vr
);
454 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
456 tcg_gen_discard_i64(cc_src
);
457 tcg_gen_extu_i32_i64(cc_dst
, dst
);
458 tcg_gen_discard_i64(cc_vr
);
462 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
465 tcg_gen_mov_i64(cc_src
, src
);
466 tcg_gen_mov_i64(cc_dst
, dst
);
467 tcg_gen_discard_i64(cc_vr
);
471 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
474 tcg_gen_extu_i32_i64(cc_src
, src
);
475 tcg_gen_extu_i32_i64(cc_dst
, dst
);
476 tcg_gen_discard_i64(cc_vr
);
480 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
481 TCGv_i64 dst
, TCGv_i64 vr
)
483 tcg_gen_mov_i64(cc_src
, src
);
484 tcg_gen_mov_i64(cc_dst
, dst
);
485 tcg_gen_mov_i64(cc_vr
, vr
);
489 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
491 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
494 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
496 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
499 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
502 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
505 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
508 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
511 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
513 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
516 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
518 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
521 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
523 /* XXX optimize for the constant? put it in s? */
524 TCGv_i32 tmp
= tcg_const_i32(v2
);
525 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
526 tcg_temp_free_i32(tmp
);
529 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
531 TCGv_i32 tmp
= tcg_const_i32(v2
);
532 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
533 tcg_temp_free_i32(tmp
);
536 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
538 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
541 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
543 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
546 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
548 TCGv_i64 tmp
= tcg_const_i64(v2
);
550 tcg_temp_free_i64(tmp
);
553 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
555 TCGv_i64 tmp
= tcg_const_i64(v2
);
557 tcg_temp_free_i64(tmp
);
560 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
562 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
565 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
567 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
570 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
572 tcg_gen_extu_i32_i64(cc_src
, v1
);
573 tcg_gen_mov_i64(cc_dst
, v2
);
574 tcg_gen_discard_i64(cc_vr
);
575 s
->cc_op
= CC_OP_LTGT_F32
;
578 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
580 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
583 /* CC value is in env->cc_op */
584 static inline void set_cc_static(DisasContext
*s
)
586 tcg_gen_discard_i64(cc_src
);
587 tcg_gen_discard_i64(cc_dst
);
588 tcg_gen_discard_i64(cc_vr
);
589 s
->cc_op
= CC_OP_STATIC
;
592 static inline void gen_op_set_cc_op(DisasContext
*s
)
594 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
595 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
599 static inline void gen_update_cc_op(DisasContext
*s
)
604 /* calculates cc into cc_op */
605 static void gen_op_calc_cc(DisasContext
*s
)
607 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
608 TCGv_i64 dummy
= tcg_const_i64(0);
615 /* s->cc_op is the cc value */
616 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
619 /* env->cc_op already is the cc value */
633 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
638 case CC_OP_LTUGTU_32
:
639 case CC_OP_LTUGTU_64
:
647 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
662 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
665 /* unknown operation - assume 3 arguments and cc_op in env */
666 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
672 tcg_temp_free_i32(local_cc_op
);
673 tcg_temp_free_i64(dummy
);
675 /* We now have cc in cc_op as constant */
679 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
683 *r1
= (insn
>> 4) & 0xf;
687 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
688 int *x2
, int *b2
, int *d2
)
692 *r1
= (insn
>> 20) & 0xf;
693 *x2
= (insn
>> 16) & 0xf;
694 *b2
= (insn
>> 12) & 0xf;
697 return get_address(s
, *x2
, *b2
, *d2
);
700 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
705 *r1
= (insn
>> 20) & 0xf;
707 *r3
= (insn
>> 16) & 0xf;
708 *b2
= (insn
>> 12) & 0xf;
712 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
717 *i2
= (insn
>> 16) & 0xff;
718 *b1
= (insn
>> 12) & 0xf;
721 return get_address(s
, 0, *b1
, *d1
);
724 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
726 /* NOTE: we handle the case where the TB spans two pages here */
727 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
728 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
729 && !s
->singlestep_enabled
730 && !(s
->tb
->cflags
& CF_LAST_IO
));
733 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
737 if (use_goto_tb(s
, pc
)) {
738 tcg_gen_goto_tb(tb_num
);
739 tcg_gen_movi_i64(psw_addr
, pc
);
740 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
742 /* jump to another page: currently not optimized */
743 tcg_gen_movi_i64(psw_addr
, pc
);
748 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
750 #ifdef DEBUG_INLINE_BRANCHES
751 inline_branch_miss
[cc_op
]++;
755 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
757 #ifdef DEBUG_INLINE_BRANCHES
758 inline_branch_hit
[cc_op
]++;
762 /* Table of mask values to comparison codes, given a comparison as input.
763 For a true comparison CC=3 will never be set, but we treat this
764 conservatively for possible use when CC=3 indicates overflow. */
765 static const TCGCond ltgt_cond
[16] = {
766 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
767 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
768 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
769 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
770 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
771 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
772 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
773 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
776 /* Table of mask values to comparison codes, given a logic op as input.
777 For such, only CC=0 and CC=1 should be possible. */
778 static const TCGCond nz_cond
[16] = {
780 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
782 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
784 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
785 /* EQ | NE | x | x */
786 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
789 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
790 details required to generate a TCG comparison. */
791 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
794 enum cc_op old_cc_op
= s
->cc_op
;
796 if (mask
== 15 || mask
== 0) {
797 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
800 c
->g1
= c
->g2
= true;
805 /* Find the TCG condition for the mask + cc op. */
811 cond
= ltgt_cond
[mask
];
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
818 case CC_OP_LTUGTU_32
:
819 case CC_OP_LTUGTU_64
:
820 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
821 if (cond
== TCG_COND_NEVER
) {
824 account_inline_branch(s
, old_cc_op
);
828 cond
= nz_cond
[mask
];
829 if (cond
== TCG_COND_NEVER
) {
832 account_inline_branch(s
, old_cc_op
);
847 account_inline_branch(s
, old_cc_op
);
862 account_inline_branch(s
, old_cc_op
);
867 /* Calculate cc value. */
872 /* Jump based on CC. We'll load up the real cond below;
873 the assignment here merely avoids a compiler warning. */
874 account_noninline_branch(s
, old_cc_op
);
875 old_cc_op
= CC_OP_STATIC
;
876 cond
= TCG_COND_NEVER
;
880 /* Load up the arguments of the comparison. */
882 c
->g1
= c
->g2
= false;
886 c
->u
.s32
.a
= tcg_temp_new_i32();
887 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
888 c
->u
.s32
.b
= tcg_const_i32(0);
891 case CC_OP_LTUGTU_32
:
893 c
->u
.s32
.a
= tcg_temp_new_i32();
894 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
895 c
->u
.s32
.b
= tcg_temp_new_i32();
896 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
902 c
->u
.s64
.b
= tcg_const_i64(0);
906 case CC_OP_LTUGTU_64
:
909 c
->g1
= c
->g2
= true;
915 c
->u
.s64
.a
= tcg_temp_new_i64();
916 c
->u
.s64
.b
= tcg_const_i64(0);
917 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
925 case 0x8 | 0x4 | 0x2: /* cc != 3 */
927 c
->u
.s32
.b
= tcg_const_i32(3);
929 case 0x8 | 0x4 | 0x1: /* cc != 2 */
931 c
->u
.s32
.b
= tcg_const_i32(2);
933 case 0x8 | 0x2 | 0x1: /* cc != 1 */
935 c
->u
.s32
.b
= tcg_const_i32(1);
937 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
940 c
->u
.s32
.a
= tcg_temp_new_i32();
941 c
->u
.s32
.b
= tcg_const_i32(0);
942 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
944 case 0x8 | 0x4: /* cc < 2 */
946 c
->u
.s32
.b
= tcg_const_i32(2);
948 case 0x8: /* cc == 0 */
950 c
->u
.s32
.b
= tcg_const_i32(0);
952 case 0x4 | 0x2 | 0x1: /* cc != 0 */
954 c
->u
.s32
.b
= tcg_const_i32(0);
956 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
959 c
->u
.s32
.a
= tcg_temp_new_i32();
960 c
->u
.s32
.b
= tcg_const_i32(0);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
963 case 0x4: /* cc == 1 */
965 c
->u
.s32
.b
= tcg_const_i32(1);
967 case 0x2 | 0x1: /* cc > 1 */
969 c
->u
.s32
.b
= tcg_const_i32(1);
971 case 0x2: /* cc == 2 */
973 c
->u
.s32
.b
= tcg_const_i32(2);
975 case 0x1: /* cc == 3 */
977 c
->u
.s32
.b
= tcg_const_i32(3);
980 /* CC is masked by something else: (8 >> cc) & mask. */
983 c
->u
.s32
.a
= tcg_const_i32(8);
984 c
->u
.s32
.b
= tcg_const_i32(0);
985 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
986 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
997 static void free_compare(DisasCompare
*c
)
1001 tcg_temp_free_i64(c
->u
.s64
.a
);
1003 tcg_temp_free_i32(c
->u
.s32
.a
);
1008 tcg_temp_free_i64(c
->u
.s64
.b
);
1010 tcg_temp_free_i32(c
->u
.s32
.b
);
1015 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1016 int x2
, int b2
, int d2
)
1018 TCGv_i64 addr
, tmp2
;
1021 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1022 op
, r1
, x2
, b2
, d2
);
1023 addr
= get_address(s
, x2
, b2
, d2
);
1025 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1026 tmp2
= tcg_temp_new_i64();
1027 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1028 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1029 store_reg(r1
, tmp2
);
1030 tcg_temp_free_i64(tmp2
);
1032 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1033 tmp2
= tcg_temp_new_i64();
1034 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1035 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1036 store_reg(r1
, tmp2
);
1037 tcg_temp_free_i64(tmp2
);
1039 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1040 tmp2
= tcg_temp_new_i64();
1041 tmp32_1
= tcg_temp_new_i32();
1042 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1043 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1044 tcg_temp_free_i64(tmp2
);
1045 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1046 store_reg32(r1
, tmp32_1
);
1047 tcg_temp_free_i32(tmp32_1
);
1049 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1050 tmp2
= tcg_temp_new_i64();
1051 tmp32_1
= tcg_temp_new_i32();
1052 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1053 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1054 tcg_temp_free_i64(tmp2
);
1055 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1056 store_reg16(r1
, tmp32_1
);
1057 tcg_temp_free_i32(tmp32_1
);
1059 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1060 tmp32_1
= load_reg32(r1
);
1061 tmp2
= tcg_temp_new_i64();
1062 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1063 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1064 tcg_temp_free_i32(tmp32_1
);
1065 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1066 tcg_temp_free_i64(tmp2
);
1069 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1070 gen_illegal_opcode(s
);
1073 tcg_temp_free_i64(addr
);
1076 #ifndef CONFIG_USER_ONLY
1077 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1080 int op
= (insn
>> 32) & 0xff;
1082 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1083 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1085 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1087 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1088 /* Test Protection */
1089 potential_page_fault(s
);
1090 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1094 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1095 gen_illegal_opcode(s
);
1099 tcg_temp_free_i64(tmp
);
1100 tcg_temp_free_i64(tmp2
);
1104 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1105 int r3
, int b2
, int d2
)
1108 TCGv_i32 tmp32_1
, tmp32_2
;
1110 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1111 op
, r1
, r3
, b2
, d2
);
1113 #ifndef CONFIG_USER_ONLY
1114 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1116 check_privileged(s
);
1117 tmp
= get_address(s
, 0, b2
, d2
);
1118 tmp32_1
= tcg_const_i32(r1
);
1119 tmp32_2
= tcg_const_i32(r3
);
1120 potential_page_fault(s
);
1121 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1122 tcg_temp_free_i64(tmp
);
1123 tcg_temp_free_i32(tmp32_1
);
1124 tcg_temp_free_i32(tmp32_2
);
1126 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1128 check_privileged(s
);
1129 tmp
= get_address(s
, 0, b2
, d2
);
1130 tmp32_1
= tcg_const_i32(r1
);
1131 tmp32_2
= tcg_const_i32(r3
);
1132 potential_page_fault(s
);
1133 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1134 tcg_temp_free_i64(tmp
);
1135 tcg_temp_free_i32(tmp32_1
);
1136 tcg_temp_free_i32(tmp32_2
);
1140 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1141 gen_illegal_opcode(s
);
1146 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1147 int x2
, int b2
, int d2
, int r1b
)
1149 TCGv_i32 tmp_r1
, tmp32
;
1151 addr
= get_address(s
, x2
, b2
, d2
);
1152 tmp_r1
= tcg_const_i32(r1
);
1154 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1155 potential_page_fault(s
);
1156 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1158 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1159 potential_page_fault(s
);
1160 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1162 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1163 tmp
= tcg_temp_new_i64();
1164 tmp32
= load_freg32(r1
);
1165 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1166 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1167 tcg_temp_free_i64(tmp
);
1168 tcg_temp_free_i32(tmp32
);
1170 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1171 tmp
= tcg_temp_new_i64();
1172 tmp32
= tcg_temp_new_i32();
1173 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1174 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1175 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1176 tcg_temp_free_i64(tmp
);
1177 tcg_temp_free_i32(tmp32
);
1179 tmp32
= load_freg32(r1
);
1180 gen_set_cc_nz_f32(s
, tmp32
);
1181 tcg_temp_free_i32(tmp32
);
1183 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1184 tmp
= tcg_temp_new_i64();
1185 tmp32
= tcg_temp_new_i32();
1186 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1187 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1188 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1189 tcg_temp_free_i64(tmp
);
1190 tcg_temp_free_i32(tmp32
);
1192 tmp32
= load_freg32(r1
);
1193 gen_set_cc_nz_f32(s
, tmp32
);
1194 tcg_temp_free_i32(tmp32
);
1196 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1197 tmp
= tcg_temp_new_i64();
1198 tmp32
= tcg_temp_new_i32();
1199 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1200 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1201 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1202 tcg_temp_free_i64(tmp
);
1203 tcg_temp_free_i32(tmp32
);
1205 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1206 potential_page_fault(s
);
1207 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1210 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1211 potential_page_fault(s
);
1212 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1215 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1216 potential_page_fault(s
);
1217 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1220 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1221 tmp
= tcg_temp_new_i64();
1222 tmp32
= tcg_temp_new_i32();
1223 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1224 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1225 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1226 tcg_temp_free_i64(tmp
);
1227 tcg_temp_free_i32(tmp32
);
1229 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1230 potential_page_fault(s
);
1231 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1234 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1235 potential_page_fault(s
);
1236 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1239 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1240 potential_page_fault(s
);
1241 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1244 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1245 potential_page_fault(s
);
1246 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1248 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1249 potential_page_fault(s
);
1250 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1252 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1253 /* for RXF insns, r1 is R3 and r1b is R1 */
1254 tmp32
= tcg_const_i32(r1b
);
1255 potential_page_fault(s
);
1256 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1257 tcg_temp_free_i32(tmp32
);
1260 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1261 gen_illegal_opcode(s
);
1264 tcg_temp_free_i32(tmp_r1
);
1265 tcg_temp_free_i64(addr
);
1268 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1271 TCGv_i64 tmp
, tmp2
, tmp3
;
1272 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1274 #ifndef CONFIG_USER_ONLY
1278 r1
= (insn
>> 4) & 0xf;
1281 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1284 case 0x22: /* IPM R1 [RRE] */
1285 tmp32_1
= tcg_const_i32(r1
);
1287 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1288 tcg_temp_free_i32(tmp32_1
);
1290 case 0x41: /* CKSM R1,R2 [RRE] */
1291 tmp32_1
= tcg_const_i32(r1
);
1292 tmp32_2
= tcg_const_i32(r2
);
1293 potential_page_fault(s
);
1294 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1295 tcg_temp_free_i32(tmp32_1
);
1296 tcg_temp_free_i32(tmp32_2
);
1297 gen_op_movi_cc(s
, 0);
1299 case 0x4e: /* SAR R1,R2 [RRE] */
1300 tmp32_1
= load_reg32(r2
);
1301 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1302 tcg_temp_free_i32(tmp32_1
);
1304 case 0x4f: /* EAR R1,R2 [RRE] */
1305 tmp32_1
= tcg_temp_new_i32();
1306 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1307 store_reg32(r1
, tmp32_1
);
1308 tcg_temp_free_i32(tmp32_1
);
1310 case 0x54: /* MVPG R1,R2 [RRE] */
1312 tmp2
= load_reg(r1
);
1313 tmp3
= load_reg(r2
);
1314 potential_page_fault(s
);
1315 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1316 tcg_temp_free_i64(tmp
);
1317 tcg_temp_free_i64(tmp2
);
1318 tcg_temp_free_i64(tmp3
);
1319 /* XXX check CCO bit and set CC accordingly */
1320 gen_op_movi_cc(s
, 0);
1322 case 0x55: /* MVST R1,R2 [RRE] */
1323 tmp32_1
= load_reg32(0);
1324 tmp32_2
= tcg_const_i32(r1
);
1325 tmp32_3
= tcg_const_i32(r2
);
1326 potential_page_fault(s
);
1327 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1328 tcg_temp_free_i32(tmp32_1
);
1329 tcg_temp_free_i32(tmp32_2
);
1330 tcg_temp_free_i32(tmp32_3
);
1331 gen_op_movi_cc(s
, 1);
1333 case 0x5d: /* CLST R1,R2 [RRE] */
1334 tmp32_1
= load_reg32(0);
1335 tmp32_2
= tcg_const_i32(r1
);
1336 tmp32_3
= tcg_const_i32(r2
);
1337 potential_page_fault(s
);
1338 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1340 tcg_temp_free_i32(tmp32_1
);
1341 tcg_temp_free_i32(tmp32_2
);
1342 tcg_temp_free_i32(tmp32_3
);
1344 case 0x5e: /* SRST R1,R2 [RRE] */
1345 tmp32_1
= load_reg32(0);
1346 tmp32_2
= tcg_const_i32(r1
);
1347 tmp32_3
= tcg_const_i32(r2
);
1348 potential_page_fault(s
);
1349 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1351 tcg_temp_free_i32(tmp32_1
);
1352 tcg_temp_free_i32(tmp32_2
);
1353 tcg_temp_free_i32(tmp32_3
);
1356 #ifndef CONFIG_USER_ONLY
1357 case 0x02: /* STIDP D2(B2) [S] */
1359 check_privileged(s
);
1360 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1361 tmp
= get_address(s
, 0, b2
, d2
);
1362 potential_page_fault(s
);
1363 gen_helper_stidp(cpu_env
, tmp
);
1364 tcg_temp_free_i64(tmp
);
1366 case 0x04: /* SCK D2(B2) [S] */
1368 check_privileged(s
);
1369 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1370 tmp
= get_address(s
, 0, b2
, d2
);
1371 potential_page_fault(s
);
1372 gen_helper_sck(cc_op
, tmp
);
1374 tcg_temp_free_i64(tmp
);
1376 case 0x05: /* STCK D2(B2) [S] */
1378 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1379 tmp
= get_address(s
, 0, b2
, d2
);
1380 potential_page_fault(s
);
1381 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1383 tcg_temp_free_i64(tmp
);
1385 case 0x06: /* SCKC D2(B2) [S] */
1386 /* Set Clock Comparator */
1387 check_privileged(s
);
1388 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1389 tmp
= get_address(s
, 0, b2
, d2
);
1390 potential_page_fault(s
);
1391 gen_helper_sckc(cpu_env
, tmp
);
1392 tcg_temp_free_i64(tmp
);
1394 case 0x07: /* STCKC D2(B2) [S] */
1395 /* Store Clock Comparator */
1396 check_privileged(s
);
1397 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1398 tmp
= get_address(s
, 0, b2
, d2
);
1399 potential_page_fault(s
);
1400 gen_helper_stckc(cpu_env
, tmp
);
1401 tcg_temp_free_i64(tmp
);
1403 case 0x08: /* SPT D2(B2) [S] */
1405 check_privileged(s
);
1406 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1407 tmp
= get_address(s
, 0, b2
, d2
);
1408 potential_page_fault(s
);
1409 gen_helper_spt(cpu_env
, tmp
);
1410 tcg_temp_free_i64(tmp
);
1412 case 0x09: /* STPT D2(B2) [S] */
1413 /* Store CPU Timer */
1414 check_privileged(s
);
1415 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1416 tmp
= get_address(s
, 0, b2
, d2
);
1417 potential_page_fault(s
);
1418 gen_helper_stpt(cpu_env
, tmp
);
1419 tcg_temp_free_i64(tmp
);
1421 case 0x0a: /* SPKA D2(B2) [S] */
1422 /* Set PSW Key from Address */
1423 check_privileged(s
);
1424 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1425 tmp
= get_address(s
, 0, b2
, d2
);
1426 tmp2
= tcg_temp_new_i64();
1427 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1428 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1429 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1430 tcg_temp_free_i64(tmp2
);
1431 tcg_temp_free_i64(tmp
);
1433 case 0x0d: /* PTLB [S] */
1435 check_privileged(s
);
1436 gen_helper_ptlb(cpu_env
);
1438 case 0x10: /* SPX D2(B2) [S] */
1439 /* Set Prefix Register */
1440 check_privileged(s
);
1441 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1442 tmp
= get_address(s
, 0, b2
, d2
);
1443 potential_page_fault(s
);
1444 gen_helper_spx(cpu_env
, tmp
);
1445 tcg_temp_free_i64(tmp
);
1447 case 0x11: /* STPX D2(B2) [S] */
1449 check_privileged(s
);
1450 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1451 tmp
= get_address(s
, 0, b2
, d2
);
1452 tmp2
= tcg_temp_new_i64();
1453 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1454 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1455 tcg_temp_free_i64(tmp
);
1456 tcg_temp_free_i64(tmp2
);
1458 case 0x12: /* STAP D2(B2) [S] */
1459 /* Store CPU Address */
1460 check_privileged(s
);
1461 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1462 tmp
= get_address(s
, 0, b2
, d2
);
1463 tmp2
= tcg_temp_new_i64();
1464 tmp32_1
= tcg_temp_new_i32();
1465 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1466 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1467 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1468 tcg_temp_free_i64(tmp
);
1469 tcg_temp_free_i64(tmp2
);
1470 tcg_temp_free_i32(tmp32_1
);
1472 case 0x21: /* IPTE R1,R2 [RRE] */
1473 /* Invalidate PTE */
1474 check_privileged(s
);
1475 r1
= (insn
>> 4) & 0xf;
1478 tmp2
= load_reg(r2
);
1479 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1480 tcg_temp_free_i64(tmp
);
1481 tcg_temp_free_i64(tmp2
);
1483 case 0x29: /* ISKE R1,R2 [RRE] */
1484 /* Insert Storage Key Extended */
1485 check_privileged(s
);
1486 r1
= (insn
>> 4) & 0xf;
1489 tmp2
= tcg_temp_new_i64();
1490 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1491 store_reg(r1
, tmp2
);
1492 tcg_temp_free_i64(tmp
);
1493 tcg_temp_free_i64(tmp2
);
1495 case 0x2a: /* RRBE R1,R2 [RRE] */
1496 /* Set Storage Key Extended */
1497 check_privileged(s
);
1498 r1
= (insn
>> 4) & 0xf;
1500 tmp32_1
= load_reg32(r1
);
1502 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1504 tcg_temp_free_i32(tmp32_1
);
1505 tcg_temp_free_i64(tmp
);
1507 case 0x2b: /* SSKE R1,R2 [RRE] */
1508 /* Set Storage Key Extended */
1509 check_privileged(s
);
1510 r1
= (insn
>> 4) & 0xf;
1512 tmp32_1
= load_reg32(r1
);
1514 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1515 tcg_temp_free_i32(tmp32_1
);
1516 tcg_temp_free_i64(tmp
);
1518 case 0x34: /* STCH ? */
1519 /* Store Subchannel */
1520 check_privileged(s
);
1521 gen_op_movi_cc(s
, 3);
1523 case 0x46: /* STURA R1,R2 [RRE] */
1524 /* Store Using Real Address */
1525 check_privileged(s
);
1526 r1
= (insn
>> 4) & 0xf;
1528 tmp32_1
= load_reg32(r1
);
1530 potential_page_fault(s
);
1531 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1532 tcg_temp_free_i32(tmp32_1
);
1533 tcg_temp_free_i64(tmp
);
1535 case 0x50: /* CSP R1,R2 [RRE] */
1536 /* Compare And Swap And Purge */
1537 check_privileged(s
);
1538 r1
= (insn
>> 4) & 0xf;
1540 tmp32_1
= tcg_const_i32(r1
);
1541 tmp32_2
= tcg_const_i32(r2
);
1542 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1544 tcg_temp_free_i32(tmp32_1
);
1545 tcg_temp_free_i32(tmp32_2
);
1547 case 0x5f: /* CHSC ? */
1548 /* Channel Subsystem Call */
1549 check_privileged(s
);
1550 gen_op_movi_cc(s
, 3);
1552 case 0x78: /* STCKE D2(B2) [S] */
1553 /* Store Clock Extended */
1554 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1555 tmp
= get_address(s
, 0, b2
, d2
);
1556 potential_page_fault(s
);
1557 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1559 tcg_temp_free_i64(tmp
);
1561 case 0x79: /* SACF D2(B2) [S] */
1562 /* Set Address Space Control Fast */
1563 check_privileged(s
);
1564 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1565 tmp
= get_address(s
, 0, b2
, d2
);
1566 potential_page_fault(s
);
1567 gen_helper_sacf(cpu_env
, tmp
);
1568 tcg_temp_free_i64(tmp
);
1569 /* addressing mode has changed, so end the block */
1572 s
->is_jmp
= DISAS_JUMP
;
1574 case 0x7d: /* STSI D2,(B2) [S] */
1575 check_privileged(s
);
1576 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1577 tmp
= get_address(s
, 0, b2
, d2
);
1578 tmp32_1
= load_reg32(0);
1579 tmp32_2
= load_reg32(1);
1580 potential_page_fault(s
);
1581 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1583 tcg_temp_free_i64(tmp
);
1584 tcg_temp_free_i32(tmp32_1
);
1585 tcg_temp_free_i32(tmp32_2
);
1587 case 0x9d: /* LFPC D2(B2) [S] */
1588 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1589 tmp
= get_address(s
, 0, b2
, d2
);
1590 tmp2
= tcg_temp_new_i64();
1591 tmp32_1
= tcg_temp_new_i32();
1592 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1593 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1594 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1595 tcg_temp_free_i64(tmp
);
1596 tcg_temp_free_i64(tmp2
);
1597 tcg_temp_free_i32(tmp32_1
);
1599 case 0xb1: /* STFL D2(B2) [S] */
1600 /* Store Facility List (CPU features) at 200 */
1601 check_privileged(s
);
1602 tmp2
= tcg_const_i64(0xc0000000);
1603 tmp
= tcg_const_i64(200);
1604 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1605 tcg_temp_free_i64(tmp2
);
1606 tcg_temp_free_i64(tmp
);
1608 case 0xb2: /* LPSWE D2(B2) [S] */
1609 /* Load PSW Extended */
1610 check_privileged(s
);
1611 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1612 tmp
= get_address(s
, 0, b2
, d2
);
1613 tmp2
= tcg_temp_new_i64();
1614 tmp3
= tcg_temp_new_i64();
1615 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1616 tcg_gen_addi_i64(tmp
, tmp
, 8);
1617 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1618 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1619 /* we need to keep cc_op intact */
1620 s
->is_jmp
= DISAS_JUMP
;
1621 tcg_temp_free_i64(tmp
);
1622 tcg_temp_free_i64(tmp2
);
1623 tcg_temp_free_i64(tmp3
);
1625 case 0x20: /* SERVC R1,R2 [RRE] */
1626 /* SCLP Service call (PV hypercall) */
1627 check_privileged(s
);
1628 potential_page_fault(s
);
1629 tmp32_1
= load_reg32(r2
);
1631 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1633 tcg_temp_free_i32(tmp32_1
);
1634 tcg_temp_free_i64(tmp
);
1638 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1639 gen_illegal_opcode(s
);
1644 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1648 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1649 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1650 #define FP_HELPER(i) \
1651 tmp32_1 = tcg_const_i32(r1); \
1652 tmp32_2 = tcg_const_i32(r2); \
1653 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1654 tcg_temp_free_i32(tmp32_1); \
1655 tcg_temp_free_i32(tmp32_2);
1657 #define FP_HELPER_CC(i) \
1658 tmp32_1 = tcg_const_i32(r1); \
1659 tmp32_2 = tcg_const_i32(r2); \
1660 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1662 tcg_temp_free_i32(tmp32_1); \
1663 tcg_temp_free_i32(tmp32_2);
1666 case 0x0: /* LPEBR R1,R2 [RRE] */
1667 FP_HELPER_CC(lpebr
);
1669 case 0x2: /* LTEBR R1,R2 [RRE] */
1670 FP_HELPER_CC(ltebr
);
1672 case 0x3: /* LCEBR R1,R2 [RRE] */
1673 FP_HELPER_CC(lcebr
);
1675 case 0x4: /* LDEBR R1,R2 [RRE] */
1678 case 0x5: /* LXDBR R1,R2 [RRE] */
1681 case 0x9: /* CEBR R1,R2 [RRE] */
1684 case 0xa: /* AEBR R1,R2 [RRE] */
1687 case 0xb: /* SEBR R1,R2 [RRE] */
1690 case 0xd: /* DEBR R1,R2 [RRE] */
1693 case 0x10: /* LPDBR R1,R2 [RRE] */
1694 FP_HELPER_CC(lpdbr
);
1696 case 0x12: /* LTDBR R1,R2 [RRE] */
1697 FP_HELPER_CC(ltdbr
);
1699 case 0x13: /* LCDBR R1,R2 [RRE] */
1700 FP_HELPER_CC(lcdbr
);
1702 case 0x15: /* SQBDR R1,R2 [RRE] */
1705 case 0x17: /* MEEBR R1,R2 [RRE] */
1708 case 0x19: /* CDBR R1,R2 [RRE] */
1711 case 0x1a: /* ADBR R1,R2 [RRE] */
1714 case 0x1b: /* SDBR R1,R2 [RRE] */
1717 case 0x1c: /* MDBR R1,R2 [RRE] */
1720 case 0x1d: /* DDBR R1,R2 [RRE] */
1723 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
1724 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
1725 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
1726 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
1727 tmp32_1
= tcg_const_i32(m3
);
1728 tmp32_2
= tcg_const_i32(r2
);
1729 tmp32_3
= tcg_const_i32(r1
);
1732 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1735 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1738 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1743 tcg_temp_free_i32(tmp32_1
);
1744 tcg_temp_free_i32(tmp32_2
);
1745 tcg_temp_free_i32(tmp32_3
);
1747 case 0x40: /* LPXBR R1,R2 [RRE] */
1748 FP_HELPER_CC(lpxbr
);
1750 case 0x42: /* LTXBR R1,R2 [RRE] */
1751 FP_HELPER_CC(ltxbr
);
1753 case 0x43: /* LCXBR R1,R2 [RRE] */
1754 FP_HELPER_CC(lcxbr
);
1756 case 0x44: /* LEDBR R1,R2 [RRE] */
1759 case 0x45: /* LDXBR R1,R2 [RRE] */
1762 case 0x46: /* LEXBR R1,R2 [RRE] */
1765 case 0x49: /* CXBR R1,R2 [RRE] */
1768 case 0x4a: /* AXBR R1,R2 [RRE] */
1771 case 0x4b: /* SXBR R1,R2 [RRE] */
1774 case 0x4c: /* MXBR R1,R2 [RRE] */
1777 case 0x4d: /* DXBR R1,R2 [RRE] */
1780 case 0x65: /* LXR R1,R2 [RRE] */
1781 tmp
= load_freg(r2
);
1782 store_freg(r1
, tmp
);
1783 tcg_temp_free_i64(tmp
);
1784 tmp
= load_freg(r2
+ 2);
1785 store_freg(r1
+ 2, tmp
);
1786 tcg_temp_free_i64(tmp
);
1788 case 0x74: /* LZER R1 [RRE] */
1789 tmp32_1
= tcg_const_i32(r1
);
1790 gen_helper_lzer(cpu_env
, tmp32_1
);
1791 tcg_temp_free_i32(tmp32_1
);
1793 case 0x75: /* LZDR R1 [RRE] */
1794 tmp32_1
= tcg_const_i32(r1
);
1795 gen_helper_lzdr(cpu_env
, tmp32_1
);
1796 tcg_temp_free_i32(tmp32_1
);
1798 case 0x76: /* LZXR R1 [RRE] */
1799 tmp32_1
= tcg_const_i32(r1
);
1800 gen_helper_lzxr(cpu_env
, tmp32_1
);
1801 tcg_temp_free_i32(tmp32_1
);
1803 case 0x84: /* SFPC R1 [RRE] */
1804 tmp32_1
= load_reg32(r1
);
1805 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1806 tcg_temp_free_i32(tmp32_1
);
1808 case 0x94: /* CEFBR R1,R2 [RRE] */
1809 case 0x95: /* CDFBR R1,R2 [RRE] */
1810 case 0x96: /* CXFBR R1,R2 [RRE] */
1811 tmp32_1
= tcg_const_i32(r1
);
1812 tmp32_2
= load_reg32(r2
);
1815 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1818 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1821 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1826 tcg_temp_free_i32(tmp32_1
);
1827 tcg_temp_free_i32(tmp32_2
);
1829 case 0x98: /* CFEBR R1,R2 [RRE] */
1830 case 0x99: /* CFDBR R1,R2 [RRE] */
1831 case 0x9a: /* CFXBR R1,R2 [RRE] */
1832 tmp32_1
= tcg_const_i32(r1
);
1833 tmp32_2
= tcg_const_i32(r2
);
1834 tmp32_3
= tcg_const_i32(m3
);
1837 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1840 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1843 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1849 tcg_temp_free_i32(tmp32_1
);
1850 tcg_temp_free_i32(tmp32_2
);
1851 tcg_temp_free_i32(tmp32_3
);
1853 case 0xa4: /* CEGBR R1,R2 [RRE] */
1854 case 0xa5: /* CDGBR R1,R2 [RRE] */
1855 tmp32_1
= tcg_const_i32(r1
);
1859 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1862 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1867 tcg_temp_free_i32(tmp32_1
);
1868 tcg_temp_free_i64(tmp
);
1870 case 0xa6: /* CXGBR R1,R2 [RRE] */
1871 tmp32_1
= tcg_const_i32(r1
);
1873 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1874 tcg_temp_free_i32(tmp32_1
);
1875 tcg_temp_free_i64(tmp
);
1877 case 0xa8: /* CGEBR R1,R2 [RRE] */
1878 tmp32_1
= tcg_const_i32(r1
);
1879 tmp32_2
= tcg_const_i32(r2
);
1880 tmp32_3
= tcg_const_i32(m3
);
1881 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1883 tcg_temp_free_i32(tmp32_1
);
1884 tcg_temp_free_i32(tmp32_2
);
1885 tcg_temp_free_i32(tmp32_3
);
1887 case 0xa9: /* CGDBR R1,R2 [RRE] */
1888 tmp32_1
= tcg_const_i32(r1
);
1889 tmp32_2
= tcg_const_i32(r2
);
1890 tmp32_3
= tcg_const_i32(m3
);
1891 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1893 tcg_temp_free_i32(tmp32_1
);
1894 tcg_temp_free_i32(tmp32_2
);
1895 tcg_temp_free_i32(tmp32_3
);
1897 case 0xaa: /* CGXBR R1,R2 [RRE] */
1898 tmp32_1
= tcg_const_i32(r1
);
1899 tmp32_2
= tcg_const_i32(r2
);
1900 tmp32_3
= tcg_const_i32(m3
);
1901 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1903 tcg_temp_free_i32(tmp32_1
);
1904 tcg_temp_free_i32(tmp32_2
);
1905 tcg_temp_free_i32(tmp32_3
);
1908 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
1909 gen_illegal_opcode(s
);
1917 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1923 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1925 case 0x17: /* LLGTR R1,R2 [RRE] */
1926 tmp32_1
= load_reg32(r2
);
1927 tmp
= tcg_temp_new_i64();
1928 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, 0x7fffffffUL
);
1929 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
1931 tcg_temp_free_i32(tmp32_1
);
1932 tcg_temp_free_i64(tmp
);
1934 case 0x0f: /* LRVGR R1,R2 [RRE] */
1935 tcg_gen_bswap64_i64(regs
[r1
], regs
[r2
]);
1937 case 0x1f: /* LRVR R1,R2 [RRE] */
1938 tmp32_1
= load_reg32(r2
);
1939 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1940 store_reg32(r1
, tmp32_1
);
1941 tcg_temp_free_i32(tmp32_1
);
1943 case 0x83: /* FLOGR R1,R2 [RRE] */
1945 tmp32_1
= tcg_const_i32(r1
);
1946 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
1948 tcg_temp_free_i64(tmp
);
1949 tcg_temp_free_i32(tmp32_1
);
1952 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
1953 gen_illegal_opcode(s
);
1958 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1962 int op
, r1
, r2
, r3
, d2
, x2
, b2
, r1b
;
1964 opc
= cpu_ldub_code(env
, s
->pc
);
1965 LOG_DISAS("opc 0x%x\n", opc
);
1969 insn
= ld_code4(env
, s
->pc
);
1970 op
= (insn
>> 16) & 0xff;
1971 disas_b2(env
, s
, op
, insn
);
1974 insn
= ld_code4(env
, s
->pc
);
1975 op
= (insn
>> 16) & 0xff;
1976 r3
= (insn
>> 12) & 0xf; /* aka m3 */
1977 r1
= (insn
>> 4) & 0xf;
1979 disas_b3(env
, s
, op
, r3
, r1
, r2
);
1982 insn
= ld_code4(env
, s
->pc
);
1983 r1
= (insn
>> 4) & 0xf;
1985 op
= (insn
>> 16) & 0xff;
1986 disas_b9(env
, s
, op
, r1
, r2
);
1989 insn
= ld_code6(env
, s
->pc
);
1992 r1
= (insn
>> 36) & 0xf;
1993 x2
= (insn
>> 32) & 0xf;
1994 b2
= (insn
>> 28) & 0xf;
1995 d2
= ((int)((((insn
>> 16) & 0xfff)
1996 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
1997 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
1999 #ifndef CONFIG_USER_ONLY
2001 /* Test Protection */
2002 check_privileged(s
);
2003 insn
= ld_code6(env
, s
->pc
);
2005 disas_e5(env
, s
, insn
);
2009 insn
= ld_code6(env
, s
->pc
);
2012 r1
= (insn
>> 36) & 0xf;
2013 r3
= (insn
>> 32) & 0xf;
2014 b2
= (insn
>> 28) & 0xf;
2015 d2
= ((int)((((insn
>> 16) & 0xfff)
2016 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2017 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
2020 insn
= ld_code6(env
, s
->pc
);
2023 r1
= (insn
>> 36) & 0xf;
2024 x2
= (insn
>> 32) & 0xf;
2025 b2
= (insn
>> 28) & 0xf;
2026 d2
= (short)((insn
>> 16) & 0xfff);
2027 r1b
= (insn
>> 12) & 0xf;
2028 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
2031 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
2032 gen_illegal_opcode(s
);
2037 /* ====================================================================== */
2038 /* Define the insn format enumeration. */
2039 #define F0(N) FMT_##N,
2040 #define F1(N, X1) F0(N)
2041 #define F2(N, X1, X2) F0(N)
2042 #define F3(N, X1, X2, X3) F0(N)
2043 #define F4(N, X1, X2, X3, X4) F0(N)
2044 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2047 #include "insn-format.def"
2057 /* Define a structure to hold the decoded fields. We'll store each inside
2058 an array indexed by an enum. In order to conserve memory, we'll arrange
2059 for fields that do not exist at the same time to overlap, thus the "C"
2060 for compact. For checking purposes there is an "O" for original index
2061 as well that will be applied to availability bitmaps. */
2063 enum DisasFieldIndexO
{
2086 enum DisasFieldIndexC
{
2117 struct DisasFields
{
2120 unsigned presentC
:16;
2121 unsigned int presentO
;
2125 /* This is the way fields are to be accessed out of DisasFields. */
2126 #define have_field(S, F) have_field1((S), FLD_O_##F)
2127 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2129 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
2131 return (f
->presentO
>> c
) & 1;
2134 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
2135 enum DisasFieldIndexC c
)
2137 assert(have_field1(f
, o
));
2141 /* Describe the layout of each field in each format. */
2142 typedef struct DisasField
{
2144 unsigned int size
:8;
2145 unsigned int type
:2;
2146 unsigned int indexC
:6;
2147 enum DisasFieldIndexO indexO
:8;
2150 typedef struct DisasFormatInfo
{
2151 DisasField op
[NUM_C_FIELD
];
2154 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2155 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2156 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2157 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2158 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2159 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2160 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2161 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2162 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2163 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2164 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2165 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2166 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2167 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2169 #define F0(N) { { } },
2170 #define F1(N, X1) { { X1 } },
2171 #define F2(N, X1, X2) { { X1, X2 } },
2172 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2173 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2174 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2176 static const DisasFormatInfo format_info
[] = {
2177 #include "insn-format.def"
2195 /* Generally, we'll extract operands into this structures, operate upon
2196 them, and store them back. See the "in1", "in2", "prep", "wout" sets
2197 of routines below for more details. */
2199 bool g_out
, g_out2
, g_in1
, g_in2
;
2200 TCGv_i64 out
, out2
, in1
, in2
;
2204 /* Return values from translate_one, indicating the state of the TB. */
2206 /* Continue the TB. */
2208 /* We have emitted one or more goto_tb. No fixup required. */
2210 /* We are not using a goto_tb (for whatever reason), but have updated
2211 the PC (for whatever reason), so there's no need to do it again on
2214 /* We are exiting the TB, but have neither emitted a goto_tb, nor
2215 updated the PC for the next instruction to be executed. */
2217 /* We are ending the TB with a noreturn function call, e.g. longjmp.
2218 No following code will be executed. */
/* Architectural facility each instruction belongs to; used to gate
   availability of instructions in the table-driven decoder.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
} DisasFacility;
2248 DisasFacility fac
:6;
2252 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
2253 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
2254 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
2255 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
2256 void (*help_cout
)(DisasContext
*, DisasOps
*);
2257 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
2262 /* ====================================================================== */
2263 /* Miscelaneous helpers, used by several operations. */
2265 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
2266 DisasOps
*o
, int mask
)
2268 int b2
= get_field(f
, b2
);
2269 int d2
= get_field(f
, d2
);
2272 o
->in2
= tcg_const_i64(d2
& mask
);
2274 o
->in2
= get_address(s
, 0, b2
, d2
);
2275 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2279 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
2281 if (dest
== s
->next_pc
) {
2284 if (use_goto_tb(s
, dest
)) {
2285 gen_update_cc_op(s
);
2287 tcg_gen_movi_i64(psw_addr
, dest
);
2288 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
2289 return EXIT_GOTO_TB
;
2291 tcg_gen_movi_i64(psw_addr
, dest
);
2292 return EXIT_PC_UPDATED
;
2296 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
2297 bool is_imm
, int imm
, TCGv_i64 cdest
)
2300 uint64_t dest
= s
->pc
+ 2 * imm
;
2303 /* Take care of the special cases first. */
2304 if (c
->cond
== TCG_COND_NEVER
) {
2309 if (dest
== s
->next_pc
) {
2310 /* Branch to next. */
2314 if (c
->cond
== TCG_COND_ALWAYS
) {
2315 ret
= help_goto_direct(s
, dest
);
2319 if (TCGV_IS_UNUSED_I64(cdest
)) {
2320 /* E.g. bcr %r0 -> no branch. */
2324 if (c
->cond
== TCG_COND_ALWAYS
) {
2325 tcg_gen_mov_i64(psw_addr
, cdest
);
2326 ret
= EXIT_PC_UPDATED
;
2331 if (use_goto_tb(s
, s
->next_pc
)) {
2332 if (is_imm
&& use_goto_tb(s
, dest
)) {
2333 /* Both exits can use goto_tb. */
2334 gen_update_cc_op(s
);
2336 lab
= gen_new_label();
2338 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2340 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2343 /* Branch not taken. */
2345 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2346 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2351 tcg_gen_movi_i64(psw_addr
, dest
);
2352 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
2356 /* Fallthru can use goto_tb, but taken branch cannot. */
2357 /* Store taken branch destination before the brcond. This
2358 avoids having to allocate a new local temp to hold it.
2359 We'll overwrite this in the not taken case anyway. */
2361 tcg_gen_mov_i64(psw_addr
, cdest
);
2364 lab
= gen_new_label();
2366 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2368 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2371 /* Branch not taken. */
2372 gen_update_cc_op(s
);
2374 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2375 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2379 tcg_gen_movi_i64(psw_addr
, dest
);
2381 ret
= EXIT_PC_UPDATED
;
2384 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
2385 Most commonly we're single-stepping or some other condition that
2386 disables all use of goto_tb. Just update the PC and exit. */
2388 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
2390 cdest
= tcg_const_i64(dest
);
2394 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
2397 TCGv_i32 t0
= tcg_temp_new_i32();
2398 TCGv_i64 t1
= tcg_temp_new_i64();
2399 TCGv_i64 z
= tcg_const_i64(0);
2400 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
2401 tcg_gen_extu_i32_i64(t1
, t0
);
2402 tcg_temp_free_i32(t0
);
2403 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
2404 tcg_temp_free_i64(t1
);
2405 tcg_temp_free_i64(z
);
2409 tcg_temp_free_i64(cdest
);
2411 tcg_temp_free_i64(next
);
2413 ret
= EXIT_PC_UPDATED
;
2421 /* ====================================================================== */
2422 /* The operations. These perform the bulk of the work for any insn,
2423 usually after the operands have been loaded and output initialized. */
2425 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
2427 gen_helper_abs_i64(o
->out
, o
->in2
);
2431 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
2433 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2437 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2441 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2443 /* XXX possible optimization point */
2445 cc
= tcg_temp_new_i64();
2446 tcg_gen_extu_i32_i64(cc
, cc_op
);
2447 tcg_gen_shri_i64(cc
, cc
, 1);
2449 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2450 tcg_temp_free_i64(cc
);
2454 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2456 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2460 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2462 int shift
= s
->insn
->data
& 0xff;
2463 int size
= s
->insn
->data
>> 8;
2464 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2467 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2468 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2469 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2471 /* Produce the CC from only the bits manipulated. */
2472 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2473 set_cc_nz_u64(s
, cc_dst
);
2477 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2479 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2480 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2481 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2482 return EXIT_PC_UPDATED
;
2488 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2490 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2491 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2494 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2496 int m1
= get_field(s
->fields
, m1
);
2497 bool is_imm
= have_field(s
->fields
, i2
);
2498 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2501 disas_jcc(s
, &c
, m1
);
2502 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2505 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2507 int r1
= get_field(s
->fields
, r1
);
2508 bool is_imm
= have_field(s
->fields
, i2
);
2509 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2513 c
.cond
= TCG_COND_NE
;
2518 t
= tcg_temp_new_i64();
2519 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2520 store_reg32_i64(r1
, t
);
2521 c
.u
.s32
.a
= tcg_temp_new_i32();
2522 c
.u
.s32
.b
= tcg_const_i32(0);
2523 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2524 tcg_temp_free_i64(t
);
2526 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2529 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2531 int r1
= get_field(s
->fields
, r1
);
2532 bool is_imm
= have_field(s
->fields
, i2
);
2533 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2536 c
.cond
= TCG_COND_NE
;
2541 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2542 c
.u
.s64
.a
= regs
[r1
];
2543 c
.u
.s64
.b
= tcg_const_i64(0);
2545 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2548 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
2550 int l
= get_field(s
->fields
, l1
);
2555 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
2556 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
2559 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
2560 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
2563 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
2564 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
2567 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
2568 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
2571 potential_page_fault(s
);
2572 vl
= tcg_const_i32(l
);
2573 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
2574 tcg_temp_free_i32(vl
);
2578 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2582 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2584 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2585 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2586 potential_page_fault(s
);
2587 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2588 tcg_temp_free_i32(r1
);
2589 tcg_temp_free_i32(r3
);
2594 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2596 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2597 TCGv_i32 t1
= tcg_temp_new_i32();
2598 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2599 potential_page_fault(s
);
2600 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2602 tcg_temp_free_i32(t1
);
2603 tcg_temp_free_i32(m3
);
2607 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2609 int r3
= get_field(s
->fields
, r3
);
2610 potential_page_fault(s
);
2611 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2616 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2618 int r3
= get_field(s
->fields
, r3
);
2619 potential_page_fault(s
);
2620 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2625 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2627 int r3
= get_field(s
->fields
, r3
);
2628 TCGv_i64 in3
= tcg_temp_new_i64();
2629 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2630 potential_page_fault(s
);
2631 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2632 tcg_temp_free_i64(in3
);
2637 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2639 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2640 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2641 potential_page_fault(s
);
2642 /* XXX rewrite in tcg */
2643 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2648 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2650 TCGv_i64 t1
= tcg_temp_new_i64();
2651 TCGv_i32 t2
= tcg_temp_new_i32();
2652 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2653 gen_helper_cvd(t1
, t2
);
2654 tcg_temp_free_i32(t2
);
2655 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2656 tcg_temp_free_i64(t1
);
2660 #ifndef CONFIG_USER_ONLY
2661 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2665 check_privileged(s
);
2666 potential_page_fault(s
);
2668 /* We pretend the format is RX_a so that D2 is the field we want. */
2669 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2670 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2671 tcg_temp_free_i32(tmp
);
2676 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2678 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2679 return_low128(o
->out
);
2683 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2685 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2686 return_low128(o
->out
);
2690 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2692 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2693 return_low128(o
->out
);
2697 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2699 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2700 return_low128(o
->out
);
2704 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2706 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2710 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2712 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2713 tb->flags, (ab)use the tb->cs_base field as the address of
2714 the template in memory, and grab 8 bits of tb->flags/cflags for
2715 the contents of the register. We would then recognize all this
2716 in gen_intermediate_code_internal, generating code for exactly
2717 one instruction. This new TB then gets executed normally.
2719 On the other hand, this seems to be mostly used for modifying
2720 MVC inside of memcpy, which needs a helper call anyway. So
2721 perhaps this doesn't bear thinking about any further. */
2728 tmp
= tcg_const_i64(s
->next_pc
);
2729 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2730 tcg_temp_free_i64(tmp
);
2736 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2738 int m3
= get_field(s
->fields
, m3
);
2739 int pos
, len
, base
= s
->insn
->data
;
2740 TCGv_i64 tmp
= tcg_temp_new_i64();
2745 /* Effectively a 32-bit load. */
2746 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2753 /* Effectively a 16-bit load. */
2754 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2762 /* Effectively an 8-bit load. */
2763 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2768 pos
= base
+ ctz32(m3
) * 8;
2769 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2770 ccm
= ((1ull << len
) - 1) << pos
;
2774 /* This is going to be a sequence of loads and inserts. */
2775 pos
= base
+ 32 - 8;
2779 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2780 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2781 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2784 m3
= (m3
<< 1) & 0xf;
2790 tcg_gen_movi_i64(tmp
, ccm
);
2791 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2792 tcg_temp_free_i64(tmp
);
2796 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2798 int shift
= s
->insn
->data
& 0xff;
2799 int size
= s
->insn
->data
>> 8;
2800 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2804 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2806 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2810 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2812 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2816 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2818 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2822 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2824 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2828 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2830 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2834 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2836 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2840 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2842 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2846 #ifndef CONFIG_USER_ONLY
2847 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2849 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2850 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2851 check_privileged(s
);
2852 potential_page_fault(s
);
2853 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2854 tcg_temp_free_i32(r1
);
2855 tcg_temp_free_i32(r3
);
2859 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2861 check_privileged(s
);
2862 potential_page_fault(s
);
2863 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2868 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2872 check_privileged(s
);
2874 t1
= tcg_temp_new_i64();
2875 t2
= tcg_temp_new_i64();
2876 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2877 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2878 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2879 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2880 tcg_gen_shli_i64(t1
, t1
, 32);
2881 gen_helper_load_psw(cpu_env
, t1
, t2
);
2882 tcg_temp_free_i64(t1
);
2883 tcg_temp_free_i64(t2
);
2884 return EXIT_NORETURN
;
2888 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2890 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2891 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2892 potential_page_fault(s
);
2893 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2894 tcg_temp_free_i32(r1
);
2895 tcg_temp_free_i32(r3
);
2899 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2901 int r1
= get_field(s
->fields
, r1
);
2902 int r3
= get_field(s
->fields
, r3
);
2903 TCGv_i64 t
= tcg_temp_new_i64();
2904 TCGv_i64 t4
= tcg_const_i64(4);
2907 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2908 store_reg32_i64(r1
, t
);
2912 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2916 tcg_temp_free_i64(t
);
2917 tcg_temp_free_i64(t4
);
2921 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2923 int r1
= get_field(s
->fields
, r1
);
2924 int r3
= get_field(s
->fields
, r3
);
2925 TCGv_i64 t
= tcg_temp_new_i64();
2926 TCGv_i64 t4
= tcg_const_i64(4);
2929 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2930 store_reg32h_i64(r1
, t
);
2934 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2938 tcg_temp_free_i64(t
);
2939 tcg_temp_free_i64(t4
);
2943 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2945 int r1
= get_field(s
->fields
, r1
);
2946 int r3
= get_field(s
->fields
, r3
);
2947 TCGv_i64 t8
= tcg_const_i64(8);
2950 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2954 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2958 tcg_temp_free_i64(t8
);
2962 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2965 o
->g_out
= o
->g_in2
;
2966 TCGV_UNUSED_I64(o
->in2
);
2971 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2975 o
->g_out
= o
->g_in1
;
2976 o
->g_out2
= o
->g_in2
;
2977 TCGV_UNUSED_I64(o
->in1
);
2978 TCGV_UNUSED_I64(o
->in2
);
2979 o
->g_in1
= o
->g_in2
= false;
2983 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2985 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2986 potential_page_fault(s
);
2987 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2988 tcg_temp_free_i32(l
);
2992 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2994 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2995 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2996 potential_page_fault(s
);
2997 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2998 tcg_temp_free_i32(r1
);
2999 tcg_temp_free_i32(r2
);
3004 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3006 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3007 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3008 potential_page_fault(s
);
3009 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
3010 tcg_temp_free_i32(r1
);
3011 tcg_temp_free_i32(r3
);
3016 #ifndef CONFIG_USER_ONLY
3017 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3019 int r1
= get_field(s
->fields
, l1
);
3020 check_privileged(s
);
3021 potential_page_fault(s
);
3022 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3027 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3029 int r1
= get_field(s
->fields
, l1
);
3030 check_privileged(s
);
3031 potential_page_fault(s
);
3032 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3038 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3040 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3044 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3046 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3047 return_low128(o
->out2
);
3051 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3053 gen_helper_nabs_i64(o
->out
, o
->in2
);
3057 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3059 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3060 potential_page_fault(s
);
3061 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3062 tcg_temp_free_i32(l
);
3067 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3069 tcg_gen_neg_i64(o
->out
, o
->in2
);
3073 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3075 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3076 potential_page_fault(s
);
3077 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3078 tcg_temp_free_i32(l
);
3083 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3085 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3089 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3091 int shift
= s
->insn
->data
& 0xff;
3092 int size
= s
->insn
->data
>> 8;
3093 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3096 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3097 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3099 /* Produce the CC from only the bits manipulated. */
3100 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3101 set_cc_nz_u64(s
, cc_dst
);
3105 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3107 TCGv_i32 t1
= tcg_temp_new_i32();
3108 TCGv_i32 t2
= tcg_temp_new_i32();
3109 TCGv_i32 to
= tcg_temp_new_i32();
3110 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
3111 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
3112 tcg_gen_rotl_i32(to
, t1
, t2
);
3113 tcg_gen_extu_i32_i64(o
->out
, to
);
3114 tcg_temp_free_i32(t1
);
3115 tcg_temp_free_i32(t2
);
3116 tcg_temp_free_i32(to
);
3120 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3122 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3126 #ifndef CONFIG_USER_ONLY
3127 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3129 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3130 check_privileged(s
);
3131 potential_page_fault(s
);
3132 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3133 tcg_temp_free_i32(r1
);
3138 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3140 uint64_t sign
= 1ull << s
->insn
->data
;
3141 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3142 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3143 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3144 /* The arithmetic left shift is curious in that it does not affect
3145 the sign bit. Copy that over from the source unchanged. */
3146 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3147 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3148 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3152 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3154 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3158 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3160 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3164 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3166 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3170 #ifndef CONFIG_USER_ONLY
3171 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3173 check_privileged(s
);
3174 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3178 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3180 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3181 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3182 check_privileged(s
);
3183 potential_page_fault(s
);
3184 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3185 tcg_temp_free_i32(r1
);
3186 tcg_temp_free_i32(r3
);
3190 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3192 uint64_t i2
= get_field(s
->fields
, i2
);
3195 check_privileged(s
);
3197 /* It is important to do what the instruction name says: STORE THEN.
3198 If we let the output hook perform the store then if we fault and
3199 restart, we'll have the wrong SYSTEM MASK in place. */
3200 t
= tcg_temp_new_i64();
3201 tcg_gen_shri_i64(t
, psw_mask
, 56);
3202 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3203 tcg_temp_free_i64(t
);
3205 if (s
->fields
->op
== 0xac) {
3206 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3207 (i2
<< 56) | 0x00ffffffffffffffull
);
3209 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3215 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3217 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3221 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3223 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3227 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3229 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3233 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3235 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3239 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3241 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3242 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3243 potential_page_fault(s
);
3244 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3245 tcg_temp_free_i32(r1
);
3246 tcg_temp_free_i32(r3
);
3250 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3252 int m3
= get_field(s
->fields
, m3
);
3253 int pos
, base
= s
->insn
->data
;
3254 TCGv_i64 tmp
= tcg_temp_new_i64();
3256 pos
= base
+ ctz32(m3
) * 8;
3259 /* Effectively a 32-bit store. */
3260 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3261 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3267 /* Effectively a 16-bit store. */
3268 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3269 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3276 /* Effectively an 8-bit store. */
3277 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3278 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3282 /* This is going to be a sequence of shifts and stores. */
3283 pos
= base
+ 32 - 8;
3286 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3287 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3288 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3290 m3
= (m3
<< 1) & 0xf;
3295 tcg_temp_free_i64(tmp
);
3299 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3301 int r1
= get_field(s
->fields
, r1
);
3302 int r3
= get_field(s
->fields
, r3
);
3303 int size
= s
->insn
->data
;
3304 TCGv_i64 tsize
= tcg_const_i64(size
);
3308 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3310 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3315 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3319 tcg_temp_free_i64(tsize
);
3323 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3325 int r1
= get_field(s
->fields
, r1
);
3326 int r3
= get_field(s
->fields
, r3
);
3327 TCGv_i64 t
= tcg_temp_new_i64();
3328 TCGv_i64 t4
= tcg_const_i64(4);
3329 TCGv_i64 t32
= tcg_const_i64(32);
3332 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3333 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3337 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3341 tcg_temp_free_i64(t
);
3342 tcg_temp_free_i64(t4
);
3343 tcg_temp_free_i64(t32
);
3347 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3349 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3353 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3358 tcg_gen_not_i64(o
->in2
, o
->in2
);
3359 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3361 /* XXX possible optimization point */
3363 cc
= tcg_temp_new_i64();
3364 tcg_gen_extu_i32_i64(cc
, cc_op
);
3365 tcg_gen_shri_i64(cc
, cc
, 1);
3366 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3367 tcg_temp_free_i64(cc
);
3371 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3378 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3379 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3380 tcg_temp_free_i32(t
);
3382 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3383 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3384 tcg_temp_free_i32(t
);
3386 gen_exception(EXCP_SVC
);
3387 return EXIT_NORETURN
;
3390 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3392 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3393 potential_page_fault(s
);
3394 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3395 tcg_temp_free_i32(l
);
3400 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3402 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3403 potential_page_fault(s
);
3404 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3405 tcg_temp_free_i32(l
);
3409 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3411 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3412 potential_page_fault(s
);
3413 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3414 tcg_temp_free_i32(l
);
3419 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3421 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3425 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3427 int shift
= s
->insn
->data
& 0xff;
3428 int size
= s
->insn
->data
>> 8;
3429 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3432 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3433 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3435 /* Produce the CC from only the bits manipulated. */
3436 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3437 set_cc_nz_u64(s
, cc_dst
);
3441 /* ====================================================================== */
3442 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3443 the original inputs), update the various cc data structures in order to
3444 be able to compute the new condition code. */
3446 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3448 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3451 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3453 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3456 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3458 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3461 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3463 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3466 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3468 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3471 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3473 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3476 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3478 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3481 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3483 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3486 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3488 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3491 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3493 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3496 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3498 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3501 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3503 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3506 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3508 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3511 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3513 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3516 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3518 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3521 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3523 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3526 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3528 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3529 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3532 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3534 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3537 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3539 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3542 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3544 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3547 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3549 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3552 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3554 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3557 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3559 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3562 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3564 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3567 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3569 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3572 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3574 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3577 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3579 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3582 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3584 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3587 /* ====================================================================== */
3588 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3589 with the TCG register to which we will write. Used in combination with
3590 the "wout" generators, in some cases we need a new temporary, and in
3591 some cases we can write to a TCG global. */
3593 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3595 o
->out
= tcg_temp_new_i64();
3598 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3600 o
->out
= tcg_temp_new_i64();
3601 o
->out2
= tcg_temp_new_i64();
3604 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3606 o
->out
= regs
[get_field(f
, r1
)];
3610 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3612 /* ??? Specification exception: r1 must be even. */
3613 int r1
= get_field(f
, r1
);
3615 o
->out2
= regs
[(r1
+ 1) & 15];
3616 o
->g_out
= o
->g_out2
= true;
3619 /* ====================================================================== */
3620 /* The "Write OUTput" generators. These generally perform some non-trivial
3621 copy of data to TCG globals, or to main memory. The trivial cases are
3622 generally handled by having a "prep" generator install the TCG global
3623 as the destination of the operation. */
3625 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3627 store_reg(get_field(f
, r1
), o
->out
);
3630 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3632 int r1
= get_field(f
, r1
);
3633 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3636 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3638 store_reg32_i64(get_field(f
, r1
), o
->out
);
3641 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3643 /* ??? Specification exception: r1 must be even. */
3644 int r1
= get_field(f
, r1
);
3645 store_reg32_i64(r1
, o
->out
);
3646 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3649 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3651 /* ??? Specification exception: r1 must be even. */
3652 int r1
= get_field(f
, r1
);
3653 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3654 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3655 store_reg32_i64(r1
, o
->out
);
3658 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3660 store_freg32_i64(get_field(f
, r1
), o
->out
);
3663 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3665 store_freg(get_field(f
, r1
), o
->out
);
3668 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3670 int f1
= get_field(s
->fields
, r1
);
3671 store_freg(f1
, o
->out
);
3672 store_freg((f1
+ 2) & 15, o
->out2
);
3675 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3677 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3678 store_reg32_i64(get_field(f
, r1
), o
->out
);
3682 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3684 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3685 store_freg32_i64(get_field(f
, r1
), o
->out
);
3689 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3691 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3694 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3696 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3699 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3701 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3704 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3706 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3709 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3711 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3714 /* ====================================================================== */
3715 /* The "INput 1" generators. These load the first operand to an insn. */
3717 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3719 o
->in1
= load_reg(get_field(f
, r1
));
3722 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3724 o
->in1
= regs
[get_field(f
, r1
)];
3728 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3730 o
->in1
= tcg_temp_new_i64();
3731 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3734 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3736 o
->in1
= tcg_temp_new_i64();
3737 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3740 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3742 o
->in1
= tcg_temp_new_i64();
3743 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3746 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3748 /* ??? Specification exception: r1 must be even. */
3749 int r1
= get_field(f
, r1
);
3750 o
->in1
= load_reg((r1
+ 1) & 15);
3753 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3755 /* ??? Specification exception: r1 must be even. */
3756 int r1
= get_field(f
, r1
);
3757 o
->in1
= tcg_temp_new_i64();
3758 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3761 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3763 /* ??? Specification exception: r1 must be even. */
3764 int r1
= get_field(f
, r1
);
3765 o
->in1
= tcg_temp_new_i64();
3766 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3769 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3771 /* ??? Specification exception: r1 must be even. */
3772 int r1
= get_field(f
, r1
);
3773 o
->in1
= tcg_temp_new_i64();
3774 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3777 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3779 o
->in1
= load_reg(get_field(f
, r2
));
3782 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3784 o
->in1
= load_reg(get_field(f
, r3
));
3787 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3789 o
->in1
= regs
[get_field(f
, r3
)];
3793 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3795 o
->in1
= tcg_temp_new_i64();
3796 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3799 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3801 o
->in1
= tcg_temp_new_i64();
3802 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3805 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3807 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3810 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3812 o
->in1
= fregs
[get_field(f
, r1
)];
3816 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3818 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3821 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3824 o
->in1
= tcg_temp_new_i64();
3825 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3828 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 o
->in1
= tcg_temp_new_i64();
3832 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3835 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3838 o
->in1
= tcg_temp_new_i64();
3839 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3842 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3845 o
->in1
= tcg_temp_new_i64();
3846 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3849 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3852 o
->in1
= tcg_temp_new_i64();
3853 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3856 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3859 o
->in1
= tcg_temp_new_i64();
3860 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3863 /* ====================================================================== */
3864 /* The "INput 2" generators. These load the second operand to an insn. */
3866 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 o
->in2
= load_reg(get_field(f
, r2
));
3871 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3873 o
->in2
= regs
[get_field(f
, r2
)];
3877 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3879 int r2
= get_field(f
, r2
);
3881 o
->in2
= load_reg(r2
);
3885 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3887 o
->in2
= tcg_temp_new_i64();
3888 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3891 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3893 o
->in2
= tcg_temp_new_i64();
3894 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3897 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3899 o
->in2
= tcg_temp_new_i64();
3900 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3903 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3905 o
->in2
= tcg_temp_new_i64();
3906 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3909 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 o
->in2
= load_reg(get_field(f
, r3
));
3914 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3916 o
->in2
= tcg_temp_new_i64();
3917 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3920 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3922 o
->in2
= tcg_temp_new_i64();
3923 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3926 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3928 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3931 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3933 o
->in2
= fregs
[get_field(f
, r2
)];
3937 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3939 int f2
= get_field(f
, r2
);
3941 o
->in2
= fregs
[(f2
+ 2) & 15];
3942 o
->g_in1
= o
->g_in2
= true;
3945 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3948 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3951 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3953 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3956 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3958 help_l2_shift(s
, f
, o
, 31);
3961 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3963 help_l2_shift(s
, f
, o
, 63);
3966 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3969 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3972 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3975 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3978 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3981 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3984 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3987 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3990 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3993 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3996 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3999 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4002 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4005 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4008 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4011 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4014 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4017 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4020 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4022 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4025 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4027 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4030 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4032 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4035 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4037 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4040 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4042 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4043 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4046 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4048 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4049 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4052 /* ====================================================================== */
4054 /* Find opc within the table of insns. This is formulated as a switch
4055 statement so that (1) we get compile-time notice of cut-paste errors
4056 for duplicated opcodes, and (2) the compiler generates the binary
4057 search tree, rather than us having to post-process the table. */
4059 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4060 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4062 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4064 enum DisasInsnEnum
{
4065 #include "insn-data.def"
4069 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4074 .help_in1 = in1_##I1, \
4075 .help_in2 = in2_##I2, \
4076 .help_prep = prep_##P, \
4077 .help_wout = wout_##W, \
4078 .help_cout = cout_##CC, \
4079 .help_op = op_##OP, \
4083 /* Allow 0 to be used for NULL in the table below. */
4091 static const DisasInsn insn_info
[] = {
4092 #include "insn-data.def"
4096 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4097 case OPC: return &insn_info[insn_ ## NM];
4099 static const DisasInsn
*lookup_opc(uint16_t opc
)
4102 #include "insn-data.def"
4111 /* Extract a field from the insn. The INSN should be left-aligned in
4112 the uint64_t so that we can more easily utilize the big-bit-endian
4113 definitions we extract from the Principals of Operation. */
4115 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4123 /* Zero extract the field from the insn. */
4124 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4126 /* Sign-extend, or un-swap the field as necessary. */
4128 case 0: /* unsigned */
4130 case 1: /* signed */
4131 assert(f
->size
<= 32);
4132 m
= 1u << (f
->size
- 1);
4135 case 2: /* dl+dh split, signed 20 bit. */
4136 r
= ((int8_t)r
<< 12) | (r
>> 8);
4142 /* Validate that the "compressed" encoding we selected above is valid.
4143 I.e. we havn't make two different original fields overlap. */
4144 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4145 o
->presentC
|= 1 << f
->indexC
;
4146 o
->presentO
|= 1 << f
->indexO
;
4148 o
->c
[f
->indexC
] = r
;
4151 /* Lookup the insn at the current PC, extracting the operands into O and
4152 returning the info struct for the insn. Returns NULL for invalid insn. */
4154 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4157 uint64_t insn
, pc
= s
->pc
;
4159 const DisasInsn
*info
;
4161 insn
= ld_code2(env
, pc
);
4162 op
= (insn
>> 8) & 0xff;
4163 ilen
= get_ilen(op
);
4164 s
->next_pc
= s
->pc
+ ilen
;
4171 insn
= ld_code4(env
, pc
) << 32;
4174 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4180 /* We can't actually determine the insn format until we've looked up
4181 the full insn opcode. Which we can't do without locating the
4182 secondary opcode. Assume by default that OP2 is at bit 40; for
4183 those smaller insns that don't actually have a secondary opcode
4184 this will correctly result in OP2 = 0. */
4190 case 0xb2: /* S, RRF, RRE */
4191 case 0xb3: /* RRE, RRD, RRF */
4192 case 0xb9: /* RRE, RRF */
4193 case 0xe5: /* SSE, SIL */
4194 op2
= (insn
<< 8) >> 56;
4198 case 0xc0: /* RIL */
4199 case 0xc2: /* RIL */
4200 case 0xc4: /* RIL */
4201 case 0xc6: /* RIL */
4202 case 0xc8: /* SSF */
4203 case 0xcc: /* RIL */
4204 op2
= (insn
<< 12) >> 60;
4206 case 0xd0 ... 0xdf: /* SS */
4212 case 0xee ... 0xf3: /* SS */
4213 case 0xf8 ... 0xfd: /* SS */
4217 op2
= (insn
<< 40) >> 56;
4221 memset(f
, 0, sizeof(*f
));
4225 /* Lookup the instruction. */
4226 info
= lookup_opc(op
<< 8 | op2
);
4228 /* If we found it, extract the operands. */
4230 DisasFormat fmt
= info
->fmt
;
4233 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4234 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4240 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4242 const DisasInsn
*insn
;
4243 ExitStatus ret
= NO_EXIT
;
4247 insn
= extract_insn(env
, s
, &f
);
4249 /* If not found, try the old interpreter. This includes ILLOPC. */
4251 disas_s390_insn(env
, s
);
4252 switch (s
->is_jmp
) {
4260 ret
= EXIT_PC_UPDATED
;
4263 ret
= EXIT_NORETURN
;
4273 /* Set up the strutures we use to communicate with the helpers. */
4276 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4277 TCGV_UNUSED_I64(o
.out
);
4278 TCGV_UNUSED_I64(o
.out2
);
4279 TCGV_UNUSED_I64(o
.in1
);
4280 TCGV_UNUSED_I64(o
.in2
);
4281 TCGV_UNUSED_I64(o
.addr1
);
4283 /* Implement the instruction. */
4284 if (insn
->help_in1
) {
4285 insn
->help_in1(s
, &f
, &o
);
4287 if (insn
->help_in2
) {
4288 insn
->help_in2(s
, &f
, &o
);
4290 if (insn
->help_prep
) {
4291 insn
->help_prep(s
, &f
, &o
);
4293 if (insn
->help_op
) {
4294 ret
= insn
->help_op(s
, &o
);
4296 if (insn
->help_wout
) {
4297 insn
->help_wout(s
, &f
, &o
);
4299 if (insn
->help_cout
) {
4300 insn
->help_cout(s
, &o
);
4303 /* Free any temporaries created by the helpers. */
4304 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4305 tcg_temp_free_i64(o
.out
);
4307 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4308 tcg_temp_free_i64(o
.out2
);
4310 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4311 tcg_temp_free_i64(o
.in1
);
4313 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4314 tcg_temp_free_i64(o
.in2
);
4316 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4317 tcg_temp_free_i64(o
.addr1
);
4320 /* Advance to the next instruction. */
4325 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4326 TranslationBlock
*tb
,
4330 target_ulong pc_start
;
4331 uint64_t next_page_start
;
4332 uint16_t *gen_opc_end
;
4334 int num_insns
, max_insns
;
4342 if (!(tb
->flags
& FLAG_MASK_64
)) {
4343 pc_start
&= 0x7fffffff;
4348 dc
.cc_op
= CC_OP_DYNAMIC
;
4349 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4350 dc
.is_jmp
= DISAS_NEXT
;
4352 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4354 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4357 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4358 if (max_insns
== 0) {
4359 max_insns
= CF_COUNT_MASK
;
4366 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4370 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4373 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4374 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4375 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4376 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4378 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4382 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4383 tcg_gen_debug_insn_start(dc
.pc
);
4387 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4388 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4389 if (bp
->pc
== dc
.pc
) {
4390 status
= EXIT_PC_STALE
;
4396 if (status
== NO_EXIT
) {
4397 status
= translate_one(env
, &dc
);
4400 /* If we reach a page boundary, are single stepping,
4401 or exhaust instruction count, stop generation. */
4402 if (status
== NO_EXIT
4403 && (dc
.pc
>= next_page_start
4404 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4405 || num_insns
>= max_insns
4407 || env
->singlestep_enabled
)) {
4408 status
= EXIT_PC_STALE
;
4410 } while (status
== NO_EXIT
);
4412 if (tb
->cflags
& CF_LAST_IO
) {
4421 update_psw_addr(&dc
);
4423 case EXIT_PC_UPDATED
:
4424 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4425 gen_op_calc_cc(&dc
);
4427 /* Next TB starts off with CC_OP_DYNAMIC,
4428 so make sure the cc op type is in env */
4429 gen_op_set_cc_op(&dc
);
4432 gen_exception(EXCP_DEBUG
);
4434 /* Generate the return instruction */
4442 gen_icount_end(tb
, num_insns
);
4443 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4445 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4448 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4451 tb
->size
= dc
.pc
- pc_start
;
4452 tb
->icount
= num_insns
;
4455 #if defined(S390X_DEBUG_DISAS)
4456 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4457 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4458 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4464 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4466 gen_intermediate_code_internal(env
, tb
, 0);
4469 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4471 gen_intermediate_code_internal(env
, tb
, 1);
4474 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4477 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4478 cc_op
= gen_opc_cc_op
[pc_pos
];
4479 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {