4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static inline void debug_insn(uint64_t insn
)
84 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_reg16(int reg
, TCGv_i32 v
)
283 /* 16 bit register writes keep the upper bytes */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
287 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
292 static inline void store_freg32(int reg
, TCGv_i32 v
)
294 /* 32 bit register writes keep the lower half */
295 #if HOST_LONG_BITS == 32
296 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
298 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
299 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
303 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
305 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
308 static inline void return_low128(TCGv_i64 dest
)
310 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
313 static inline void update_psw_addr(DisasContext
*s
)
316 tcg_gen_movi_i64(psw_addr
, s
->pc
);
319 static inline void potential_page_fault(DisasContext
*s
)
321 #ifndef CONFIG_USER_ONLY
327 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
329 return (uint64_t)cpu_lduw_code(env
, pc
);
332 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
334 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
337 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
339 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
342 static inline int get_mem_index(DisasContext
*s
)
344 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
345 case PSW_ASC_PRIMARY
>> 32:
347 case PSW_ASC_SECONDARY
>> 32:
349 case PSW_ASC_HOME
>> 32:
357 static void gen_exception(int excp
)
359 TCGv_i32 tmp
= tcg_const_i32(excp
);
360 gen_helper_exception(cpu_env
, tmp
);
361 tcg_temp_free_i32(tmp
);
364 static void gen_program_exception(DisasContext
*s
, int code
)
368 /* Remember what pgm exeption this was. */
369 tmp
= tcg_const_i32(code
);
370 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
371 tcg_temp_free_i32(tmp
);
373 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
374 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
375 tcg_temp_free_i32(tmp
);
377 /* Advance past instruction. */
384 /* Trigger exception. */
385 gen_exception(EXCP_PGM
);
388 s
->is_jmp
= DISAS_EXCP
;
391 static inline void gen_illegal_opcode(DisasContext
*s
)
393 gen_program_exception(s
, PGM_SPECIFICATION
);
396 static inline void check_privileged(DisasContext
*s
)
398 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
399 gen_program_exception(s
, PGM_PRIVILEGED
);
403 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
407 /* 31-bitify the immediate part; register contents are dealt with below */
408 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
414 tmp
= tcg_const_i64(d2
);
415 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
420 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
424 tmp
= tcg_const_i64(d2
);
425 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
430 tmp
= tcg_const_i64(d2
);
433 /* 31-bit mode mask if there are values loaded from registers */
434 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
435 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
441 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
443 s
->cc_op
= CC_OP_CONST0
+ val
;
/* Record a deferred cc computation with one i64 operand: cc_dst <- dst,
   cc_src and cc_vr discarded.  */
/* NOTE(review): extraction gap — an opening brace, the trailing
   "s->cc_op = op;" assignment and the closing brace appear to be
   missing from this fragment; verify against the full source.  */
446 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
448 tcg_gen_discard_i64(cc_src
);
449 tcg_gen_mov_i64(cc_dst
, dst
);
450 tcg_gen_discard_i64(cc_vr
);
/* Record a deferred cc computation with one i32 operand, zero-extended
   into cc_dst; cc_src and cc_vr discarded.  */
/* NOTE(review): extraction gap — braces and the trailing
   "s->cc_op = op;" assignment appear to be missing; verify against
   the full source.  */
454 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
456 tcg_gen_discard_i64(cc_src
);
457 tcg_gen_extu_i32_i64(cc_dst
, dst
);
458 tcg_gen_discard_i64(cc_vr
);
/* Record a deferred cc computation with two i64 operands:
   cc_src <- src, cc_dst <- dst, cc_vr discarded.  */
/* NOTE(review): extraction gap — the signature's trailing parameter
   (presumably "TCGv_i64 dst)"), braces and the "s->cc_op = op;"
   assignment appear to be missing; verify against the full source.  */
462 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
465 tcg_gen_mov_i64(cc_src
, src
);
466 tcg_gen_mov_i64(cc_dst
, dst
);
467 tcg_gen_discard_i64(cc_vr
);
/* Record a deferred cc computation with two i32 operands, each
   zero-extended into cc_src/cc_dst; cc_vr discarded.  */
/* NOTE(review): extraction gap — the signature's trailing parameter
   (presumably "TCGv_i32 dst)"), braces and the "s->cc_op = op;"
   assignment appear to be missing; verify against the full source.  */
471 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
474 tcg_gen_extu_i32_i64(cc_src
, src
);
475 tcg_gen_extu_i32_i64(cc_dst
, dst
);
476 tcg_gen_discard_i64(cc_vr
);
/* Record a deferred cc computation with three i64 operands:
   cc_src <- src, cc_dst <- dst, cc_vr <- vr.  */
/* NOTE(review): extraction gap — braces and the trailing
   "s->cc_op = op;" assignment appear to be missing; verify against
   the full source.  */
480 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
481 TCGv_i64 dst
, TCGv_i64 vr
)
483 tcg_gen_mov_i64(cc_src
, src
);
484 tcg_gen_mov_i64(cc_dst
, dst
);
485 tcg_gen_mov_i64(cc_vr
, vr
);
489 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
491 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
494 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
496 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
499 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
502 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
505 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
508 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
511 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
513 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
516 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
518 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
521 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
523 /* XXX optimize for the constant? put it in s? */
524 TCGv_i32 tmp
= tcg_const_i32(v2
);
525 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
526 tcg_temp_free_i32(tmp
);
529 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
531 TCGv_i32 tmp
= tcg_const_i32(v2
);
532 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
533 tcg_temp_free_i32(tmp
);
536 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
538 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
541 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
543 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
/* Defer a signed 64-bit compare of V1 with the constant V2.  */
/* NOTE(review): extraction gap between the tcg_const_i64 and the
   tcg_temp_free_i64 — presumably the actual compare call (cf. the
   fully-visible cmp_s32c above) is missing; verify against the full
   source.  */
546 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
548 TCGv_i64 tmp
= tcg_const_i64(v2
);
550 tcg_temp_free_i64(tmp
);
/* Defer an unsigned 64-bit compare of V1 with the constant V2.  */
/* NOTE(review): extraction gap between the tcg_const_i64 and the
   tcg_temp_free_i64 — presumably the actual compare call (cf. the
   fully-visible cmp_u32c above) is missing; verify against the full
   source.  */
553 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
555 TCGv_i64 tmp
= tcg_const_i64(v2
);
557 tcg_temp_free_i64(tmp
);
560 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
562 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
565 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
567 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
570 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
572 tcg_gen_extu_i32_i64(cc_src
, v1
);
573 tcg_gen_mov_i64(cc_dst
, v2
);
574 tcg_gen_discard_i64(cc_vr
);
575 s
->cc_op
= CC_OP_LTGT_F32
;
578 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
580 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
583 /* CC value is in env->cc_op */
584 static inline void set_cc_static(DisasContext
*s
)
586 tcg_gen_discard_i64(cc_src
);
587 tcg_gen_discard_i64(cc_dst
);
588 tcg_gen_discard_i64(cc_vr
);
589 s
->cc_op
= CC_OP_STATIC
;
592 static inline void gen_op_set_cc_op(DisasContext
*s
)
594 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
595 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
599 static inline void gen_update_cc_op(DisasContext
*s
)
604 /* calculates cc into cc_op */
605 static void gen_op_calc_cc(DisasContext
*s
)
607 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
608 TCGv_i64 dummy
= tcg_const_i64(0);
615 /* s->cc_op is the cc value */
616 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
619 /* env->cc_op already is the cc value */
633 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
638 case CC_OP_LTUGTU_32
:
639 case CC_OP_LTUGTU_64
:
647 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
662 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
665 /* unknown operation - assume 3 arguments and cc_op in env */
666 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
672 tcg_temp_free_i32(local_cc_op
);
673 tcg_temp_free_i64(dummy
);
675 /* We now have cc in cc_op as constant */
679 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
683 *r1
= (insn
>> 4) & 0xf;
687 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
688 int *x2
, int *b2
, int *d2
)
692 *r1
= (insn
>> 20) & 0xf;
693 *x2
= (insn
>> 16) & 0xf;
694 *b2
= (insn
>> 12) & 0xf;
697 return get_address(s
, *x2
, *b2
, *d2
);
700 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
705 *r1
= (insn
>> 20) & 0xf;
707 *r3
= (insn
>> 16) & 0xf;
708 *b2
= (insn
>> 12) & 0xf;
712 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
717 *i2
= (insn
>> 16) & 0xff;
718 *b1
= (insn
>> 12) & 0xf;
721 return get_address(s
, 0, *b1
, *d1
);
724 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
726 /* NOTE: we handle the case where the TB spans two pages here */
727 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
728 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
729 && !s
->singlestep_enabled
730 && !(s
->tb
->cflags
& CF_LAST_IO
));
733 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
737 if (use_goto_tb(s
, pc
)) {
738 tcg_gen_goto_tb(tb_num
);
739 tcg_gen_movi_i64(psw_addr
, pc
);
740 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
742 /* jump to another page: currently not optimized */
743 tcg_gen_movi_i64(psw_addr
, pc
);
748 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
750 #ifdef DEBUG_INLINE_BRANCHES
751 inline_branch_miss
[cc_op
]++;
755 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
757 #ifdef DEBUG_INLINE_BRANCHES
758 inline_branch_hit
[cc_op
]++;
762 /* Table of mask values to comparison codes, given a comparison as input.
763 For a true comparison CC=3 will never be set, but we treat this
764 conservatively for possible use when CC=3 indicates overflow. */
765 static const TCGCond ltgt_cond
[16] = {
766 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
767 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
768 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
769 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
770 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
771 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
772 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
773 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
776 /* Table of mask values to comparison codes, given a logic op as input.
777 For such, only CC=0 and CC=1 should be possible. */
778 static const TCGCond nz_cond
[16] = {
780 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
782 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
784 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
785 /* EQ | NE | x | x */
786 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
789 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
790 details required to generate a TCG comparison. */
791 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
794 enum cc_op old_cc_op
= s
->cc_op
;
796 if (mask
== 15 || mask
== 0) {
797 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
800 c
->g1
= c
->g2
= true;
805 /* Find the TCG condition for the mask + cc op. */
811 cond
= ltgt_cond
[mask
];
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
818 case CC_OP_LTUGTU_32
:
819 case CC_OP_LTUGTU_64
:
820 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
821 if (cond
== TCG_COND_NEVER
) {
824 account_inline_branch(s
, old_cc_op
);
828 cond
= nz_cond
[mask
];
829 if (cond
== TCG_COND_NEVER
) {
832 account_inline_branch(s
, old_cc_op
);
847 account_inline_branch(s
, old_cc_op
);
862 account_inline_branch(s
, old_cc_op
);
867 /* Calculate cc value. */
872 /* Jump based on CC. We'll load up the real cond below;
873 the assignment here merely avoids a compiler warning. */
874 account_noninline_branch(s
, old_cc_op
);
875 old_cc_op
= CC_OP_STATIC
;
876 cond
= TCG_COND_NEVER
;
880 /* Load up the arguments of the comparison. */
882 c
->g1
= c
->g2
= false;
886 c
->u
.s32
.a
= tcg_temp_new_i32();
887 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
888 c
->u
.s32
.b
= tcg_const_i32(0);
891 case CC_OP_LTUGTU_32
:
893 c
->u
.s32
.a
= tcg_temp_new_i32();
894 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
895 c
->u
.s32
.b
= tcg_temp_new_i32();
896 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
902 c
->u
.s64
.b
= tcg_const_i64(0);
906 case CC_OP_LTUGTU_64
:
909 c
->g1
= c
->g2
= true;
915 c
->u
.s64
.a
= tcg_temp_new_i64();
916 c
->u
.s64
.b
= tcg_const_i64(0);
917 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
925 case 0x8 | 0x4 | 0x2: /* cc != 3 */
927 c
->u
.s32
.b
= tcg_const_i32(3);
929 case 0x8 | 0x4 | 0x1: /* cc != 2 */
931 c
->u
.s32
.b
= tcg_const_i32(2);
933 case 0x8 | 0x2 | 0x1: /* cc != 1 */
935 c
->u
.s32
.b
= tcg_const_i32(1);
937 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
940 c
->u
.s32
.a
= tcg_temp_new_i32();
941 c
->u
.s32
.b
= tcg_const_i32(0);
942 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
944 case 0x8 | 0x4: /* cc < 2 */
946 c
->u
.s32
.b
= tcg_const_i32(2);
948 case 0x8: /* cc == 0 */
950 c
->u
.s32
.b
= tcg_const_i32(0);
952 case 0x4 | 0x2 | 0x1: /* cc != 0 */
954 c
->u
.s32
.b
= tcg_const_i32(0);
956 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
959 c
->u
.s32
.a
= tcg_temp_new_i32();
960 c
->u
.s32
.b
= tcg_const_i32(0);
961 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
963 case 0x4: /* cc == 1 */
965 c
->u
.s32
.b
= tcg_const_i32(1);
967 case 0x2 | 0x1: /* cc > 1 */
969 c
->u
.s32
.b
= tcg_const_i32(1);
971 case 0x2: /* cc == 2 */
973 c
->u
.s32
.b
= tcg_const_i32(2);
975 case 0x1: /* cc == 3 */
977 c
->u
.s32
.b
= tcg_const_i32(3);
980 /* CC is masked by something else: (8 >> cc) & mask. */
983 c
->u
.s32
.a
= tcg_const_i32(8);
984 c
->u
.s32
.b
= tcg_const_i32(0);
985 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
986 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
997 static void free_compare(DisasCompare
*c
)
1001 tcg_temp_free_i64(c
->u
.s64
.a
);
1003 tcg_temp_free_i32(c
->u
.s32
.a
);
1008 tcg_temp_free_i64(c
->u
.s64
.b
);
1010 tcg_temp_free_i32(c
->u
.s32
.b
);
1015 static void gen_op_clc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1021 /* check for simple 32bit or 64bit match */
1024 tmp
= tcg_temp_new_i64();
1025 tmp2
= tcg_temp_new_i64();
1027 tcg_gen_qemu_ld8u(tmp
, s1
, get_mem_index(s
));
1028 tcg_gen_qemu_ld8u(tmp2
, s2
, get_mem_index(s
));
1029 cmp_u64(s
, tmp
, tmp2
);
1031 tcg_temp_free_i64(tmp
);
1032 tcg_temp_free_i64(tmp2
);
1035 tmp
= tcg_temp_new_i64();
1036 tmp2
= tcg_temp_new_i64();
1038 tcg_gen_qemu_ld16u(tmp
, s1
, get_mem_index(s
));
1039 tcg_gen_qemu_ld16u(tmp2
, s2
, get_mem_index(s
));
1040 cmp_u64(s
, tmp
, tmp2
);
1042 tcg_temp_free_i64(tmp
);
1043 tcg_temp_free_i64(tmp2
);
1046 tmp
= tcg_temp_new_i64();
1047 tmp2
= tcg_temp_new_i64();
1049 tcg_gen_qemu_ld32u(tmp
, s1
, get_mem_index(s
));
1050 tcg_gen_qemu_ld32u(tmp2
, s2
, get_mem_index(s
));
1051 cmp_u64(s
, tmp
, tmp2
);
1053 tcg_temp_free_i64(tmp
);
1054 tcg_temp_free_i64(tmp2
);
1057 tmp
= tcg_temp_new_i64();
1058 tmp2
= tcg_temp_new_i64();
1060 tcg_gen_qemu_ld64(tmp
, s1
, get_mem_index(s
));
1061 tcg_gen_qemu_ld64(tmp2
, s2
, get_mem_index(s
));
1062 cmp_u64(s
, tmp
, tmp2
);
1064 tcg_temp_free_i64(tmp
);
1065 tcg_temp_free_i64(tmp2
);
1069 potential_page_fault(s
);
1070 vl
= tcg_const_i32(l
);
1071 gen_helper_clc(cc_op
, cpu_env
, vl
, s1
, s2
);
1072 tcg_temp_free_i32(vl
);
1076 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1077 int x2
, int b2
, int d2
)
1079 TCGv_i64 addr
, tmp2
;
1082 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1083 op
, r1
, x2
, b2
, d2
);
1084 addr
= get_address(s
, x2
, b2
, d2
);
1086 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1087 tmp2
= tcg_temp_new_i64();
1088 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1089 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1090 store_reg(r1
, tmp2
);
1091 tcg_temp_free_i64(tmp2
);
1093 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1094 tmp2
= tcg_temp_new_i64();
1095 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1096 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1097 store_reg(r1
, tmp2
);
1098 tcg_temp_free_i64(tmp2
);
1100 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1101 tmp2
= tcg_temp_new_i64();
1102 tmp32_1
= tcg_temp_new_i32();
1103 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1104 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1105 tcg_temp_free_i64(tmp2
);
1106 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1107 store_reg32(r1
, tmp32_1
);
1108 tcg_temp_free_i32(tmp32_1
);
1110 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1111 tmp2
= tcg_temp_new_i64();
1112 tmp32_1
= tcg_temp_new_i32();
1113 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1114 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1115 tcg_temp_free_i64(tmp2
);
1116 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1117 store_reg16(r1
, tmp32_1
);
1118 tcg_temp_free_i32(tmp32_1
);
1120 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1121 tmp32_1
= load_reg32(r1
);
1122 tmp2
= tcg_temp_new_i64();
1123 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1124 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1125 tcg_temp_free_i32(tmp32_1
);
1126 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1127 tcg_temp_free_i64(tmp2
);
1130 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1131 gen_illegal_opcode(s
);
1134 tcg_temp_free_i64(addr
);
1137 #ifndef CONFIG_USER_ONLY
1138 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1141 int op
= (insn
>> 32) & 0xff;
1143 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1144 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1146 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1148 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1149 /* Test Protection */
1150 potential_page_fault(s
);
1151 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1155 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1156 gen_illegal_opcode(s
);
1160 tcg_temp_free_i64(tmp
);
1161 tcg_temp_free_i64(tmp2
);
1165 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1166 int r3
, int b2
, int d2
)
1169 TCGv_i32 tmp32_1
, tmp32_2
;
1171 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1172 op
, r1
, r3
, b2
, d2
);
1174 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1175 tmp
= get_address(s
, 0, b2
, d2
);
1176 tmp32_1
= tcg_const_i32(r1
);
1177 tmp32_2
= tcg_const_i32(r3
);
1178 potential_page_fault(s
);
1179 gen_helper_stcmh(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1180 tcg_temp_free_i64(tmp
);
1181 tcg_temp_free_i32(tmp32_1
);
1182 tcg_temp_free_i32(tmp32_2
);
1184 #ifndef CONFIG_USER_ONLY
1185 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1187 check_privileged(s
);
1188 tmp
= get_address(s
, 0, b2
, d2
);
1189 tmp32_1
= tcg_const_i32(r1
);
1190 tmp32_2
= tcg_const_i32(r3
);
1191 potential_page_fault(s
);
1192 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1193 tcg_temp_free_i64(tmp
);
1194 tcg_temp_free_i32(tmp32_1
);
1195 tcg_temp_free_i32(tmp32_2
);
1197 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1199 check_privileged(s
);
1200 tmp
= get_address(s
, 0, b2
, d2
);
1201 tmp32_1
= tcg_const_i32(r1
);
1202 tmp32_2
= tcg_const_i32(r3
);
1203 potential_page_fault(s
);
1204 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1205 tcg_temp_free_i64(tmp
);
1206 tcg_temp_free_i32(tmp32_1
);
1207 tcg_temp_free_i32(tmp32_2
);
1210 case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
1211 tmp
= get_address(s
, 0, b2
, d2
);
1212 tmp32_1
= tcg_const_i32(r1
);
1213 tmp32_2
= tcg_const_i32(r3
);
1214 potential_page_fault(s
);
1215 /* XXX rewrite in tcg */
1216 gen_helper_csg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1218 tcg_temp_free_i64(tmp
);
1219 tcg_temp_free_i32(tmp32_1
);
1220 tcg_temp_free_i32(tmp32_2
);
1222 case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
1223 tmp
= get_address(s
, 0, b2
, d2
);
1224 tmp32_1
= tcg_const_i32(r1
);
1225 tmp32_2
= tcg_const_i32(r3
);
1226 potential_page_fault(s
);
1227 /* XXX rewrite in tcg */
1228 gen_helper_cdsg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1230 tcg_temp_free_i64(tmp
);
1231 tcg_temp_free_i32(tmp32_1
);
1232 tcg_temp_free_i32(tmp32_2
);
1235 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1236 gen_illegal_opcode(s
);
1241 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1242 int x2
, int b2
, int d2
, int r1b
)
1244 TCGv_i32 tmp_r1
, tmp32
;
1246 addr
= get_address(s
, x2
, b2
, d2
);
1247 tmp_r1
= tcg_const_i32(r1
);
1249 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1250 potential_page_fault(s
);
1251 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1253 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1254 potential_page_fault(s
);
1255 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1257 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1258 tmp
= tcg_temp_new_i64();
1259 tmp32
= load_freg32(r1
);
1260 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1261 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1262 tcg_temp_free_i64(tmp
);
1263 tcg_temp_free_i32(tmp32
);
1265 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1266 tmp
= tcg_temp_new_i64();
1267 tmp32
= tcg_temp_new_i32();
1268 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1269 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1270 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1271 tcg_temp_free_i64(tmp
);
1272 tcg_temp_free_i32(tmp32
);
1274 tmp32
= load_freg32(r1
);
1275 gen_set_cc_nz_f32(s
, tmp32
);
1276 tcg_temp_free_i32(tmp32
);
1278 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1279 tmp
= tcg_temp_new_i64();
1280 tmp32
= tcg_temp_new_i32();
1281 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1282 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1283 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1284 tcg_temp_free_i64(tmp
);
1285 tcg_temp_free_i32(tmp32
);
1287 tmp32
= load_freg32(r1
);
1288 gen_set_cc_nz_f32(s
, tmp32
);
1289 tcg_temp_free_i32(tmp32
);
1291 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1292 tmp
= tcg_temp_new_i64();
1293 tmp32
= tcg_temp_new_i32();
1294 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1295 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1296 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1297 tcg_temp_free_i64(tmp
);
1298 tcg_temp_free_i32(tmp32
);
1300 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1301 potential_page_fault(s
);
1302 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1305 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1306 potential_page_fault(s
);
1307 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1310 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1311 potential_page_fault(s
);
1312 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1315 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1316 tmp
= tcg_temp_new_i64();
1317 tmp32
= tcg_temp_new_i32();
1318 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1319 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1320 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1321 tcg_temp_free_i64(tmp
);
1322 tcg_temp_free_i32(tmp32
);
1324 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1325 potential_page_fault(s
);
1326 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1329 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1330 potential_page_fault(s
);
1331 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1334 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1335 potential_page_fault(s
);
1336 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1339 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1340 potential_page_fault(s
);
1341 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1343 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1344 potential_page_fault(s
);
1345 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1347 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1348 /* for RXF insns, r1 is R3 and r1b is R1 */
1349 tmp32
= tcg_const_i32(r1b
);
1350 potential_page_fault(s
);
1351 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1352 tcg_temp_free_i32(tmp32
);
1355 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1356 gen_illegal_opcode(s
);
1359 tcg_temp_free_i32(tmp_r1
);
1360 tcg_temp_free_i64(addr
);
1363 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1366 TCGv_i64 tmp
, tmp2
, tmp3
;
1367 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1369 #ifndef CONFIG_USER_ONLY
1373 r1
= (insn
>> 4) & 0xf;
1376 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1379 case 0x22: /* IPM R1 [RRE] */
1380 tmp32_1
= tcg_const_i32(r1
);
1382 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1383 tcg_temp_free_i32(tmp32_1
);
1385 case 0x41: /* CKSM R1,R2 [RRE] */
1386 tmp32_1
= tcg_const_i32(r1
);
1387 tmp32_2
= tcg_const_i32(r2
);
1388 potential_page_fault(s
);
1389 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1390 tcg_temp_free_i32(tmp32_1
);
1391 tcg_temp_free_i32(tmp32_2
);
1392 gen_op_movi_cc(s
, 0);
1394 case 0x4e: /* SAR R1,R2 [RRE] */
1395 tmp32_1
= load_reg32(r2
);
1396 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1397 tcg_temp_free_i32(tmp32_1
);
1399 case 0x4f: /* EAR R1,R2 [RRE] */
1400 tmp32_1
= tcg_temp_new_i32();
1401 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1402 store_reg32(r1
, tmp32_1
);
1403 tcg_temp_free_i32(tmp32_1
);
1405 case 0x54: /* MVPG R1,R2 [RRE] */
1407 tmp2
= load_reg(r1
);
1408 tmp3
= load_reg(r2
);
1409 potential_page_fault(s
);
1410 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1411 tcg_temp_free_i64(tmp
);
1412 tcg_temp_free_i64(tmp2
);
1413 tcg_temp_free_i64(tmp3
);
1414 /* XXX check CCO bit and set CC accordingly */
1415 gen_op_movi_cc(s
, 0);
1417 case 0x55: /* MVST R1,R2 [RRE] */
1418 tmp32_1
= load_reg32(0);
1419 tmp32_2
= tcg_const_i32(r1
);
1420 tmp32_3
= tcg_const_i32(r2
);
1421 potential_page_fault(s
);
1422 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1423 tcg_temp_free_i32(tmp32_1
);
1424 tcg_temp_free_i32(tmp32_2
);
1425 tcg_temp_free_i32(tmp32_3
);
1426 gen_op_movi_cc(s
, 1);
1428 case 0x5d: /* CLST R1,R2 [RRE] */
1429 tmp32_1
= load_reg32(0);
1430 tmp32_2
= tcg_const_i32(r1
);
1431 tmp32_3
= tcg_const_i32(r2
);
1432 potential_page_fault(s
);
1433 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1435 tcg_temp_free_i32(tmp32_1
);
1436 tcg_temp_free_i32(tmp32_2
);
1437 tcg_temp_free_i32(tmp32_3
);
1439 case 0x5e: /* SRST R1,R2 [RRE] */
1440 tmp32_1
= load_reg32(0);
1441 tmp32_2
= tcg_const_i32(r1
);
1442 tmp32_3
= tcg_const_i32(r2
);
1443 potential_page_fault(s
);
1444 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1446 tcg_temp_free_i32(tmp32_1
);
1447 tcg_temp_free_i32(tmp32_2
);
1448 tcg_temp_free_i32(tmp32_3
);
1451 #ifndef CONFIG_USER_ONLY
1452 case 0x02: /* STIDP D2(B2) [S] */
1454 check_privileged(s
);
1455 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1456 tmp
= get_address(s
, 0, b2
, d2
);
1457 potential_page_fault(s
);
1458 gen_helper_stidp(cpu_env
, tmp
);
1459 tcg_temp_free_i64(tmp
);
1461 case 0x04: /* SCK D2(B2) [S] */
1463 check_privileged(s
);
1464 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1465 tmp
= get_address(s
, 0, b2
, d2
);
1466 potential_page_fault(s
);
1467 gen_helper_sck(cc_op
, tmp
);
1469 tcg_temp_free_i64(tmp
);
1471 case 0x05: /* STCK D2(B2) [S] */
1473 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1474 tmp
= get_address(s
, 0, b2
, d2
);
1475 potential_page_fault(s
);
1476 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1478 tcg_temp_free_i64(tmp
);
1480 case 0x06: /* SCKC D2(B2) [S] */
1481 /* Set Clock Comparator */
1482 check_privileged(s
);
1483 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1484 tmp
= get_address(s
, 0, b2
, d2
);
1485 potential_page_fault(s
);
1486 gen_helper_sckc(cpu_env
, tmp
);
1487 tcg_temp_free_i64(tmp
);
1489 case 0x07: /* STCKC D2(B2) [S] */
1490 /* Store Clock Comparator */
1491 check_privileged(s
);
1492 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1493 tmp
= get_address(s
, 0, b2
, d2
);
1494 potential_page_fault(s
);
1495 gen_helper_stckc(cpu_env
, tmp
);
1496 tcg_temp_free_i64(tmp
);
1498 case 0x08: /* SPT D2(B2) [S] */
1500 check_privileged(s
);
1501 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1502 tmp
= get_address(s
, 0, b2
, d2
);
1503 potential_page_fault(s
);
1504 gen_helper_spt(cpu_env
, tmp
);
1505 tcg_temp_free_i64(tmp
);
1507 case 0x09: /* STPT D2(B2) [S] */
1508 /* Store CPU Timer */
1509 check_privileged(s
);
1510 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1511 tmp
= get_address(s
, 0, b2
, d2
);
1512 potential_page_fault(s
);
1513 gen_helper_stpt(cpu_env
, tmp
);
1514 tcg_temp_free_i64(tmp
);
1516 case 0x0a: /* SPKA D2(B2) [S] */
1517 /* Set PSW Key from Address */
1518 check_privileged(s
);
1519 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1520 tmp
= get_address(s
, 0, b2
, d2
);
1521 tmp2
= tcg_temp_new_i64();
1522 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1523 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1524 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1525 tcg_temp_free_i64(tmp2
);
1526 tcg_temp_free_i64(tmp
);
1528 case 0x0d: /* PTLB [S] */
1530 check_privileged(s
);
1531 gen_helper_ptlb(cpu_env
);
1533 case 0x10: /* SPX D2(B2) [S] */
1534 /* Set Prefix Register */
1535 check_privileged(s
);
1536 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1537 tmp
= get_address(s
, 0, b2
, d2
);
1538 potential_page_fault(s
);
1539 gen_helper_spx(cpu_env
, tmp
);
1540 tcg_temp_free_i64(tmp
);
1542 case 0x11: /* STPX D2(B2) [S] */
1544 check_privileged(s
);
1545 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1546 tmp
= get_address(s
, 0, b2
, d2
);
1547 tmp2
= tcg_temp_new_i64();
1548 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1549 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1550 tcg_temp_free_i64(tmp
);
1551 tcg_temp_free_i64(tmp2
);
1553 case 0x12: /* STAP D2(B2) [S] */
1554 /* Store CPU Address */
1555 check_privileged(s
);
1556 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1557 tmp
= get_address(s
, 0, b2
, d2
);
1558 tmp2
= tcg_temp_new_i64();
1559 tmp32_1
= tcg_temp_new_i32();
1560 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1561 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1562 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1563 tcg_temp_free_i64(tmp
);
1564 tcg_temp_free_i64(tmp2
);
1565 tcg_temp_free_i32(tmp32_1
);
1567 case 0x21: /* IPTE R1,R2 [RRE] */
1568 /* Invalidate PTE */
1569 check_privileged(s
);
1570 r1
= (insn
>> 4) & 0xf;
1573 tmp2
= load_reg(r2
);
1574 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1575 tcg_temp_free_i64(tmp
);
1576 tcg_temp_free_i64(tmp2
);
1578 case 0x29: /* ISKE R1,R2 [RRE] */
1579 /* Insert Storage Key Extended */
1580 check_privileged(s
);
1581 r1
= (insn
>> 4) & 0xf;
1584 tmp2
= tcg_temp_new_i64();
1585 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1586 store_reg(r1
, tmp2
);
1587 tcg_temp_free_i64(tmp
);
1588 tcg_temp_free_i64(tmp2
);
1590 case 0x2a: /* RRBE R1,R2 [RRE] */
1591 /* Set Storage Key Extended */
1592 check_privileged(s
);
1593 r1
= (insn
>> 4) & 0xf;
1595 tmp32_1
= load_reg32(r1
);
1597 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1599 tcg_temp_free_i32(tmp32_1
);
1600 tcg_temp_free_i64(tmp
);
1602 case 0x2b: /* SSKE R1,R2 [RRE] */
1603 /* Set Storage Key Extended */
1604 check_privileged(s
);
1605 r1
= (insn
>> 4) & 0xf;
1607 tmp32_1
= load_reg32(r1
);
1609 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1610 tcg_temp_free_i32(tmp32_1
);
1611 tcg_temp_free_i64(tmp
);
1613 case 0x34: /* STCH ? */
1614 /* Store Subchannel */
1615 check_privileged(s
);
1616 gen_op_movi_cc(s
, 3);
1618 case 0x46: /* STURA R1,R2 [RRE] */
1619 /* Store Using Real Address */
1620 check_privileged(s
);
1621 r1
= (insn
>> 4) & 0xf;
1623 tmp32_1
= load_reg32(r1
);
1625 potential_page_fault(s
);
1626 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1627 tcg_temp_free_i32(tmp32_1
);
1628 tcg_temp_free_i64(tmp
);
1630 case 0x50: /* CSP R1,R2 [RRE] */
1631 /* Compare And Swap And Purge */
1632 check_privileged(s
);
1633 r1
= (insn
>> 4) & 0xf;
1635 tmp32_1
= tcg_const_i32(r1
);
1636 tmp32_2
= tcg_const_i32(r2
);
1637 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1639 tcg_temp_free_i32(tmp32_1
);
1640 tcg_temp_free_i32(tmp32_2
);
1642 case 0x5f: /* CHSC ? */
1643 /* Channel Subsystem Call */
1644 check_privileged(s
);
1645 gen_op_movi_cc(s
, 3);
1647 case 0x78: /* STCKE D2(B2) [S] */
1648 /* Store Clock Extended */
1649 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1650 tmp
= get_address(s
, 0, b2
, d2
);
1651 potential_page_fault(s
);
1652 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1654 tcg_temp_free_i64(tmp
);
1656 case 0x79: /* SACF D2(B2) [S] */
1657 /* Set Address Space Control Fast */
1658 check_privileged(s
);
1659 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1660 tmp
= get_address(s
, 0, b2
, d2
);
1661 potential_page_fault(s
);
1662 gen_helper_sacf(cpu_env
, tmp
);
1663 tcg_temp_free_i64(tmp
);
1664 /* addressing mode has changed, so end the block */
1667 s
->is_jmp
= DISAS_JUMP
;
1669 case 0x7d: /* STSI D2,(B2) [S] */
1670 check_privileged(s
);
1671 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1672 tmp
= get_address(s
, 0, b2
, d2
);
1673 tmp32_1
= load_reg32(0);
1674 tmp32_2
= load_reg32(1);
1675 potential_page_fault(s
);
1676 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1678 tcg_temp_free_i64(tmp
);
1679 tcg_temp_free_i32(tmp32_1
);
1680 tcg_temp_free_i32(tmp32_2
);
1682 case 0x9d: /* LFPC D2(B2) [S] */
1683 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1684 tmp
= get_address(s
, 0, b2
, d2
);
1685 tmp2
= tcg_temp_new_i64();
1686 tmp32_1
= tcg_temp_new_i32();
1687 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1688 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1689 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1690 tcg_temp_free_i64(tmp
);
1691 tcg_temp_free_i64(tmp2
);
1692 tcg_temp_free_i32(tmp32_1
);
1694 case 0xb1: /* STFL D2(B2) [S] */
1695 /* Store Facility List (CPU features) at 200 */
1696 check_privileged(s
);
1697 tmp2
= tcg_const_i64(0xc0000000);
1698 tmp
= tcg_const_i64(200);
1699 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1700 tcg_temp_free_i64(tmp2
);
1701 tcg_temp_free_i64(tmp
);
1703 case 0xb2: /* LPSWE D2(B2) [S] */
1704 /* Load PSW Extended */
1705 check_privileged(s
);
1706 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1707 tmp
= get_address(s
, 0, b2
, d2
);
1708 tmp2
= tcg_temp_new_i64();
1709 tmp3
= tcg_temp_new_i64();
1710 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1711 tcg_gen_addi_i64(tmp
, tmp
, 8);
1712 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1713 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1714 /* we need to keep cc_op intact */
1715 s
->is_jmp
= DISAS_JUMP
;
1716 tcg_temp_free_i64(tmp
);
1717 tcg_temp_free_i64(tmp2
);
1718 tcg_temp_free_i64(tmp3
);
1720 case 0x20: /* SERVC R1,R2 [RRE] */
1721 /* SCLP Service call (PV hypercall) */
1722 check_privileged(s
);
1723 potential_page_fault(s
);
1724 tmp32_1
= load_reg32(r2
);
1726 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1728 tcg_temp_free_i32(tmp32_1
);
1729 tcg_temp_free_i64(tmp
);
1733 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1734 gen_illegal_opcode(s
);
1739 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
1743 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1744 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
1745 #define FP_HELPER(i) \
1746 tmp32_1 = tcg_const_i32(r1); \
1747 tmp32_2 = tcg_const_i32(r2); \
1748 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
1749 tcg_temp_free_i32(tmp32_1); \
1750 tcg_temp_free_i32(tmp32_2);
1752 #define FP_HELPER_CC(i) \
1753 tmp32_1 = tcg_const_i32(r1); \
1754 tmp32_2 = tcg_const_i32(r2); \
1755 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
1757 tcg_temp_free_i32(tmp32_1); \
1758 tcg_temp_free_i32(tmp32_2);
1761 case 0x0: /* LPEBR R1,R2 [RRE] */
1762 FP_HELPER_CC(lpebr
);
1764 case 0x2: /* LTEBR R1,R2 [RRE] */
1765 FP_HELPER_CC(ltebr
);
1767 case 0x3: /* LCEBR R1,R2 [RRE] */
1768 FP_HELPER_CC(lcebr
);
1770 case 0x4: /* LDEBR R1,R2 [RRE] */
1773 case 0x5: /* LXDBR R1,R2 [RRE] */
1776 case 0x9: /* CEBR R1,R2 [RRE] */
1779 case 0xa: /* AEBR R1,R2 [RRE] */
1782 case 0xb: /* SEBR R1,R2 [RRE] */
1785 case 0xd: /* DEBR R1,R2 [RRE] */
1788 case 0x10: /* LPDBR R1,R2 [RRE] */
1789 FP_HELPER_CC(lpdbr
);
1791 case 0x12: /* LTDBR R1,R2 [RRE] */
1792 FP_HELPER_CC(ltdbr
);
1794 case 0x13: /* LCDBR R1,R2 [RRE] */
1795 FP_HELPER_CC(lcdbr
);
1797 case 0x15: /* SQBDR R1,R2 [RRE] */
1800 case 0x17: /* MEEBR R1,R2 [RRE] */
1803 case 0x19: /* CDBR R1,R2 [RRE] */
1806 case 0x1a: /* ADBR R1,R2 [RRE] */
1809 case 0x1b: /* SDBR R1,R2 [RRE] */
1812 case 0x1c: /* MDBR R1,R2 [RRE] */
1815 case 0x1d: /* DDBR R1,R2 [RRE] */
1818 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
1819 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
1820 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
1821 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
1822 tmp32_1
= tcg_const_i32(m3
);
1823 tmp32_2
= tcg_const_i32(r2
);
1824 tmp32_3
= tcg_const_i32(r1
);
1827 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1830 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1833 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
1838 tcg_temp_free_i32(tmp32_1
);
1839 tcg_temp_free_i32(tmp32_2
);
1840 tcg_temp_free_i32(tmp32_3
);
1842 case 0x40: /* LPXBR R1,R2 [RRE] */
1843 FP_HELPER_CC(lpxbr
);
1845 case 0x42: /* LTXBR R1,R2 [RRE] */
1846 FP_HELPER_CC(ltxbr
);
1848 case 0x43: /* LCXBR R1,R2 [RRE] */
1849 FP_HELPER_CC(lcxbr
);
1851 case 0x44: /* LEDBR R1,R2 [RRE] */
1854 case 0x45: /* LDXBR R1,R2 [RRE] */
1857 case 0x46: /* LEXBR R1,R2 [RRE] */
1860 case 0x49: /* CXBR R1,R2 [RRE] */
1863 case 0x4a: /* AXBR R1,R2 [RRE] */
1866 case 0x4b: /* SXBR R1,R2 [RRE] */
1869 case 0x4c: /* MXBR R1,R2 [RRE] */
1872 case 0x4d: /* DXBR R1,R2 [RRE] */
1875 case 0x65: /* LXR R1,R2 [RRE] */
1876 tmp
= load_freg(r2
);
1877 store_freg(r1
, tmp
);
1878 tcg_temp_free_i64(tmp
);
1879 tmp
= load_freg(r2
+ 2);
1880 store_freg(r1
+ 2, tmp
);
1881 tcg_temp_free_i64(tmp
);
1883 case 0x74: /* LZER R1 [RRE] */
1884 tmp32_1
= tcg_const_i32(r1
);
1885 gen_helper_lzer(cpu_env
, tmp32_1
);
1886 tcg_temp_free_i32(tmp32_1
);
1888 case 0x75: /* LZDR R1 [RRE] */
1889 tmp32_1
= tcg_const_i32(r1
);
1890 gen_helper_lzdr(cpu_env
, tmp32_1
);
1891 tcg_temp_free_i32(tmp32_1
);
1893 case 0x76: /* LZXR R1 [RRE] */
1894 tmp32_1
= tcg_const_i32(r1
);
1895 gen_helper_lzxr(cpu_env
, tmp32_1
);
1896 tcg_temp_free_i32(tmp32_1
);
1898 case 0x84: /* SFPC R1 [RRE] */
1899 tmp32_1
= load_reg32(r1
);
1900 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1901 tcg_temp_free_i32(tmp32_1
);
1903 case 0x8c: /* EFPC R1 [RRE] */
1904 tmp32_1
= tcg_temp_new_i32();
1905 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1906 store_reg32(r1
, tmp32_1
);
1907 tcg_temp_free_i32(tmp32_1
);
1909 case 0x94: /* CEFBR R1,R2 [RRE] */
1910 case 0x95: /* CDFBR R1,R2 [RRE] */
1911 case 0x96: /* CXFBR R1,R2 [RRE] */
1912 tmp32_1
= tcg_const_i32(r1
);
1913 tmp32_2
= load_reg32(r2
);
1916 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
1919 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
1922 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
1927 tcg_temp_free_i32(tmp32_1
);
1928 tcg_temp_free_i32(tmp32_2
);
1930 case 0x98: /* CFEBR R1,R2 [RRE] */
1931 case 0x99: /* CFDBR R1,R2 [RRE] */
1932 case 0x9a: /* CFXBR R1,R2 [RRE] */
1933 tmp32_1
= tcg_const_i32(r1
);
1934 tmp32_2
= tcg_const_i32(r2
);
1935 tmp32_3
= tcg_const_i32(m3
);
1938 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1941 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1944 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1950 tcg_temp_free_i32(tmp32_1
);
1951 tcg_temp_free_i32(tmp32_2
);
1952 tcg_temp_free_i32(tmp32_3
);
1954 case 0xa4: /* CEGBR R1,R2 [RRE] */
1955 case 0xa5: /* CDGBR R1,R2 [RRE] */
1956 tmp32_1
= tcg_const_i32(r1
);
1960 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
1963 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
1968 tcg_temp_free_i32(tmp32_1
);
1969 tcg_temp_free_i64(tmp
);
1971 case 0xa6: /* CXGBR R1,R2 [RRE] */
1972 tmp32_1
= tcg_const_i32(r1
);
1974 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
1975 tcg_temp_free_i32(tmp32_1
);
1976 tcg_temp_free_i64(tmp
);
1978 case 0xa8: /* CGEBR R1,R2 [RRE] */
1979 tmp32_1
= tcg_const_i32(r1
);
1980 tmp32_2
= tcg_const_i32(r2
);
1981 tmp32_3
= tcg_const_i32(m3
);
1982 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1984 tcg_temp_free_i32(tmp32_1
);
1985 tcg_temp_free_i32(tmp32_2
);
1986 tcg_temp_free_i32(tmp32_3
);
1988 case 0xa9: /* CGDBR R1,R2 [RRE] */
1989 tmp32_1
= tcg_const_i32(r1
);
1990 tmp32_2
= tcg_const_i32(r2
);
1991 tmp32_3
= tcg_const_i32(m3
);
1992 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1994 tcg_temp_free_i32(tmp32_1
);
1995 tcg_temp_free_i32(tmp32_2
);
1996 tcg_temp_free_i32(tmp32_3
);
1998 case 0xaa: /* CGXBR R1,R2 [RRE] */
1999 tmp32_1
= tcg_const_i32(r1
);
2000 tmp32_2
= tcg_const_i32(r2
);
2001 tmp32_3
= tcg_const_i32(m3
);
2002 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2004 tcg_temp_free_i32(tmp32_1
);
2005 tcg_temp_free_i32(tmp32_2
);
2006 tcg_temp_free_i32(tmp32_3
);
2009 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
2010 gen_illegal_opcode(s
);
2018 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
2024 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
2026 case 0x17: /* LLGTR R1,R2 [RRE] */
2027 tmp32_1
= load_reg32(r2
);
2028 tmp
= tcg_temp_new_i64();
2029 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, 0x7fffffffUL
);
2030 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2032 tcg_temp_free_i32(tmp32_1
);
2033 tcg_temp_free_i64(tmp
);
2035 case 0x0f: /* LRVGR R1,R2 [RRE] */
2036 tcg_gen_bswap64_i64(regs
[r1
], regs
[r2
]);
2038 case 0x1f: /* LRVR R1,R2 [RRE] */
2039 tmp32_1
= load_reg32(r2
);
2040 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
2041 store_reg32(r1
, tmp32_1
);
2042 tcg_temp_free_i32(tmp32_1
);
2044 case 0x83: /* FLOGR R1,R2 [RRE] */
2046 tmp32_1
= tcg_const_i32(r1
);
2047 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
2049 tcg_temp_free_i64(tmp
);
2050 tcg_temp_free_i32(tmp32_1
);
2053 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
2054 gen_illegal_opcode(s
);
2059 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
2062 TCGv_i32 tmp32_1
, tmp32_2
;
2065 int op
, r1
, r2
, r3
, d1
, d2
, x2
, b1
, b2
, r1b
;
2068 opc
= cpu_ldub_code(env
, s
->pc
);
2069 LOG_DISAS("opc 0x%x\n", opc
);
2072 #ifndef CONFIG_USER_ONLY
2073 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2074 check_privileged(s
);
2075 insn
= ld_code4(env
, s
->pc
);
2076 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2077 tmp
= get_address(s
, 0, b2
, d2
);
2078 tmp2
= load_reg(r3
);
2079 tmp32_1
= tcg_const_i32(r1
);
2080 potential_page_fault(s
);
2081 gen_helper_sigp(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp2
);
2083 tcg_temp_free_i64(tmp
);
2084 tcg_temp_free_i64(tmp2
);
2085 tcg_temp_free_i32(tmp32_1
);
2087 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2088 check_privileged(s
);
2089 insn
= ld_code4(env
, s
->pc
);
2090 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2091 tmp32_1
= tcg_const_i32(r1
);
2092 potential_page_fault(s
);
2093 gen_helper_lra(cc_op
, cpu_env
, tmp
, tmp32_1
);
2095 tcg_temp_free_i64(tmp
);
2096 tcg_temp_free_i32(tmp32_1
);
2100 insn
= ld_code4(env
, s
->pc
);
2101 op
= (insn
>> 16) & 0xff;
2103 case 0x9c: /* STFPC D2(B2) [S] */
2105 b2
= (insn
>> 12) & 0xf;
2106 tmp32_1
= tcg_temp_new_i32();
2107 tmp
= tcg_temp_new_i64();
2108 tmp2
= get_address(s
, 0, b2
, d2
);
2109 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2110 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2111 tcg_gen_qemu_st32(tmp
, tmp2
, get_mem_index(s
));
2112 tcg_temp_free_i32(tmp32_1
);
2113 tcg_temp_free_i64(tmp
);
2114 tcg_temp_free_i64(tmp2
);
2117 disas_b2(env
, s
, op
, insn
);
2122 insn
= ld_code4(env
, s
->pc
);
2123 op
= (insn
>> 16) & 0xff;
2124 r3
= (insn
>> 12) & 0xf; /* aka m3 */
2125 r1
= (insn
>> 4) & 0xf;
2127 disas_b3(env
, s
, op
, r3
, r1
, r2
);
2129 #ifndef CONFIG_USER_ONLY
2130 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2132 check_privileged(s
);
2133 insn
= ld_code4(env
, s
->pc
);
2134 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2135 tmp
= get_address(s
, 0, b2
, d2
);
2136 tmp32_1
= tcg_const_i32(r1
);
2137 tmp32_2
= tcg_const_i32(r3
);
2138 potential_page_fault(s
);
2139 gen_helper_stctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2140 tcg_temp_free_i64(tmp
);
2141 tcg_temp_free_i32(tmp32_1
);
2142 tcg_temp_free_i32(tmp32_2
);
2144 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2146 check_privileged(s
);
2147 insn
= ld_code4(env
, s
->pc
);
2148 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2149 tmp
= get_address(s
, 0, b2
, d2
);
2150 tmp32_1
= tcg_const_i32(r1
);
2151 tmp32_2
= tcg_const_i32(r3
);
2152 potential_page_fault(s
);
2153 gen_helper_lctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2154 tcg_temp_free_i64(tmp
);
2155 tcg_temp_free_i32(tmp32_1
);
2156 tcg_temp_free_i32(tmp32_2
);
2160 insn
= ld_code4(env
, s
->pc
);
2161 r1
= (insn
>> 4) & 0xf;
2163 op
= (insn
>> 16) & 0xff;
2164 disas_b9(env
, s
, op
, r1
, r2
);
2166 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2167 insn
= ld_code4(env
, s
->pc
);
2168 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2169 tmp
= get_address(s
, 0, b2
, d2
);
2170 tmp32_1
= tcg_const_i32(r1
);
2171 tmp32_2
= tcg_const_i32(r3
);
2172 potential_page_fault(s
);
2173 gen_helper_cs(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2175 tcg_temp_free_i64(tmp
);
2176 tcg_temp_free_i32(tmp32_1
);
2177 tcg_temp_free_i32(tmp32_2
);
2179 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2180 insn
= ld_code4(env
, s
->pc
);
2181 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2182 tmp
= get_address(s
, 0, b2
, d2
);
2183 tmp32_1
= load_reg32(r1
);
2184 tmp32_2
= tcg_const_i32(r3
);
2185 potential_page_fault(s
);
2186 gen_helper_clm(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2188 tcg_temp_free_i64(tmp
);
2189 tcg_temp_free_i32(tmp32_1
);
2190 tcg_temp_free_i32(tmp32_2
);
2192 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2193 insn
= ld_code4(env
, s
->pc
);
2194 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2195 tmp
= get_address(s
, 0, b2
, d2
);
2196 tmp32_1
= load_reg32(r1
);
2197 tmp32_2
= tcg_const_i32(r3
);
2198 potential_page_fault(s
);
2199 gen_helper_stcm(cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2200 tcg_temp_free_i64(tmp
);
2201 tcg_temp_free_i32(tmp32_1
);
2202 tcg_temp_free_i32(tmp32_2
);
2204 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2205 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2206 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2207 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2208 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2209 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2210 insn
= ld_code6(env
, s
->pc
);
2211 vl
= tcg_const_i32((insn
>> 32) & 0xff);
2212 b1
= (insn
>> 28) & 0xf;
2213 b2
= (insn
>> 12) & 0xf;
2214 d1
= (insn
>> 16) & 0xfff;
2216 tmp
= get_address(s
, 0, b1
, d1
);
2217 tmp2
= get_address(s
, 0, b2
, d2
);
2220 potential_page_fault(s
);
2221 gen_helper_nc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2225 gen_op_clc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
2228 potential_page_fault(s
);
2229 gen_helper_oc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2233 potential_page_fault(s
);
2234 gen_helper_xc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
2238 potential_page_fault(s
);
2239 gen_helper_tr(cpu_env
, vl
, tmp
, tmp2
);
2243 potential_page_fault(s
);
2244 gen_helper_unpk(cpu_env
, vl
, tmp
, tmp2
);
2249 tcg_temp_free_i64(tmp
);
2250 tcg_temp_free_i64(tmp2
);
2252 #ifndef CONFIG_USER_ONLY
2253 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
2254 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
2255 check_privileged(s
);
2256 potential_page_fault(s
);
2257 insn
= ld_code6(env
, s
->pc
);
2258 r1
= (insn
>> 36) & 0xf;
2259 r3
= (insn
>> 32) & 0xf;
2260 b1
= (insn
>> 28) & 0xf;
2261 d1
= (insn
>> 16) & 0xfff;
2262 b2
= (insn
>> 12) & 0xf;
2265 tmp
= get_address(s
, 0, b1
, d1
);
2266 tmp2
= get_address(s
, 0, b2
, d2
);
2268 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], tmp
, tmp2
);
2270 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], tmp
, tmp2
);
2273 tcg_temp_free_i64(tmp
);
2274 tcg_temp_free_i64(tmp2
);
2278 insn
= ld_code6(env
, s
->pc
);
2281 r1
= (insn
>> 36) & 0xf;
2282 x2
= (insn
>> 32) & 0xf;
2283 b2
= (insn
>> 28) & 0xf;
2284 d2
= ((int)((((insn
>> 16) & 0xfff)
2285 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2286 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
2288 #ifndef CONFIG_USER_ONLY
2290 /* Test Protection */
2291 check_privileged(s
);
2292 insn
= ld_code6(env
, s
->pc
);
2294 disas_e5(env
, s
, insn
);
2298 insn
= ld_code6(env
, s
->pc
);
2301 r1
= (insn
>> 36) & 0xf;
2302 r3
= (insn
>> 32) & 0xf;
2303 b2
= (insn
>> 28) & 0xf;
2304 d2
= ((int)((((insn
>> 16) & 0xfff)
2305 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
2306 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
2309 insn
= ld_code6(env
, s
->pc
);
2312 r1
= (insn
>> 36) & 0xf;
2313 x2
= (insn
>> 32) & 0xf;
2314 b2
= (insn
>> 28) & 0xf;
2315 d2
= (short)((insn
>> 16) & 0xfff);
2316 r1b
= (insn
>> 12) & 0xf;
2317 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
2320 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
2321 gen_illegal_opcode(s
);
2326 /* ====================================================================== */
2327 /* Define the insn format enumeration. */
2328 #define F0(N) FMT_##N,
2329 #define F1(N, X1) F0(N)
2330 #define F2(N, X1, X2) F0(N)
2331 #define F3(N, X1, X2, X3) F0(N)
2332 #define F4(N, X1, X2, X3, X4) F0(N)
2333 #define F5(N, X1, X2, X3, X4, X5) F0(N)
2336 #include "insn-format.def"
2346 /* Define a structure to hold the decoded fields. We'll store each inside
2347 an array indexed by an enum. In order to conserve memory, we'll arrange
2348 for fields that do not exist at the same time to overlap, thus the "C"
2349 for compact. For checking purposes there is an "O" for original index
2350 as well that will be applied to availability bitmaps. */
2352 enum DisasFieldIndexO
{
2375 enum DisasFieldIndexC
{
2406 struct DisasFields
{
2409 unsigned presentC
:16;
2410 unsigned int presentO
;
2414 /* This is the way fields are to be accessed out of DisasFields. */
2415 #define have_field(S, F) have_field1((S), FLD_O_##F)
2416 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
2418 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
2420 return (f
->presentO
>> c
) & 1;
2423 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
2424 enum DisasFieldIndexC c
)
2426 assert(have_field1(f
, o
));
2430 /* Describe the layout of each field in each format. */
2431 typedef struct DisasField
{
2433 unsigned int size
:8;
2434 unsigned int type
:2;
2435 unsigned int indexC
:6;
2436 enum DisasFieldIndexO indexO
:8;
2439 typedef struct DisasFormatInfo
{
2440 DisasField op
[NUM_C_FIELD
];
2443 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
2444 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
2445 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2446 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
2447 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2448 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2449 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
2450 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2451 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2452 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
2453 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
2454 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
2455 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
2456 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
2458 #define F0(N) { { } },
2459 #define F1(N, X1) { { X1 } },
2460 #define F2(N, X1, X2) { { X1, X2 } },
2461 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
2462 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
2463 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
2465 static const DisasFormatInfo format_info
[] = {
2466 #include "insn-format.def"
2484 /* Generally, we'll extract operands into this structures, operate upon
2485 them, and store them back. See the "in1", "in2", "prep", "wout" sets
2486 of routines below for more details. */
2488 bool g_out
, g_out2
, g_in1
, g_in2
;
2489 TCGv_i64 out
, out2
, in1
, in2
;
2493 /* Return values from translate_one, indicating the state of the TB. */
2495 /* Continue the TB. */
2497 /* We have emitted one or more goto_tb. No fixup required. */
2499 /* We are not using a goto_tb (for whatever reason), but have updated
2500 the PC (for whatever reason), so there's no need to do it again on
2503 /* We are exiting the TB, but have neither emitted a goto_tb, nor
2504 updated the PC for the next instruction to be executed. */
2506 /* We are ending the TB with a noreturn function call, e.g. longjmp.
2507 No following code will be executed. */
2511 typedef enum DisasFacility
{
2512 FAC_Z
, /* zarch (default) */
2513 FAC_CASS
, /* compare and swap and store */
2514 FAC_CASS2
, /* compare and swap and store 2*/
2515 FAC_DFP
, /* decimal floating point */
2516 FAC_DFPR
, /* decimal floating point rounding */
2517 FAC_DO
, /* distinct operands */
2518 FAC_EE
, /* execute extensions */
2519 FAC_EI
, /* extended immediate */
2520 FAC_FPE
, /* floating point extension */
2521 FAC_FPSSH
, /* floating point support sign handling */
2522 FAC_FPRGR
, /* FPR-GR transfer */
2523 FAC_GIE
, /* general instructions extension */
2524 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
2525 FAC_HW
, /* high-word */
2526 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
2527 FAC_LOC
, /* load/store on condition */
2528 FAC_LD
, /* long displacement */
2529 FAC_PC
, /* population count */
2530 FAC_SCF
, /* store clock fast */
2531 FAC_SFLE
, /* store facility list extended */
2537 DisasFacility fac
:6;
2541 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
2542 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
2543 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
2544 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
2545 void (*help_cout
)(DisasContext
*, DisasOps
*);
2546 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
2551 /* ====================================================================== */
2552 /* Miscelaneous helpers, used by several operations. */
2554 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
2555 DisasOps
*o
, int mask
)
2557 int b2
= get_field(f
, b2
);
2558 int d2
= get_field(f
, d2
);
2561 o
->in2
= tcg_const_i64(d2
& mask
);
2563 o
->in2
= get_address(s
, 0, b2
, d2
);
2564 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2568 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
2570 if (dest
== s
->next_pc
) {
2573 if (use_goto_tb(s
, dest
)) {
2574 gen_update_cc_op(s
);
2576 tcg_gen_movi_i64(psw_addr
, dest
);
2577 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
2578 return EXIT_GOTO_TB
;
2580 tcg_gen_movi_i64(psw_addr
, dest
);
2581 return EXIT_PC_UPDATED
;
2585 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
2586 bool is_imm
, int imm
, TCGv_i64 cdest
)
2589 uint64_t dest
= s
->pc
+ 2 * imm
;
2592 /* Take care of the special cases first. */
2593 if (c
->cond
== TCG_COND_NEVER
) {
2598 if (dest
== s
->next_pc
) {
2599 /* Branch to next. */
2603 if (c
->cond
== TCG_COND_ALWAYS
) {
2604 ret
= help_goto_direct(s
, dest
);
2608 if (TCGV_IS_UNUSED_I64(cdest
)) {
2609 /* E.g. bcr %r0 -> no branch. */
2613 if (c
->cond
== TCG_COND_ALWAYS
) {
2614 tcg_gen_mov_i64(psw_addr
, cdest
);
2615 ret
= EXIT_PC_UPDATED
;
2620 if (use_goto_tb(s
, s
->next_pc
)) {
2621 if (is_imm
&& use_goto_tb(s
, dest
)) {
2622 /* Both exits can use goto_tb. */
2623 gen_update_cc_op(s
);
2625 lab
= gen_new_label();
2627 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2629 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2632 /* Branch not taken. */
2634 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2635 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2640 tcg_gen_movi_i64(psw_addr
, dest
);
2641 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
2645 /* Fallthru can use goto_tb, but taken branch cannot. */
2646 /* Store taken branch destination before the brcond. This
2647 avoids having to allocate a new local temp to hold it.
2648 We'll overwrite this in the not taken case anyway. */
2650 tcg_gen_mov_i64(psw_addr
, cdest
);
2653 lab
= gen_new_label();
2655 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
2657 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
2660 /* Branch not taken. */
2661 gen_update_cc_op(s
);
2663 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
2664 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
2668 tcg_gen_movi_i64(psw_addr
, dest
);
2670 ret
= EXIT_PC_UPDATED
;
2673 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
2674 Most commonly we're single-stepping or some other condition that
2675 disables all use of goto_tb. Just update the PC and exit. */
2677 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
2679 cdest
= tcg_const_i64(dest
);
2683 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
2686 TCGv_i32 t0
= tcg_temp_new_i32();
2687 TCGv_i64 t1
= tcg_temp_new_i64();
2688 TCGv_i64 z
= tcg_const_i64(0);
2689 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
2690 tcg_gen_extu_i32_i64(t1
, t0
);
2691 tcg_temp_free_i32(t0
);
2692 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
2693 tcg_temp_free_i64(t1
);
2694 tcg_temp_free_i64(z
);
2698 tcg_temp_free_i64(cdest
);
2700 tcg_temp_free_i64(next
);
2702 ret
= EXIT_PC_UPDATED
;
2710 /* ====================================================================== */
2711 /* The operations. These perform the bulk of the work for any insn,
2712 usually after the operands have been loaded and output initialized. */
2714 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
2716 gen_helper_abs_i64(o
->out
, o
->in2
);
2720 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
2722 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2726 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
2730 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2732 /* XXX possible optimization point */
2734 cc
= tcg_temp_new_i64();
2735 tcg_gen_extu_i32_i64(cc
, cc_op
);
2736 tcg_gen_shri_i64(cc
, cc
, 1);
2738 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2739 tcg_temp_free_i64(cc
);
2743 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
2745 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2749 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
2751 int shift
= s
->insn
->data
& 0xff;
2752 int size
= s
->insn
->data
>> 8;
2753 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2756 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2757 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2758 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2760 /* Produce the CC from only the bits manipulated. */
2761 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2762 set_cc_nz_u64(s
, cc_dst
);
2766 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
2768 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2769 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
2770 tcg_gen_mov_i64(psw_addr
, o
->in2
);
2771 return EXIT_PC_UPDATED
;
2777 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
2779 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
2780 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
2783 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
2785 int m1
= get_field(s
->fields
, m1
);
2786 bool is_imm
= have_field(s
->fields
, i2
);
2787 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2790 disas_jcc(s
, &c
, m1
);
2791 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2794 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
2796 int r1
= get_field(s
->fields
, r1
);
2797 bool is_imm
= have_field(s
->fields
, i2
);
2798 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2802 c
.cond
= TCG_COND_NE
;
2807 t
= tcg_temp_new_i64();
2808 tcg_gen_subi_i64(t
, regs
[r1
], 1);
2809 store_reg32_i64(r1
, t
);
2810 c
.u
.s32
.a
= tcg_temp_new_i32();
2811 c
.u
.s32
.b
= tcg_const_i32(0);
2812 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
2813 tcg_temp_free_i64(t
);
2815 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2818 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
2820 int r1
= get_field(s
->fields
, r1
);
2821 bool is_imm
= have_field(s
->fields
, i2
);
2822 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
2825 c
.cond
= TCG_COND_NE
;
2830 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
2831 c
.u
.s64
.a
= regs
[r1
];
2832 c
.u
.s64
.b
= tcg_const_i64(0);
2834 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
2837 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2839 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2840 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2841 potential_page_fault(s
);
2842 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2843 tcg_temp_free_i32(r1
);
2844 tcg_temp_free_i32(r3
);
2849 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2851 TCGv_i64 t1
= tcg_temp_new_i64();
2852 TCGv_i32 t2
= tcg_temp_new_i32();
2853 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2854 gen_helper_cvd(t1
, t2
);
2855 tcg_temp_free_i32(t2
);
2856 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2857 tcg_temp_free_i64(t1
);
2861 #ifndef CONFIG_USER_ONLY
2862 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2866 check_privileged(s
);
2867 potential_page_fault(s
);
2869 /* We pretend the format is RX_a so that D2 is the field we want. */
2870 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2871 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2872 tcg_temp_free_i32(tmp
);
2877 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2879 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2880 return_low128(o
->out
);
2884 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2886 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2887 return_low128(o
->out
);
2891 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2893 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2894 return_low128(o
->out
);
2898 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2900 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2901 return_low128(o
->out
);
2905 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2907 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2908 tb->flags, (ab)use the tb->cs_base field as the address of
2909 the template in memory, and grab 8 bits of tb->flags/cflags for
2910 the contents of the register. We would then recognize all this
2911 in gen_intermediate_code_internal, generating code for exactly
2912 one instruction. This new TB then gets executed normally.
2914 On the other hand, this seems to be mostly used for modifying
2915 MVC inside of memcpy, which needs a helper call anyway. So
2916 perhaps this doesn't bear thinking about any further. */
2923 tmp
= tcg_const_i64(s
->next_pc
);
2924 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2925 tcg_temp_free_i64(tmp
);
2931 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2933 int m3
= get_field(s
->fields
, m3
);
2934 int pos
, len
, base
= s
->insn
->data
;
2935 TCGv_i64 tmp
= tcg_temp_new_i64();
2940 /* Effectively a 32-bit load. */
2941 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2948 /* Effectively a 16-bit load. */
2949 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2957 /* Effectively an 8-bit load. */
2958 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2963 pos
= base
+ ctz32(m3
) * 8;
2964 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2965 ccm
= ((1ull << len
) - 1) << pos
;
2969 /* This is going to be a sequence of loads and inserts. */
2970 pos
= base
+ 32 - 8;
2974 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2975 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2976 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2979 m3
= (m3
<< 1) & 0xf;
2985 tcg_gen_movi_i64(tmp
, ccm
);
2986 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2987 tcg_temp_free_i64(tmp
);
2991 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2993 int shift
= s
->insn
->data
& 0xff;
2994 int size
= s
->insn
->data
>> 8;
2995 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2999 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
3001 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
3005 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
3007 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
3011 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
3013 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
3017 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
3019 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
3023 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
3025 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
3029 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
3031 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
3035 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
3037 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
3041 #ifndef CONFIG_USER_ONLY
3042 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
3046 check_privileged(s
);
3048 t1
= tcg_temp_new_i64();
3049 t2
= tcg_temp_new_i64();
3050 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
3051 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
3052 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
3053 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3054 tcg_gen_shli_i64(t1
, t1
, 32);
3055 gen_helper_load_psw(cpu_env
, t1
, t2
);
3056 tcg_temp_free_i64(t1
);
3057 tcg_temp_free_i64(t2
);
3058 return EXIT_NORETURN
;
3062 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
3064 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3065 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3066 potential_page_fault(s
);
3067 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
3068 tcg_temp_free_i32(r1
);
3069 tcg_temp_free_i32(r3
);
3073 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
3075 int r1
= get_field(s
->fields
, r1
);
3076 int r3
= get_field(s
->fields
, r3
);
3077 TCGv_i64 t
= tcg_temp_new_i64();
3078 TCGv_i64 t4
= tcg_const_i64(4);
3081 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
3082 store_reg32_i64(r1
, t
);
3086 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3090 tcg_temp_free_i64(t
);
3091 tcg_temp_free_i64(t4
);
3095 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
3097 int r1
= get_field(s
->fields
, r1
);
3098 int r3
= get_field(s
->fields
, r3
);
3099 TCGv_i64 t
= tcg_temp_new_i64();
3100 TCGv_i64 t4
= tcg_const_i64(4);
3103 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
3104 store_reg32h_i64(r1
, t
);
3108 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3112 tcg_temp_free_i64(t
);
3113 tcg_temp_free_i64(t4
);
3117 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
3119 int r1
= get_field(s
->fields
, r1
);
3120 int r3
= get_field(s
->fields
, r3
);
3121 TCGv_i64 t8
= tcg_const_i64(8);
3124 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
3128 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
3132 tcg_temp_free_i64(t8
);
3136 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
3139 o
->g_out
= o
->g_in2
;
3140 TCGV_UNUSED_I64(o
->in2
);
3145 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
3149 o
->g_out
= o
->g_in1
;
3150 o
->g_out2
= o
->g_in2
;
3151 TCGV_UNUSED_I64(o
->in1
);
3152 TCGV_UNUSED_I64(o
->in2
);
3153 o
->g_in1
= o
->g_in2
= false;
3157 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
3159 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3160 potential_page_fault(s
);
3161 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
3162 tcg_temp_free_i32(l
);
3166 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3168 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3169 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
3170 potential_page_fault(s
);
3171 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
3172 tcg_temp_free_i32(r1
);
3173 tcg_temp_free_i32(r2
);
3178 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3180 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3181 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3182 potential_page_fault(s
);
3183 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
3184 tcg_temp_free_i32(r1
);
3185 tcg_temp_free_i32(r3
);
3190 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3192 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3196 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3198 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3199 return_low128(o
->out2
);
3203 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3205 gen_helper_nabs_i64(o
->out
, o
->in2
);
3209 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3211 tcg_gen_neg_i64(o
->out
, o
->in2
);
3215 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3217 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3221 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3223 int shift
= s
->insn
->data
& 0xff;
3224 int size
= s
->insn
->data
>> 8;
3225 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3228 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3229 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3231 /* Produce the CC from only the bits manipulated. */
3232 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3233 set_cc_nz_u64(s
, cc_dst
);
3237 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3239 TCGv_i32 t1
= tcg_temp_new_i32();
3240 TCGv_i32 t2
= tcg_temp_new_i32();
3241 TCGv_i32 to
= tcg_temp_new_i32();
3242 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
3243 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
3244 tcg_gen_rotl_i32(to
, t1
, t2
);
3245 tcg_gen_extu_i32_i64(o
->out
, to
);
3246 tcg_temp_free_i32(t1
);
3247 tcg_temp_free_i32(t2
);
3248 tcg_temp_free_i32(to
);
3252 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3254 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3258 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3260 uint64_t sign
= 1ull << s
->insn
->data
;
3261 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3262 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3263 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3264 /* The arithmetic left shift is curious in that it does not affect
3265 the sign bit. Copy that over from the source unchanged. */
3266 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3267 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3268 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3272 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3274 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3278 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3280 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3284 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3286 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3290 #ifndef CONFIG_USER_ONLY
3291 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3293 check_privileged(s
);
3294 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3298 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3300 uint64_t i2
= get_field(s
->fields
, i2
);
3303 check_privileged(s
);
3305 /* It is important to do what the instruction name says: STORE THEN.
3306 If we let the output hook perform the store then if we fault and
3307 restart, we'll have the wrong SYSTEM MASK in place. */
3308 t
= tcg_temp_new_i64();
3309 tcg_gen_shri_i64(t
, psw_mask
, 56);
3310 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3311 tcg_temp_free_i64(t
);
3313 if (s
->fields
->op
== 0xac) {
3314 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3315 (i2
<< 56) | 0x00ffffffffffffffull
);
3317 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3323 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3325 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3329 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3331 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3335 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3337 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3341 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3343 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3347 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3349 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3350 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3351 potential_page_fault(s
);
3352 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3353 tcg_temp_free_i32(r1
);
3354 tcg_temp_free_i32(r3
);
3358 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3360 int r1
= get_field(s
->fields
, r1
);
3361 int r3
= get_field(s
->fields
, r3
);
3362 int size
= s
->insn
->data
;
3363 TCGv_i64 tsize
= tcg_const_i64(size
);
3367 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3369 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3374 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3378 tcg_temp_free_i64(tsize
);
3382 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3384 int r1
= get_field(s
->fields
, r1
);
3385 int r3
= get_field(s
->fields
, r3
);
3386 TCGv_i64 t
= tcg_temp_new_i64();
3387 TCGv_i64 t4
= tcg_const_i64(4);
3388 TCGv_i64 t32
= tcg_const_i64(32);
3391 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3392 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3396 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3400 tcg_temp_free_i64(t
);
3401 tcg_temp_free_i64(t4
);
3402 tcg_temp_free_i64(t32
);
3406 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3408 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3412 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3417 tcg_gen_not_i64(o
->in2
, o
->in2
);
3418 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3420 /* XXX possible optimization point */
3422 cc
= tcg_temp_new_i64();
3423 tcg_gen_extu_i32_i64(cc
, cc_op
);
3424 tcg_gen_shri_i64(cc
, cc
, 1);
3425 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3426 tcg_temp_free_i64(cc
);
3430 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3437 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3438 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3439 tcg_temp_free_i32(t
);
3441 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3442 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3443 tcg_temp_free_i32(t
);
3445 gen_exception(EXCP_SVC
);
3446 return EXIT_NORETURN
;
3449 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3451 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3455 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3457 int shift
= s
->insn
->data
& 0xff;
3458 int size
= s
->insn
->data
>> 8;
3459 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3462 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3463 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3465 /* Produce the CC from only the bits manipulated. */
3466 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3467 set_cc_nz_u64(s
, cc_dst
);
3471 /* ====================================================================== */
3472 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3473 the original inputs), update the various cc data structures in order to
3474 be able to compute the new condition code. */
3476 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3478 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3481 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3483 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3486 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3488 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3491 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3493 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3496 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3498 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3501 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3503 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3506 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3508 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3511 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3513 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3516 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3518 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3521 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3523 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3526 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3528 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3531 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3533 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3536 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3538 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3541 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3543 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3546 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3548 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3551 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3553 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3556 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3558 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3559 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3562 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3564 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3567 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3569 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3572 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3574 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3577 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3579 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3582 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3584 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3587 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3589 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3592 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3594 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3597 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3599 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3602 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3604 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3607 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3609 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3612 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3614 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3617 /* ====================================================================== */
3618 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3619 with the TCG register to which we will write. Used in combination with
3620 the "wout" generators, in some cases we need a new temporary, and in
3621 some cases we can write to a TCG global. */
3623 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3625 o
->out
= tcg_temp_new_i64();
3628 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3630 o
->out
= tcg_temp_new_i64();
3631 o
->out2
= tcg_temp_new_i64();
3634 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3636 o
->out
= regs
[get_field(f
, r1
)];
3640 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3642 /* ??? Specification exception: r1 must be even. */
3643 int r1
= get_field(f
, r1
);
3645 o
->out2
= regs
[(r1
+ 1) & 15];
3646 o
->g_out
= o
->g_out2
= true;
3649 /* ====================================================================== */
3650 /* The "Write OUTput" generators. These generally perform some non-trivial
3651 copy of data to TCG globals, or to main memory. The trivial cases are
3652 generally handled by having a "prep" generator install the TCG global
3653 as the destination of the operation. */
3655 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3657 store_reg(get_field(f
, r1
), o
->out
);
3660 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3662 int r1
= get_field(f
, r1
);
3663 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3666 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3668 store_reg32_i64(get_field(f
, r1
), o
->out
);
3671 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3673 /* ??? Specification exception: r1 must be even. */
3674 int r1
= get_field(f
, r1
);
3675 store_reg32_i64(r1
, o
->out
);
3676 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3679 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3681 /* ??? Specification exception: r1 must be even. */
3682 int r1
= get_field(f
, r1
);
3683 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3684 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3685 store_reg32_i64(r1
, o
->out
);
3688 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3690 store_freg32_i64(get_field(f
, r1
), o
->out
);
3693 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3695 store_freg(get_field(f
, r1
), o
->out
);
3698 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3700 int f1
= get_field(s
->fields
, r1
);
3701 store_freg(f1
, o
->out
);
3702 store_freg((f1
+ 2) & 15, o
->out2
);
3705 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3707 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3708 store_reg32_i64(get_field(f
, r1
), o
->out
);
3712 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3714 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3715 store_freg32_i64(get_field(f
, r1
), o
->out
);
3719 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3721 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3724 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3726 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3729 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3731 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3734 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3736 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3739 /* ====================================================================== */
3740 /* The "INput 1" generators. These load the first operand to an insn. */
3742 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3744 o
->in1
= load_reg(get_field(f
, r1
));
3747 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3749 o
->in1
= regs
[get_field(f
, r1
)];
3753 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3755 o
->in1
= tcg_temp_new_i64();
3756 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3759 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3761 o
->in1
= tcg_temp_new_i64();
3762 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3765 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3767 /* ??? Specification exception: r1 must be even. */
3768 int r1
= get_field(f
, r1
);
3769 o
->in1
= load_reg((r1
+ 1) & 15);
3772 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3774 /* ??? Specification exception: r1 must be even. */
3775 int r1
= get_field(f
, r1
);
3776 o
->in1
= tcg_temp_new_i64();
3777 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3780 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 /* ??? Specification exception: r1 must be even. */
3783 int r1
= get_field(f
, r1
);
3784 o
->in1
= tcg_temp_new_i64();
3785 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3788 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3790 /* ??? Specification exception: r1 must be even. */
3791 int r1
= get_field(f
, r1
);
3792 o
->in1
= tcg_temp_new_i64();
3793 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3796 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3798 o
->in1
= load_reg(get_field(f
, r2
));
3801 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3803 o
->in1
= load_reg(get_field(f
, r3
));
3806 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3808 o
->in1
= regs
[get_field(f
, r3
)];
3812 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3814 o
->in1
= tcg_temp_new_i64();
3815 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3818 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 o
->in1
= tcg_temp_new_i64();
3821 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3824 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3826 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3829 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 o
->in1
= fregs
[get_field(f
, r1
)];
3835 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3837 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3840 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3843 o
->in1
= tcg_temp_new_i64();
3844 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3847 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3850 o
->in1
= tcg_temp_new_i64();
3851 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3854 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3857 o
->in1
= tcg_temp_new_i64();
3858 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3861 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3864 o
->in1
= tcg_temp_new_i64();
3865 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3868 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3871 o
->in1
= tcg_temp_new_i64();
3872 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3875 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3878 o
->in1
= tcg_temp_new_i64();
3879 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3882 /* ====================================================================== */
3883 /* The "INput 2" generators. These load the second operand to an insn. */
3885 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3887 o
->in2
= load_reg(get_field(f
, r2
));
3890 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3892 o
->in2
= regs
[get_field(f
, r2
)];
3896 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3898 int r2
= get_field(f
, r2
);
3900 o
->in2
= load_reg(r2
);
3904 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3906 o
->in2
= tcg_temp_new_i64();
3907 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3910 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3912 o
->in2
= tcg_temp_new_i64();
3913 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3916 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3918 o
->in2
= tcg_temp_new_i64();
3919 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3922 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3924 o
->in2
= tcg_temp_new_i64();
3925 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3928 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3930 o
->in2
= load_reg(get_field(f
, r3
));
3933 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3935 o
->in2
= tcg_temp_new_i64();
3936 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3939 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3941 o
->in2
= tcg_temp_new_i64();
3942 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3945 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3950 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3952 o
->in2
= fregs
[get_field(f
, r2
)];
3956 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3958 int f2
= get_field(f
, r2
);
3960 o
->in2
= fregs
[(f2
+ 2) & 15];
3961 o
->g_in1
= o
->g_in2
= true;
3964 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3966 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3967 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3970 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3972 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3975 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3977 help_l2_shift(s
, f
, o
, 31);
3980 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3982 help_l2_shift(s
, f
, o
, 63);
3985 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3988 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3991 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3994 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3997 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4000 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4003 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4006 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4009 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4012 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
/* in2_mri2_*: load the second operand from a PC-relative storage
   location, replacing the address in o->in2 with the loaded value.
   NOTE(review): as with the in2_m2_* group, the statements that first
   compute the PC-relative address into o->in2 (presumably a call to
   in2_ri2) were lost in extraction — confirm against the original
   file.  */
/* Load an unsigned halfword (16 bits, zero-extended).  */
4015 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4018 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
/* Load a signed word (32 bits, sign-extended).  */
4021 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4024 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
/* Load an unsigned word (32 bits, zero-extended).  */
4027 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4030 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
/* Load a doubleword (64 bits).  */
4033 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4036 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4039 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4041 o
->in2
= tcg_const_i64(get_field(f
, i2
));
4044 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4046 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
4049 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4051 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
4054 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4056 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
4059 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4061 uint64_t i2
= (uint16_t)get_field(f
, i2
);
4062 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4065 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4067 uint64_t i2
= (uint32_t)get_field(f
, i2
);
4068 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
4071 /* ====================================================================== */
4073 /* Find opc within the table of insns. This is formulated as a switch
4074 statement so that (1) we get compile-time notice of cut-paste errors
4075 for duplicated opcodes, and (2) the compiler generates the binary
4076 search tree, rather than us having to post-process the table. */
/* C() is shorthand for D() with a zero trailing data field.  */
4078 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4079 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
/* First expansion of insn-data.def: build an enum of insn indexes.  */
4081 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4083 enum DisasInsnEnum
{
4084 #include "insn-data.def"
/* Second expansion: build the table of DisasInsn descriptors, wiring
   each column to the corresponding family of helper functions.
   NOTE(review): several lines of this macro (opcode/name/format
   members) are missing from this extract — confirm against the
   original file.  */
4088 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4093 .help_in1 = in1_##I1, \
4094 .help_in2 = in2_##I2, \
4095 .help_prep = prep_##P, \
4096 .help_wout = wout_##W, \
4097 .help_cout = cout_##CC, \
4098 .help_op = op_##OP, \
4102 /* Allow 0 to be used for NULL in the table below. */
4110 static const DisasInsn insn_info
[] = {
4111 #include "insn-data.def"
/* Third expansion: a switch mapping a 16-bit (op << 8 | op2) code to
   its table entry.  NOTE(review): the switch statement wrapper around
   this include was lost in extraction.  */
4115 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4116 case OPC: return &insn_info[insn_ ## NM];
4118 static const DisasInsn
*lookup_opc(uint16_t opc
)
4121 #include "insn-data.def"
4130 /* Extract a field from the insn. The INSN should be left-aligned in
4131 the uint64_t so that we can more easily utilize the big-bit-endian
4132 definitions we extract from the Principles of Operation. */
4134 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4142 /* Zero extract the field from the insn. */
4143 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4145 /* Sign-extend, or un-swap the field as necessary. */
/* NOTE(review): the switch statement and several case bodies here were
   lost in extraction — only fragments of the cases are visible.  */
4147 case 0: /* unsigned */
4149 case 1: /* signed */
4150 assert(f
->size
<= 32);
/* m is the sign bit of the field, used to sign-extend r.  */
4151 m
= 1u << (f
->size
- 1);
4154 case 2: /* dl+dh split, signed 20 bit. */
/* Reassemble the high (dh, signed byte) and low (dl) halves.  */
4155 r
= ((int8_t)r
<< 12) | (r
>> 8);
4161 /* Validate that the "compressed" encoding we selected above is valid.
4162 I.e. we haven't made two different original fields overlap. */
4163 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4164 o
->presentC
|= 1 << f
->indexC
;
4165 o
->presentO
|= 1 << f
->indexO
;
/* Store the extracted value at its compressed index.  */
4167 o
->c
[f
->indexC
] = r
;
4170 /* Lookup the insn at the current PC, extracting the operands into O and
4171 returning the info struct for the insn. Returns NULL for invalid insn. */
/* NOTE(review): numerous lines of this function (parameter list tail,
   local declarations for op/op2/ilen/i, the switch wrappers, and the
   length-based dispatch) were lost in extraction; only the visible
   statements below can be confirmed from here.  */
4173 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4176 uint64_t insn
, pc
= s
->pc
;
4178 const DisasInsn
*info
;
/* Read the first halfword; its high byte determines the insn length.  */
4180 insn
= ld_code2(env
, pc
);
4181 op
= (insn
>> 8) & 0xff;
4182 ilen
= get_ilen(op
);
4183 s
->next_pc
= s
->pc
+ ilen
;
/* For longer insns, read the remaining halfwords, left-aligning the
   whole insn in the 64-bit value.  */
4190 insn
= ld_code4(env
, pc
) << 32;
4193 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4199 /* We can't actually determine the insn format until we've looked up
4200 the full insn opcode. Which we can't do without locating the
4201 secondary opcode. Assume by default that OP2 is at bit 40; for
4202 those smaller insns that don't actually have a secondary opcode
4203 this will correctly result in OP2 = 0. */
4209 case 0xb2: /* S, RRF, RRE */
4210 case 0xb3: /* RRE, RRD, RRF */
4211 case 0xb9: /* RRE, RRF */
4212 case 0xe5: /* SSE, SIL */
/* Secondary opcode in the second byte.  */
4213 op2
= (insn
<< 8) >> 56;
4217 case 0xc0: /* RIL */
4218 case 0xc2: /* RIL */
4219 case 0xc4: /* RIL */
4220 case 0xc6: /* RIL */
4221 case 0xc8: /* SSF */
4222 case 0xcc: /* RIL */
/* Secondary opcode in the low nibble of the second byte.  */
4223 op2
= (insn
<< 12) >> 60;
4225 case 0xd0 ... 0xdf: /* SS */
4231 case 0xee ... 0xf3: /* SS */
4232 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bit 40 (zero when absent).  */
4236 op2
= (insn
<< 40) >> 56;
/* Clear the operand fields before extraction.  */
4240 memset(f
, 0, sizeof(*f
));
4244 /* Lookup the instruction. */
4245 info
= lookup_opc(op
<< 8 | op2
);
4247 /* If we found it, extract the operands. */
4249 DisasFormat fmt
= info
->fmt
;
4252 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4253 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Decode and translate a single target instruction at s->pc,
   dispatching through the DisasInsn helper callbacks
   (in1/in2/prep/op/wout/cout), and falling back to the legacy
   interpreter (disas_s390_insn) when the insn is not in the decode
   table.  Returns an ExitStatus describing how translation should
   continue.
   NOTE(review): several original lines (braces, the declarations of
   f and o, some switch cases) were lost in extraction; control flow
   between the visible statements cannot be fully confirmed here.  */
4259 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4261 const DisasInsn
*insn
;
4262 ExitStatus ret
= NO_EXIT
;
/* Decode the insn and extract its operand fields into f.  */
4266 insn
= extract_insn(env
, s
, &f
);
4268 /* If not found, try the old interpreter. This includes ILLOPC. */
4270 disas_s390_insn(env
, s
);
4271 switch (s
->is_jmp
) {
4279 ret
= EXIT_PC_UPDATED
;
4282 ret
= EXIT_NORETURN
;
4292 /* Set up the structures we use to communicate with the helpers. */
4295 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4296 TCGV_UNUSED_I64(o
.out
);
4297 TCGV_UNUSED_I64(o
.out2
);
4298 TCGV_UNUSED_I64(o
.in1
);
4299 TCGV_UNUSED_I64(o
.in2
);
4300 TCGV_UNUSED_I64(o
.addr1
);
4302 /* Implement the instruction. */
4303 if (insn
->help_in1
) {
4304 insn
->help_in1(s
, &f
, &o
);
4306 if (insn
->help_in2
) {
4307 insn
->help_in2(s
, &f
, &o
);
4309 if (insn
->help_prep
) {
4310 insn
->help_prep(s
, &f
, &o
);
/* The op helper produces the ExitStatus for this insn.  */
4312 if (insn
->help_op
) {
4313 ret
= insn
->help_op(s
, &o
);
4315 if (insn
->help_wout
) {
4316 insn
->help_wout(s
, &f
, &o
);
4318 if (insn
->help_cout
) {
4319 insn
->help_cout(s
, &o
);
4322 /* Free any temporaries created by the helpers. */
/* Operands flagged g_* alias globals and must not be freed.  */
4323 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4324 tcg_temp_free_i64(o
.out
);
4326 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4327 tcg_temp_free_i64(o
.out2
);
4329 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4330 tcg_temp_free_i64(o
.in1
);
4332 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4333 tcg_temp_free_i64(o
.in2
);
4335 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4336 tcg_temp_free_i64(o
.addr1
);
4339 /* Advance to the next instruction. */
/* Translate a block of target instructions into TCG ops, stopping at a
   page boundary, op-buffer exhaustion, instruction-count limit, a
   control-flow change, or single-step.  The final parameter (not
   visible here) presumably selects whether per-insn search data
   (gen_opc_pc etc.) is recorded — confirm against the original file.
   NOTE(review): many original lines are missing from this extract
   (discontiguous source numbering); locals such as dc, j, lj, bp,
   status and do_debug are used below but their declarations were
   lost.  */
4344 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4345 TranslationBlock
*tb
,
4349 target_ulong pc_start
;
4350 uint64_t next_page_start
;
4351 uint16_t *gen_opc_end
;
4353 int num_insns
, max_insns
;
/* Outside 64-bit mode only 31 address bits are significant.  */
4361 if (!(tb
->flags
& FLAG_MASK_64
)) {
4362 pc_start
&= 0x7fffffff;
4367 dc
.cc_op
= CC_OP_DYNAMIC
;
4368 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4369 dc
.is_jmp
= DISAS_NEXT
;
4371 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4373 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4376 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4377 if (max_insns
== 0) {
4378 max_insns
= CF_COUNT_MASK
;
/* Record search data for this insn, padding any skipped slots.  */
4385 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4389 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4392 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4393 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4394 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4395 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4397 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4401 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4402 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop translation at a breakpoint on the current PC.  */
4406 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4407 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4408 if (bp
->pc
== dc
.pc
) {
4409 status
= EXIT_PC_STALE
;
4415 if (status
== NO_EXIT
) {
4416 status
= translate_one(env
, &dc
);
4419 /* If we reach a page boundary, are single stepping,
4420 or exhaust instruction count, stop generation. */
4421 if (status
== NO_EXIT
4422 && (dc
.pc
>= next_page_start
4423 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4424 || num_insns
>= max_insns
4426 || env
->singlestep_enabled
)) {
4427 status
= EXIT_PC_STALE
;
4429 } while (status
== NO_EXIT
);
4431 if (tb
->cflags
& CF_LAST_IO
) {
/* Flush the (possibly stale) PC back to env->psw.addr.  */
4440 update_psw_addr(&dc
);
4442 case EXIT_PC_UPDATED
:
4443 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4444 gen_op_calc_cc(&dc
);
4446 /* Next TB starts off with CC_OP_DYNAMIC,
4447 so make sure the cc op type is in env */
4448 gen_op_set_cc_op(&dc
);
4451 gen_exception(EXCP_DEBUG
);
4453 /* Generate the return instruction */
4461 gen_icount_end(tb
, num_insns
);
4462 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* Pad the remainder of the search-data arrays.  */
4464 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4467 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4470 tb
->size
= dc
.pc
- pc_start
;
4471 tb
->icount
= num_insns
;
/* Optionally dump the guest assembly that was translated.  */
4474 #if defined(S390X_DEBUG_DISAS)
4475 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4476 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4477 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4483 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4485 gen_intermediate_code_internal(env
, tb
, 0);
4488 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4490 gen_intermediate_code_internal(env
, tb
, 1);
4493 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4496 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4497 cc_op
= gen_opc_cc_op
[pc_pos
];
4498 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {