4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
36 /* global register indexes */
37 static TCGv_ptr cpu_env
;
39 #include "exec/gen-icount.h"
45 /* Information that (most) every instruction needs to manipulate. */
46 typedef struct DisasContext DisasContext
;
47 typedef struct DisasInsn DisasInsn
;
48 typedef struct DisasFields DisasFields
;
51 struct TranslationBlock
*tb
;
52 const DisasInsn
*insn
;
56 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 static void gen_op_calc_cc(DisasContext
*s
);
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log the raw instruction bits when verbose disas logging is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
86 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
88 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
89 if (s
->tb
->flags
& FLAG_MASK_32
) {
90 return pc
| 0x80000000;
96 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
101 if (env
->cc_op
> 3) {
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
103 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
105 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
106 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
109 for (i
= 0; i
< 16; i
++) {
110 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
112 cpu_fprintf(f
, "\n");
118 for (i
= 0; i
< 16; i
++) {
119 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
121 cpu_fprintf(f
, "\n");
127 #ifndef CONFIG_USER_ONLY
128 for (i
= 0; i
< 16; i
++) {
129 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
131 cpu_fprintf(f
, "\n");
138 #ifdef DEBUG_INLINE_BRANCHES
139 for (i
= 0; i
< CC_OP_MAX
; i
++) {
140 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
141 inline_branch_miss
[i
], inline_branch_hit
[i
]);
145 cpu_fprintf(f
, "\n");
148 static TCGv_i64 psw_addr
;
149 static TCGv_i64 psw_mask
;
151 static TCGv_i32 cc_op
;
152 static TCGv_i64 cc_src
;
153 static TCGv_i64 cc_dst
;
154 static TCGv_i64 cc_vr
;
156 static char cpu_reg_names
[32][4];
157 static TCGv_i64 regs
[16];
158 static TCGv_i64 fregs
[16];
160 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
162 void s390x_translate_init(void)
166 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
167 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
168 offsetof(CPUS390XState
, psw
.addr
),
170 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.mask
),
174 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
176 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
178 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
180 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
185 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, regs
[i
]),
190 for (i
= 0; i
< 16; i
++) {
191 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
192 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
193 offsetof(CPUS390XState
, fregs
[i
].d
),
194 cpu_reg_names
[i
+ 16]);
197 /* register helpers */
202 static inline TCGv_i64
load_reg(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_mov_i64(r
, regs
[reg
]);
209 static inline TCGv_i64
load_freg(int reg
)
211 TCGv_i64 r
= tcg_temp_new_i64();
212 tcg_gen_mov_i64(r
, fregs
[reg
]);
216 static inline TCGv_i32
load_freg32(int reg
)
218 TCGv_i32 r
= tcg_temp_new_i32();
219 #if HOST_LONG_BITS == 32
220 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
222 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
227 static inline TCGv_i32
load_reg32(int reg
)
229 TCGv_i32 r
= tcg_temp_new_i32();
230 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
234 static inline TCGv_i64
load_reg32_i64(int reg
)
236 TCGv_i64 r
= tcg_temp_new_i64();
237 tcg_gen_ext32s_i64(r
, regs
[reg
]);
241 static inline void store_reg(int reg
, TCGv_i64 v
)
243 tcg_gen_mov_i64(regs
[reg
], v
);
246 static inline void store_freg(int reg
, TCGv_i64 v
)
248 tcg_gen_mov_i64(fregs
[reg
], v
);
251 static inline void store_reg32(int reg
, TCGv_i32 v
)
253 /* 32 bit register writes keep the upper half */
254 #if HOST_LONG_BITS == 32
255 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
257 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
258 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
262 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
264 /* 32 bit register writes keep the upper half */
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
268 static inline void store_reg16(int reg
, TCGv_i32 v
)
270 /* 16 bit register writes keep the upper bytes */
271 #if HOST_LONG_BITS == 32
272 tcg_gen_deposit_i32(TCGV_LOW(regs
[reg
]), TCGV_LOW(regs
[reg
]), v
, 0, 16);
274 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
275 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 16);
279 static inline void store_reg8(int reg
, TCGv_i64 v
)
281 /* 8 bit register writes keep the upper bytes */
282 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 8);
285 static inline void store_freg32(int reg
, TCGv_i32 v
)
287 /* 32 bit register writes keep the lower half */
288 #if HOST_LONG_BITS == 32
289 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
291 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
292 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
296 static inline void return_low128(TCGv_i64 dest
)
298 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
301 static inline void update_psw_addr(DisasContext
*s
)
304 tcg_gen_movi_i64(psw_addr
, s
->pc
);
307 static inline void potential_page_fault(DisasContext
*s
)
309 #ifndef CONFIG_USER_ONLY
315 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
317 return (uint64_t)cpu_lduw_code(env
, pc
);
320 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
322 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
325 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
327 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
330 static inline int get_mem_index(DisasContext
*s
)
332 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
333 case PSW_ASC_PRIMARY
>> 32:
335 case PSW_ASC_SECONDARY
>> 32:
337 case PSW_ASC_HOME
>> 32:
345 static void gen_exception(int excp
)
347 TCGv_i32 tmp
= tcg_const_i32(excp
);
348 gen_helper_exception(cpu_env
, tmp
);
349 tcg_temp_free_i32(tmp
);
352 static void gen_program_exception(DisasContext
*s
, int code
)
356 /* Remember what pgm exception this was. */
357 tmp
= tcg_const_i32(code
);
358 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
359 tcg_temp_free_i32(tmp
);
361 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
362 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
363 tcg_temp_free_i32(tmp
);
365 /* Advance past instruction. */
372 /* Trigger exception. */
373 gen_exception(EXCP_PGM
);
376 s
->is_jmp
= DISAS_EXCP
;
379 static inline void gen_illegal_opcode(DisasContext
*s
)
381 gen_program_exception(s
, PGM_SPECIFICATION
);
384 static inline void check_privileged(DisasContext
*s
)
386 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
387 gen_program_exception(s
, PGM_PRIVILEGED
);
391 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
395 /* 31-bitify the immediate part; register contents are dealt with below */
396 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
402 tmp
= tcg_const_i64(d2
);
403 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
408 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
412 tmp
= tcg_const_i64(d2
);
413 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
418 tmp
= tcg_const_i64(d2
);
421 /* 31-bit mode mask if there are values loaded from registers */
422 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
423 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
429 static void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
431 s
->cc_op
= CC_OP_CONST0
+ val
;
434 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
436 tcg_gen_discard_i64(cc_src
);
437 tcg_gen_mov_i64(cc_dst
, dst
);
438 tcg_gen_discard_i64(cc_vr
);
442 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
444 tcg_gen_discard_i64(cc_src
);
445 tcg_gen_extu_i32_i64(cc_dst
, dst
);
446 tcg_gen_discard_i64(cc_vr
);
450 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
453 tcg_gen_mov_i64(cc_src
, src
);
454 tcg_gen_mov_i64(cc_dst
, dst
);
455 tcg_gen_discard_i64(cc_vr
);
459 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
462 tcg_gen_extu_i32_i64(cc_src
, src
);
463 tcg_gen_extu_i32_i64(cc_dst
, dst
);
464 tcg_gen_discard_i64(cc_vr
);
468 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
469 TCGv_i64 dst
, TCGv_i64 vr
)
471 tcg_gen_mov_i64(cc_src
, src
);
472 tcg_gen_mov_i64(cc_dst
, dst
);
473 tcg_gen_mov_i64(cc_vr
, vr
);
477 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
479 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
482 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
484 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
487 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
490 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
493 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
496 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
499 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
501 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
504 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
506 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
509 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
511 /* XXX optimize for the constant? put it in s? */
512 TCGv_i32 tmp
= tcg_const_i32(v2
);
513 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
514 tcg_temp_free_i32(tmp
);
517 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
519 TCGv_i32 tmp
= tcg_const_i32(v2
);
520 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
521 tcg_temp_free_i32(tmp
);
524 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
526 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
529 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
531 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
534 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
536 TCGv_i64 tmp
= tcg_const_i64(v2
);
538 tcg_temp_free_i64(tmp
);
541 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
543 TCGv_i64 tmp
= tcg_const_i64(v2
);
545 tcg_temp_free_i64(tmp
);
548 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
550 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
553 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
555 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
558 static void set_cc_icm(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
560 gen_op_update2_cc_i32(s
, CC_OP_ICM
, v1
, v2
);
563 static void set_cc_cmp_f32_i64(DisasContext
*s
, TCGv_i32 v1
, TCGv_i64 v2
)
565 tcg_gen_extu_i32_i64(cc_src
, v1
);
566 tcg_gen_mov_i64(cc_dst
, v2
);
567 tcg_gen_discard_i64(cc_vr
);
568 s
->cc_op
= CC_OP_LTGT_F32
;
571 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i32 v1
)
573 gen_op_update1_cc_i32(s
, CC_OP_NZ_F32
, v1
);
576 /* CC value is in env->cc_op */
577 static inline void set_cc_static(DisasContext
*s
)
579 tcg_gen_discard_i64(cc_src
);
580 tcg_gen_discard_i64(cc_dst
);
581 tcg_gen_discard_i64(cc_vr
);
582 s
->cc_op
= CC_OP_STATIC
;
585 static inline void gen_op_set_cc_op(DisasContext
*s
)
587 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
588 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
592 static inline void gen_update_cc_op(DisasContext
*s
)
597 /* calculates cc into cc_op */
598 static void gen_op_calc_cc(DisasContext
*s
)
600 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
601 TCGv_i64 dummy
= tcg_const_i64(0);
608 /* s->cc_op is the cc value */
609 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
612 /* env->cc_op already is the cc value */
626 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
631 case CC_OP_LTUGTU_32
:
632 case CC_OP_LTUGTU_64
:
639 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
654 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
657 /* unknown operation - assume 3 arguments and cc_op in env */
658 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
664 tcg_temp_free_i32(local_cc_op
);
665 tcg_temp_free_i64(dummy
);
667 /* We now have cc in cc_op as constant */
671 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
675 *r1
= (insn
>> 4) & 0xf;
679 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
680 int *x2
, int *b2
, int *d2
)
684 *r1
= (insn
>> 20) & 0xf;
685 *x2
= (insn
>> 16) & 0xf;
686 *b2
= (insn
>> 12) & 0xf;
689 return get_address(s
, *x2
, *b2
, *d2
);
692 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
697 *r1
= (insn
>> 20) & 0xf;
699 *r3
= (insn
>> 16) & 0xf;
700 *b2
= (insn
>> 12) & 0xf;
704 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
709 *i2
= (insn
>> 16) & 0xff;
710 *b1
= (insn
>> 12) & 0xf;
713 return get_address(s
, 0, *b1
, *d1
);
716 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
718 /* NOTE: we handle the case where the TB spans two pages here */
719 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
720 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
721 && !s
->singlestep_enabled
722 && !(s
->tb
->cflags
& CF_LAST_IO
));
725 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
729 if (use_goto_tb(s
, pc
)) {
730 tcg_gen_goto_tb(tb_num
);
731 tcg_gen_movi_i64(psw_addr
, pc
);
732 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
734 /* jump to another page: currently not optimized */
735 tcg_gen_movi_i64(psw_addr
, pc
);
740 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
742 #ifdef DEBUG_INLINE_BRANCHES
743 inline_branch_miss
[cc_op
]++;
747 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
749 #ifdef DEBUG_INLINE_BRANCHES
750 inline_branch_hit
[cc_op
]++;
754 /* Table of mask values to comparison codes, given a comparison as input.
755 For a true comparison CC=3 will never be set, but we treat this
756 conservatively for possible use when CC=3 indicates overflow. */
757 static const TCGCond ltgt_cond
[16] = {
758 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
759 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
760 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
761 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
762 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
763 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
764 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
765 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
768 /* Table of mask values to comparison codes, given a logic op as input.
769 For such, only CC=0 and CC=1 should be possible. */
770 static const TCGCond nz_cond
[16] = {
772 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
774 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
776 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
777 /* EQ | NE | x | x */
778 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
781 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
782 details required to generate a TCG comparison. */
783 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
786 enum cc_op old_cc_op
= s
->cc_op
;
788 if (mask
== 15 || mask
== 0) {
789 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
792 c
->g1
= c
->g2
= true;
797 /* Find the TCG condition for the mask + cc op. */
803 cond
= ltgt_cond
[mask
];
804 if (cond
== TCG_COND_NEVER
) {
807 account_inline_branch(s
, old_cc_op
);
810 case CC_OP_LTUGTU_32
:
811 case CC_OP_LTUGTU_64
:
812 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
813 if (cond
== TCG_COND_NEVER
) {
816 account_inline_branch(s
, old_cc_op
);
820 cond
= nz_cond
[mask
];
821 if (cond
== TCG_COND_NEVER
) {
824 account_inline_branch(s
, old_cc_op
);
839 account_inline_branch(s
, old_cc_op
);
854 account_inline_branch(s
, old_cc_op
);
859 /* Calculate cc value. */
864 /* Jump based on CC. We'll load up the real cond below;
865 the assignment here merely avoids a compiler warning. */
866 account_noninline_branch(s
, old_cc_op
);
867 old_cc_op
= CC_OP_STATIC
;
868 cond
= TCG_COND_NEVER
;
872 /* Load up the arguments of the comparison. */
874 c
->g1
= c
->g2
= false;
878 c
->u
.s32
.a
= tcg_temp_new_i32();
879 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
880 c
->u
.s32
.b
= tcg_const_i32(0);
883 case CC_OP_LTUGTU_32
:
885 c
->u
.s32
.a
= tcg_temp_new_i32();
886 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
887 c
->u
.s32
.b
= tcg_temp_new_i32();
888 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
895 c
->u
.s64
.b
= tcg_const_i64(0);
899 case CC_OP_LTUGTU_64
:
902 c
->g1
= c
->g2
= true;
907 c
->u
.s64
.a
= tcg_temp_new_i64();
908 c
->u
.s64
.b
= tcg_const_i64(0);
909 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
917 case 0x8 | 0x4 | 0x2: /* cc != 3 */
919 c
->u
.s32
.b
= tcg_const_i32(3);
921 case 0x8 | 0x4 | 0x1: /* cc != 2 */
923 c
->u
.s32
.b
= tcg_const_i32(2);
925 case 0x8 | 0x2 | 0x1: /* cc != 1 */
927 c
->u
.s32
.b
= tcg_const_i32(1);
929 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
932 c
->u
.s32
.a
= tcg_temp_new_i32();
933 c
->u
.s32
.b
= tcg_const_i32(0);
934 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
936 case 0x8 | 0x4: /* cc < 2 */
938 c
->u
.s32
.b
= tcg_const_i32(2);
940 case 0x8: /* cc == 0 */
942 c
->u
.s32
.b
= tcg_const_i32(0);
944 case 0x4 | 0x2 | 0x1: /* cc != 0 */
946 c
->u
.s32
.b
= tcg_const_i32(0);
948 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
951 c
->u
.s32
.a
= tcg_temp_new_i32();
952 c
->u
.s32
.b
= tcg_const_i32(0);
953 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
955 case 0x4: /* cc == 1 */
957 c
->u
.s32
.b
= tcg_const_i32(1);
959 case 0x2 | 0x1: /* cc > 1 */
961 c
->u
.s32
.b
= tcg_const_i32(1);
963 case 0x2: /* cc == 2 */
965 c
->u
.s32
.b
= tcg_const_i32(2);
967 case 0x1: /* cc == 3 */
969 c
->u
.s32
.b
= tcg_const_i32(3);
972 /* CC is masked by something else: (8 >> cc) & mask. */
975 c
->u
.s32
.a
= tcg_const_i32(8);
976 c
->u
.s32
.b
= tcg_const_i32(0);
977 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
978 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
989 static void free_compare(DisasCompare
*c
)
993 tcg_temp_free_i64(c
->u
.s64
.a
);
995 tcg_temp_free_i32(c
->u
.s32
.a
);
1000 tcg_temp_free_i64(c
->u
.s64
.b
);
1002 tcg_temp_free_i32(c
->u
.s32
.b
);
1007 static void gen_op_mvc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1011 int l_memset
= gen_new_label();
1012 int l_out
= gen_new_label();
1013 TCGv_i64 dest
= tcg_temp_local_new_i64();
1014 TCGv_i64 src
= tcg_temp_local_new_i64();
1017 /* Find out if we should use the inline version of mvc */
1032 /* Fall back to helper */
1033 vl
= tcg_const_i32(l
);
1034 potential_page_fault(s
);
1035 gen_helper_mvc(cpu_env
, vl
, s1
, s2
);
1036 tcg_temp_free_i32(vl
);
1040 tcg_gen_mov_i64(dest
, s1
);
1041 tcg_gen_mov_i64(src
, s2
);
1043 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
1044 /* XXX what if we overflow while moving? */
1045 tcg_gen_andi_i64(dest
, dest
, 0x7fffffffUL
);
1046 tcg_gen_andi_i64(src
, src
, 0x7fffffffUL
);
1049 tmp
= tcg_temp_new_i64();
1050 tcg_gen_addi_i64(tmp
, src
, 1);
1051 tcg_gen_brcond_i64(TCG_COND_EQ
, dest
, tmp
, l_memset
);
1052 tcg_temp_free_i64(tmp
);
1056 tmp
= tcg_temp_new_i64();
1058 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1059 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1061 tcg_temp_free_i64(tmp
);
1064 tmp
= tcg_temp_new_i64();
1066 tcg_gen_qemu_ld16u(tmp
, src
, get_mem_index(s
));
1067 tcg_gen_qemu_st16(tmp
, dest
, get_mem_index(s
));
1069 tcg_temp_free_i64(tmp
);
1072 tmp
= tcg_temp_new_i64();
1074 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1075 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1077 tcg_temp_free_i64(tmp
);
1080 tmp
= tcg_temp_new_i64();
1081 tmp2
= tcg_temp_new_i64();
1083 tcg_gen_qemu_ld32u(tmp
, src
, get_mem_index(s
));
1084 tcg_gen_addi_i64(src
, src
, 4);
1085 tcg_gen_qemu_ld8u(tmp2
, src
, get_mem_index(s
));
1086 tcg_gen_qemu_st32(tmp
, dest
, get_mem_index(s
));
1087 tcg_gen_addi_i64(dest
, dest
, 4);
1088 tcg_gen_qemu_st8(tmp2
, dest
, get_mem_index(s
));
1090 tcg_temp_free_i64(tmp
);
1091 tcg_temp_free_i64(tmp2
);
1094 tmp
= tcg_temp_new_i64();
1096 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1097 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1099 tcg_temp_free_i64(tmp
);
1102 /* The inline version can become too big for too uneven numbers, only
1103 use it on known good lengths */
1104 tmp
= tcg_temp_new_i64();
1105 tmp2
= tcg_const_i64(8);
1106 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1107 tcg_gen_qemu_ld64(tmp
, src
, get_mem_index(s
));
1108 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1110 tcg_gen_add_i64(src
, src
, tmp2
);
1111 tcg_gen_add_i64(dest
, dest
, tmp2
);
1114 tcg_temp_free_i64(tmp2
);
1115 tmp2
= tcg_const_i64(1);
1117 for (; i
<= l
; i
++) {
1118 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1119 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1121 tcg_gen_add_i64(src
, src
, tmp2
);
1122 tcg_gen_add_i64(dest
, dest
, tmp2
);
1125 tcg_temp_free_i64(tmp2
);
1126 tcg_temp_free_i64(tmp
);
1132 gen_set_label(l_memset
);
1133 /* memset case (dest == (src + 1)) */
1135 tmp
= tcg_temp_new_i64();
1136 tmp2
= tcg_temp_new_i64();
1137 /* fill tmp with the byte */
1138 tcg_gen_qemu_ld8u(tmp
, src
, get_mem_index(s
));
1139 tcg_gen_shli_i64(tmp2
, tmp
, 8);
1140 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1141 tcg_gen_shli_i64(tmp2
, tmp
, 16);
1142 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1143 tcg_gen_shli_i64(tmp2
, tmp
, 32);
1144 tcg_gen_or_i64(tmp
, tmp
, tmp2
);
1145 tcg_temp_free_i64(tmp2
);
1147 tmp2
= tcg_const_i64(8);
1149 for (i
= 0; (i
+ 7) <= l
; i
+= 8) {
1150 tcg_gen_qemu_st64(tmp
, dest
, get_mem_index(s
));
1151 tcg_gen_addi_i64(dest
, dest
, 8);
1154 tcg_temp_free_i64(tmp2
);
1155 tmp2
= tcg_const_i64(1);
1157 for (; i
<= l
; i
++) {
1158 tcg_gen_qemu_st8(tmp
, dest
, get_mem_index(s
));
1159 tcg_gen_addi_i64(dest
, dest
, 1);
1162 tcg_temp_free_i64(tmp2
);
1163 tcg_temp_free_i64(tmp
);
1165 gen_set_label(l_out
);
1167 tcg_temp_free(dest
);
1171 static void gen_op_clc(DisasContext
*s
, int l
, TCGv_i64 s1
, TCGv_i64 s2
)
1177 /* check for simple 32bit or 64bit match */
1180 tmp
= tcg_temp_new_i64();
1181 tmp2
= tcg_temp_new_i64();
1183 tcg_gen_qemu_ld8u(tmp
, s1
, get_mem_index(s
));
1184 tcg_gen_qemu_ld8u(tmp2
, s2
, get_mem_index(s
));
1185 cmp_u64(s
, tmp
, tmp2
);
1187 tcg_temp_free_i64(tmp
);
1188 tcg_temp_free_i64(tmp2
);
1191 tmp
= tcg_temp_new_i64();
1192 tmp2
= tcg_temp_new_i64();
1194 tcg_gen_qemu_ld16u(tmp
, s1
, get_mem_index(s
));
1195 tcg_gen_qemu_ld16u(tmp2
, s2
, get_mem_index(s
));
1196 cmp_u64(s
, tmp
, tmp2
);
1198 tcg_temp_free_i64(tmp
);
1199 tcg_temp_free_i64(tmp2
);
1202 tmp
= tcg_temp_new_i64();
1203 tmp2
= tcg_temp_new_i64();
1205 tcg_gen_qemu_ld32u(tmp
, s1
, get_mem_index(s
));
1206 tcg_gen_qemu_ld32u(tmp2
, s2
, get_mem_index(s
));
1207 cmp_u64(s
, tmp
, tmp2
);
1209 tcg_temp_free_i64(tmp
);
1210 tcg_temp_free_i64(tmp2
);
1213 tmp
= tcg_temp_new_i64();
1214 tmp2
= tcg_temp_new_i64();
1216 tcg_gen_qemu_ld64(tmp
, s1
, get_mem_index(s
));
1217 tcg_gen_qemu_ld64(tmp2
, s2
, get_mem_index(s
));
1218 cmp_u64(s
, tmp
, tmp2
);
1220 tcg_temp_free_i64(tmp
);
1221 tcg_temp_free_i64(tmp2
);
1225 potential_page_fault(s
);
1226 vl
= tcg_const_i32(l
);
1227 gen_helper_clc(cc_op
, cpu_env
, vl
, s1
, s2
);
1228 tcg_temp_free_i32(vl
);
1232 static void disas_e3(CPUS390XState
*env
, DisasContext
* s
, int op
, int r1
,
1233 int x2
, int b2
, int d2
)
1235 TCGv_i64 addr
, tmp2
, tmp3
;
1238 LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
1239 op
, r1
, x2
, b2
, d2
);
1240 addr
= get_address(s
, x2
, b2
, d2
);
1242 case 0xf: /* LRVG R1,D2(X2,B2) [RXE] */
1243 tmp2
= tcg_temp_new_i64();
1244 tcg_gen_qemu_ld64(tmp2
, addr
, get_mem_index(s
));
1245 tcg_gen_bswap64_i64(tmp2
, tmp2
);
1246 store_reg(r1
, tmp2
);
1247 tcg_temp_free_i64(tmp2
);
1249 case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
1250 tmp2
= tcg_temp_new_i64();
1251 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1252 tcg_gen_andi_i64(tmp2
, tmp2
, 0x7fffffffULL
);
1253 store_reg(r1
, tmp2
);
1254 tcg_temp_free_i64(tmp2
);
1256 case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
1257 tmp2
= tcg_temp_new_i64();
1258 tmp32_1
= tcg_temp_new_i32();
1259 tcg_gen_qemu_ld32u(tmp2
, addr
, get_mem_index(s
));
1260 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1261 tcg_temp_free_i64(tmp2
);
1262 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1263 store_reg32(r1
, tmp32_1
);
1264 tcg_temp_free_i32(tmp32_1
);
1266 case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
1267 tmp2
= tcg_temp_new_i64();
1268 tmp32_1
= tcg_temp_new_i32();
1269 tcg_gen_qemu_ld16u(tmp2
, addr
, get_mem_index(s
));
1270 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
1271 tcg_temp_free_i64(tmp2
);
1272 tcg_gen_bswap16_i32(tmp32_1
, tmp32_1
);
1273 store_reg16(r1
, tmp32_1
);
1274 tcg_temp_free_i32(tmp32_1
);
1276 case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
1277 tmp32_1
= load_reg32(r1
);
1278 tmp2
= tcg_temp_new_i64();
1279 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
1280 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1281 tcg_temp_free_i32(tmp32_1
);
1282 tcg_gen_qemu_st32(tmp2
, addr
, get_mem_index(s
));
1283 tcg_temp_free_i64(tmp2
);
1285 case 0x73: /* ICY R1,D2(X2,B2) [RXY] */
1286 tmp3
= tcg_temp_new_i64();
1287 tcg_gen_qemu_ld8u(tmp3
, addr
, get_mem_index(s
));
1288 store_reg8(r1
, tmp3
);
1289 tcg_temp_free_i64(tmp3
);
1292 LOG_DISAS("illegal e3 operation 0x%x\n", op
);
1293 gen_illegal_opcode(s
);
1296 tcg_temp_free_i64(addr
);
1299 #ifndef CONFIG_USER_ONLY
1300 static void disas_e5(CPUS390XState
*env
, DisasContext
* s
, uint64_t insn
)
1303 int op
= (insn
>> 32) & 0xff;
1305 tmp
= get_address(s
, 0, (insn
>> 28) & 0xf, (insn
>> 16) & 0xfff);
1306 tmp2
= get_address(s
, 0, (insn
>> 12) & 0xf, insn
& 0xfff);
1308 LOG_DISAS("disas_e5: insn %" PRIx64
"\n", insn
);
1310 case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
1311 /* Test Protection */
1312 potential_page_fault(s
);
1313 gen_helper_tprot(cc_op
, tmp
, tmp2
);
1317 LOG_DISAS("illegal e5 operation 0x%x\n", op
);
1318 gen_illegal_opcode(s
);
1322 tcg_temp_free_i64(tmp
);
1323 tcg_temp_free_i64(tmp2
);
1327 static void disas_eb(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1328 int r3
, int b2
, int d2
)
1330 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
1331 TCGv_i32 tmp32_1
, tmp32_2
;
1334 LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
1335 op
, r1
, r3
, b2
, d2
);
1337 case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
1338 case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
1339 case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
1340 case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
1341 case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
1343 tmp
= get_address(s
, 0, b2
, d2
);
1344 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1346 tmp
= tcg_const_i64(d2
& 0x3f);
1350 tcg_gen_shr_i64(regs
[r1
], regs
[r3
], tmp
);
1353 tcg_gen_shl_i64(regs
[r1
], regs
[r3
], tmp
);
1356 tcg_gen_sar_i64(regs
[r1
], regs
[r3
], tmp
);
1359 tmp2
= tcg_temp_new_i64();
1360 tmp3
= tcg_temp_new_i64();
1361 gen_op_update2_cc_i64(s
, CC_OP_SLAG
, regs
[r3
], tmp
);
1362 tcg_gen_shl_i64(tmp2
, regs
[r3
], tmp
);
1363 /* override sign bit with source sign */
1364 tcg_gen_andi_i64(tmp2
, tmp2
, ~0x8000000000000000ULL
);
1365 tcg_gen_andi_i64(tmp3
, regs
[r3
], 0x8000000000000000ULL
);
1366 tcg_gen_or_i64(regs
[r1
], tmp2
, tmp3
);
1367 tcg_temp_free_i64(tmp2
);
1368 tcg_temp_free_i64(tmp3
);
1371 tcg_gen_rotl_i64(regs
[r1
], regs
[r3
], tmp
);
1378 set_cc_s64(s
, regs
[r1
]);
1380 tcg_temp_free_i64(tmp
);
1382 case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
1384 tmp
= get_address(s
, 0, b2
, d2
);
1385 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
1387 tmp
= tcg_const_i64(d2
& 0x3f);
1389 tmp32_1
= tcg_temp_new_i32();
1390 tmp32_2
= load_reg32(r3
);
1391 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
1394 tcg_gen_rotl_i32(tmp32_1
, tmp32_2
, tmp32_1
);
1400 store_reg32(r1
, tmp32_1
);
1401 tcg_temp_free_i64(tmp
);
1402 tcg_temp_free_i32(tmp32_1
);
1403 tcg_temp_free_i32(tmp32_2
);
1405 case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
1406 case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
1409 case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
1410 case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
1413 /* Apparently, unrolling lmg/stmg of any size gains performance -
1414 even for very long ones... */
1415 tmp
= get_address(s
, 0, b2
, d2
);
1416 tmp3
= tcg_const_i64(stm_len
);
1417 tmp4
= tcg_const_i64(op
== 0x26 ? 32 : 4);
1418 for (i
= r1
;; i
= (i
+ 1) % 16) {
1421 tcg_gen_qemu_ld64(regs
[i
], tmp
, get_mem_index(s
));
1424 tmp2
= tcg_temp_new_i64();
1425 #if HOST_LONG_BITS == 32
1426 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1427 tcg_gen_trunc_i64_i32(TCGV_HIGH(regs
[i
]), tmp2
);
1429 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
1430 tcg_gen_shl_i64(tmp2
, tmp2
, tmp4
);
1431 tcg_gen_ext32u_i64(regs
[i
], regs
[i
]);
1432 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
1434 tcg_temp_free_i64(tmp2
);
1437 tcg_gen_qemu_st64(regs
[i
], tmp
, get_mem_index(s
));
1440 tmp2
= tcg_temp_new_i64();
1441 tcg_gen_shr_i64(tmp2
, regs
[i
], tmp4
);
1442 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1443 tcg_temp_free_i64(tmp2
);
1451 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
1453 tcg_temp_free_i64(tmp
);
1454 tcg_temp_free_i64(tmp3
);
1455 tcg_temp_free_i64(tmp4
);
1457 case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
1458 tmp
= get_address(s
, 0, b2
, d2
);
1459 tmp32_1
= tcg_const_i32(r1
);
1460 tmp32_2
= tcg_const_i32(r3
);
1461 potential_page_fault(s
);
1462 gen_helper_stcmh(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1463 tcg_temp_free_i64(tmp
);
1464 tcg_temp_free_i32(tmp32_1
);
1465 tcg_temp_free_i32(tmp32_2
);
1467 #ifndef CONFIG_USER_ONLY
1468 case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
1470 check_privileged(s
);
1471 tmp
= get_address(s
, 0, b2
, d2
);
1472 tmp32_1
= tcg_const_i32(r1
);
1473 tmp32_2
= tcg_const_i32(r3
);
1474 potential_page_fault(s
);
1475 gen_helper_lctlg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1476 tcg_temp_free_i64(tmp
);
1477 tcg_temp_free_i32(tmp32_1
);
1478 tcg_temp_free_i32(tmp32_2
);
1480 case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
1482 check_privileged(s
);
1483 tmp
= get_address(s
, 0, b2
, d2
);
1484 tmp32_1
= tcg_const_i32(r1
);
1485 tmp32_2
= tcg_const_i32(r3
);
1486 potential_page_fault(s
);
1487 gen_helper_stctg(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1488 tcg_temp_free_i64(tmp
);
1489 tcg_temp_free_i32(tmp32_1
);
1490 tcg_temp_free_i32(tmp32_2
);
1493 case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
1494 tmp
= get_address(s
, 0, b2
, d2
);
1495 tmp32_1
= tcg_const_i32(r1
);
1496 tmp32_2
= tcg_const_i32(r3
);
1497 potential_page_fault(s
);
1498 /* XXX rewrite in tcg */
1499 gen_helper_csg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1501 tcg_temp_free_i64(tmp
);
1502 tcg_temp_free_i32(tmp32_1
);
1503 tcg_temp_free_i32(tmp32_2
);
1505 case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
1506 tmp
= get_address(s
, 0, b2
, d2
);
1507 tmp32_1
= tcg_const_i32(r1
);
1508 tmp32_2
= tcg_const_i32(r3
);
1509 potential_page_fault(s
);
1510 /* XXX rewrite in tcg */
1511 gen_helper_cdsg(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1513 tcg_temp_free_i64(tmp
);
1514 tcg_temp_free_i32(tmp32_1
);
1515 tcg_temp_free_i32(tmp32_2
);
1517 case 0x51: /* TMY D1(B1),I2 [SIY] */
1518 tmp
= get_address(s
, 0, b2
, d2
); /* SIY -> this is the destination */
1519 tmp2
= tcg_const_i64((r1
<< 4) | r3
);
1520 tcg_gen_qemu_ld8u(tmp
, tmp
, get_mem_index(s
));
1521 /* yes, this is a 32 bit operation with 64 bit tcg registers, because
1522 that incurs less conversions */
1523 cmp_64(s
, tmp
, tmp2
, CC_OP_TM_32
);
1524 tcg_temp_free_i64(tmp
);
1525 tcg_temp_free_i64(tmp2
);
1527 case 0x52: /* MVIY D1(B1),I2 [SIY] */
1528 tmp
= get_address(s
, 0, b2
, d2
); /* SIY -> this is the destination */
1529 tmp2
= tcg_const_i64((r1
<< 4) | r3
);
1530 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
1531 tcg_temp_free_i64(tmp
);
1532 tcg_temp_free_i64(tmp2
);
1534 case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */
1535 tmp
= get_address(s
, 0, b2
, d2
);
1536 tmp32_1
= tcg_const_i32(r1
);
1537 tmp32_2
= tcg_const_i32(r3
);
1538 potential_page_fault(s
);
1539 /* XXX split CC calculation out */
1540 gen_helper_icmh(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
1542 tcg_temp_free_i64(tmp
);
1543 tcg_temp_free_i32(tmp32_1
);
1544 tcg_temp_free_i32(tmp32_2
);
1547 LOG_DISAS("illegal eb operation 0x%x\n", op
);
1548 gen_illegal_opcode(s
);
/*
 * NOTE(review): this file appears to be a lossily re-extracted copy of
 * QEMU's s390x translate.c.  Statements are split across physical lines
 * and the leading integers are stale line numbers fused into the text;
 * the jumps in that numbering show that structural lines (switch
 * headers, break statements, braces) were dropped by the extraction.
 * The code below is therefore kept byte-identical and only comments
 * are added.  TODO: restore from the upstream file before building.
 *
 * disas_ed: translate one 0xED-prefixed instruction (RXE/RXF format,
 * mostly binary-floating-point memory operands).
 *   op  - minor opcode byte selecting the case below
 *   r1  - first register field; x2/b2/d2 - index/base/displacement
 *   r1b - extra register field used by RXF-format insns (e.g. MADB)
 * The common effective address is computed once into 'addr' and the
 * r1 constant once into 'tmp_r1'; both are freed at the end.
 */
1553 static void disas_ed(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1554 int x2
, int b2
, int d2
, int r1b
)
1556 TCGv_i32 tmp_r1
, tmp32
;
/* effective address D2(X2,B2), shared by every case */
1558 addr
= get_address(s
, x2
, b2
, d2
);
1559 tmp_r1
= tcg_const_i32(r1
);
1561 case 0x4: /* LDEB R1,D2(X2,B2) [RXE] */
1562 potential_page_fault(s
);
1563 gen_helper_ldeb(cpu_env
, tmp_r1
, addr
);
1565 case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
1566 potential_page_fault(s
);
1567 gen_helper_lxdb(cpu_env
, tmp_r1
, addr
);
1569 case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
1570 tmp
= tcg_temp_new_i64();
1571 tmp32
= load_freg32(r1
);
1572 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1573 set_cc_cmp_f32_i64(s
, tmp32
, tmp
);
1574 tcg_temp_free_i64(tmp
);
1575 tcg_temp_free_i32(tmp32
);
1577 case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
1578 tmp
= tcg_temp_new_i64();
1579 tmp32
= tcg_temp_new_i32();
1580 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1581 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1582 gen_helper_aeb(cpu_env
, tmp_r1
, tmp32
);
1583 tcg_temp_free_i64(tmp
);
1584 tcg_temp_free_i32(tmp32
);
/* reload the result to derive the condition code from it */
1586 tmp32
= load_freg32(r1
);
1587 gen_set_cc_nz_f32(s
, tmp32
);
1588 tcg_temp_free_i32(tmp32
);
1590 case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
1591 tmp
= tcg_temp_new_i64();
1592 tmp32
= tcg_temp_new_i32();
1593 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1594 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1595 gen_helper_seb(cpu_env
, tmp_r1
, tmp32
);
1596 tcg_temp_free_i64(tmp
);
1597 tcg_temp_free_i32(tmp32
);
1599 tmp32
= load_freg32(r1
);
1600 gen_set_cc_nz_f32(s
, tmp32
);
1601 tcg_temp_free_i32(tmp32
);
1603 case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
1604 tmp
= tcg_temp_new_i64();
1605 tmp32
= tcg_temp_new_i32();
1606 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1607 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1608 gen_helper_deb(cpu_env
, tmp_r1
, tmp32
);
1609 tcg_temp_free_i64(tmp
);
1610 tcg_temp_free_i32(tmp32
);
1612 case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
1613 potential_page_fault(s
);
1614 gen_helper_tceb(cc_op
, cpu_env
, tmp_r1
, addr
);
1617 case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
1618 potential_page_fault(s
);
1619 gen_helper_tcdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1622 case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
1623 potential_page_fault(s
);
1624 gen_helper_tcxb(cc_op
, cpu_env
, tmp_r1
, addr
);
1627 case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
1628 tmp
= tcg_temp_new_i64();
1629 tmp32
= tcg_temp_new_i32();
1630 tcg_gen_qemu_ld32u(tmp
, addr
, get_mem_index(s
));
1631 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
1632 gen_helper_meeb(cpu_env
, tmp_r1
, tmp32
);
1633 tcg_temp_free_i64(tmp
);
1634 tcg_temp_free_i32(tmp32
);
1636 case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
1637 potential_page_fault(s
);
1638 gen_helper_cdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1641 case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
1642 potential_page_fault(s
);
1643 gen_helper_adb(cc_op
, cpu_env
, tmp_r1
, addr
);
1646 case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
1647 potential_page_fault(s
);
1648 gen_helper_sdb(cc_op
, cpu_env
, tmp_r1
, addr
);
1651 case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
1652 potential_page_fault(s
);
1653 gen_helper_mdb(cpu_env
, tmp_r1
, addr
);
1655 case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
1656 potential_page_fault(s
);
1657 gen_helper_ddb(cpu_env
, tmp_r1
, addr
);
1659 case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
1660 /* for RXF insns, r1 is R3 and r1b is R1 */
1661 tmp32
= tcg_const_i32(r1b
);
1662 potential_page_fault(s
);
1663 gen_helper_madb(cpu_env
, tmp32
, addr
, tmp_r1
);
1664 tcg_temp_free_i32(tmp32
);
/* unknown minor opcode: raise an illegal-operation exception */
1667 LOG_DISAS("illegal ed operation 0x%x\n", op
);
1668 gen_illegal_opcode(s
);
/* common cleanup for all cases */
1671 tcg_temp_free_i32(tmp_r1
);
1672 tcg_temp_free_i64(addr
);
/*
 * NOTE(review): lossily extracted text — statements are split across
 * lines, the leading integers are stale line numbers, and structural
 * lines (remaining parameters, switch/break, braces) were dropped.
 * Code kept byte-identical; comments only.
 *
 * disas_a7: translate one 0xA7-prefixed RI-format instruction.
 * The visible cases implement the TEST UNDER MASK family (TMLH/TMLL/
 * TMHH/TMHL): select a 16-bit slice of register R1 via a shift chosen
 * by 'op', mask it to 16 bits, and compare it against the immediate
 * to set cc via CC_OP_TM_64.
 */
1675 static void disas_a7(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
1680 LOG_DISAS("disas_a7: op 0x%x r1 %d i2 0x%x\n", op
, r1
, i2
);
1682 case 0x0: /* TMLH or TMH R1,I2 [RI] */
1683 case 0x1: /* TMLL or TML R1,I2 [RI] */
1684 case 0x2: /* TMHH R1,I2 [RI] */
1685 case 0x3: /* TMHL R1,I2 [RI] */
/* zero-extend the 16-bit immediate mask */
1687 tmp2
= tcg_const_i64((uint16_t)i2
);
/* shift amount depends on which halfword the op selects
   (presumably 16/48/32 for the respective TM variants —
   the selecting if/switch lines were lost in extraction) */
1690 tcg_gen_shri_i64(tmp
, tmp
, 16);
1695 tcg_gen_shri_i64(tmp
, tmp
, 48);
1698 tcg_gen_shri_i64(tmp
, tmp
, 32);
1701 tcg_gen_andi_i64(tmp
, tmp
, 0xffff);
1702 cmp_64(s
, tmp
, tmp2
, CC_OP_TM_64
);
1703 tcg_temp_free_i64(tmp
);
1704 tcg_temp_free_i64(tmp2
);
/* unknown minor opcode */
1707 LOG_DISAS("illegal a7 operation 0x%x\n", op
);
1708 gen_illegal_opcode(s
);
/*
 * NOTE(review): lossily extracted text — statements are split across
 * lines, leading integers are stale line numbers, and structural lines
 * (switch headers, break statements, braces, some parameters) were
 * dropped.  Code kept byte-identical; comments only.
 *
 * disas_b2: translate one 0xB2-prefixed instruction.  The first group
 * of cases (IPM, CKSM, SAR, EAR, MVPG, MVST, CLST, SRST) is available
 * everywhere; everything under the second CONFIG_USER_ONLY guard is a
 * privileged system instruction and calls check_privileged() first.
 * S-format cases re-decode base/displacement with decode_rs(); RRE
 * cases extract r1 from bits 7..4 of 'insn'.
 */
1713 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1716 TCGv_i64 tmp
, tmp2
, tmp3
;
1717 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
1719 #ifndef CONFIG_USER_ONLY
1723 r1
= (insn
>> 4) & 0xf;
1726 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1729 case 0x22: /* IPM R1 [RRE] */
1730 tmp32_1
= tcg_const_i32(r1
);
1732 gen_helper_ipm(cpu_env
, cc_op
, tmp32_1
);
1733 tcg_temp_free_i32(tmp32_1
);
1735 case 0x41: /* CKSM R1,R2 [RRE] */
1736 tmp32_1
= tcg_const_i32(r1
);
1737 tmp32_2
= tcg_const_i32(r2
);
1738 potential_page_fault(s
);
1739 gen_helper_cksm(cpu_env
, tmp32_1
, tmp32_2
);
1740 tcg_temp_free_i32(tmp32_1
);
1741 tcg_temp_free_i32(tmp32_2
);
1742 gen_op_movi_cc(s
, 0);
1744 case 0x4e: /* SAR R1,R2 [RRE] */
1745 tmp32_1
= load_reg32(r2
);
1746 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
1747 tcg_temp_free_i32(tmp32_1
);
1749 case 0x4f: /* EAR R1,R2 [RRE] */
1750 tmp32_1
= tcg_temp_new_i32();
1751 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1752 store_reg32(r1
, tmp32_1
);
1753 tcg_temp_free_i32(tmp32_1
);
1755 case 0x54: /* MVPG R1,R2 [RRE] */
1757 tmp2
= load_reg(r1
);
1758 tmp3
= load_reg(r2
);
1759 potential_page_fault(s
);
1760 gen_helper_mvpg(cpu_env
, tmp
, tmp2
, tmp3
);
1761 tcg_temp_free_i64(tmp
);
1762 tcg_temp_free_i64(tmp2
);
1763 tcg_temp_free_i64(tmp3
);
1764 /* XXX check CCO bit and set CC accordingly */
1765 gen_op_movi_cc(s
, 0);
1767 case 0x55: /* MVST R1,R2 [RRE] */
1768 tmp32_1
= load_reg32(0);
1769 tmp32_2
= tcg_const_i32(r1
);
1770 tmp32_3
= tcg_const_i32(r2
);
1771 potential_page_fault(s
);
1772 gen_helper_mvst(cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1773 tcg_temp_free_i32(tmp32_1
);
1774 tcg_temp_free_i32(tmp32_2
);
1775 tcg_temp_free_i32(tmp32_3
);
1776 gen_op_movi_cc(s
, 1);
1778 case 0x5d: /* CLST R1,R2 [RRE] */
1779 tmp32_1
= load_reg32(0);
1780 tmp32_2
= tcg_const_i32(r1
);
1781 tmp32_3
= tcg_const_i32(r2
);
1782 potential_page_fault(s
);
1783 gen_helper_clst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1785 tcg_temp_free_i32(tmp32_1
);
1786 tcg_temp_free_i32(tmp32_2
);
1787 tcg_temp_free_i32(tmp32_3
);
1789 case 0x5e: /* SRST R1,R2 [RRE] */
1790 tmp32_1
= load_reg32(0);
1791 tmp32_2
= tcg_const_i32(r1
);
1792 tmp32_3
= tcg_const_i32(r2
);
1793 potential_page_fault(s
);
1794 gen_helper_srst(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
1796 tcg_temp_free_i32(tmp32_1
);
1797 tcg_temp_free_i32(tmp32_2
);
1798 tcg_temp_free_i32(tmp32_3
);
/* ---- privileged system instructions (softmmu only) ---- */
1801 #ifndef CONFIG_USER_ONLY
1802 case 0x02: /* STIDP D2(B2) [S] */
1804 check_privileged(s
);
1805 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1806 tmp
= get_address(s
, 0, b2
, d2
);
1807 potential_page_fault(s
);
1808 gen_helper_stidp(cpu_env
, tmp
);
1809 tcg_temp_free_i64(tmp
);
1811 case 0x04: /* SCK D2(B2) [S] */
1813 check_privileged(s
);
1814 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1815 tmp
= get_address(s
, 0, b2
, d2
);
1816 potential_page_fault(s
);
1817 gen_helper_sck(cc_op
, tmp
);
1819 tcg_temp_free_i64(tmp
);
1821 case 0x05: /* STCK D2(B2) [S] */
1823 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1824 tmp
= get_address(s
, 0, b2
, d2
);
1825 potential_page_fault(s
);
1826 gen_helper_stck(cc_op
, cpu_env
, tmp
);
1828 tcg_temp_free_i64(tmp
);
1830 case 0x06: /* SCKC D2(B2) [S] */
1831 /* Set Clock Comparator */
1832 check_privileged(s
);
1833 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1834 tmp
= get_address(s
, 0, b2
, d2
);
1835 potential_page_fault(s
);
1836 gen_helper_sckc(cpu_env
, tmp
);
1837 tcg_temp_free_i64(tmp
);
1839 case 0x07: /* STCKC D2(B2) [S] */
1840 /* Store Clock Comparator */
1841 check_privileged(s
);
1842 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1843 tmp
= get_address(s
, 0, b2
, d2
);
1844 potential_page_fault(s
);
1845 gen_helper_stckc(cpu_env
, tmp
);
1846 tcg_temp_free_i64(tmp
);
1848 case 0x08: /* SPT D2(B2) [S] */
1850 check_privileged(s
);
1851 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1852 tmp
= get_address(s
, 0, b2
, d2
);
1853 potential_page_fault(s
);
1854 gen_helper_spt(cpu_env
, tmp
);
1855 tcg_temp_free_i64(tmp
);
1857 case 0x09: /* STPT D2(B2) [S] */
1858 /* Store CPU Timer */
1859 check_privileged(s
);
1860 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1861 tmp
= get_address(s
, 0, b2
, d2
);
1862 potential_page_fault(s
);
1863 gen_helper_stpt(cpu_env
, tmp
);
1864 tcg_temp_free_i64(tmp
);
1866 case 0x0a: /* SPKA D2(B2) [S] */
1867 /* Set PSW Key from Address */
1868 check_privileged(s
);
1869 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1870 tmp
= get_address(s
, 0, b2
, d2
);
1871 tmp2
= tcg_temp_new_i64();
/* merge bits 4..7 of the address into the PSW key field */
1872 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1873 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1874 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1875 tcg_temp_free_i64(tmp2
);
1876 tcg_temp_free_i64(tmp
);
1878 case 0x0d: /* PTLB [S] */
1880 check_privileged(s
);
1881 gen_helper_ptlb(cpu_env
);
1883 case 0x10: /* SPX D2(B2) [S] */
1884 /* Set Prefix Register */
1885 check_privileged(s
);
1886 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1887 tmp
= get_address(s
, 0, b2
, d2
);
1888 potential_page_fault(s
);
1889 gen_helper_spx(cpu_env
, tmp
);
1890 tcg_temp_free_i64(tmp
);
1892 case 0x11: /* STPX D2(B2) [S] */
1894 check_privileged(s
);
1895 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1896 tmp
= get_address(s
, 0, b2
, d2
);
1897 tmp2
= tcg_temp_new_i64();
1898 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1899 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1900 tcg_temp_free_i64(tmp
);
1901 tcg_temp_free_i64(tmp2
);
1903 case 0x12: /* STAP D2(B2) [S] */
1904 /* Store CPU Address */
1905 check_privileged(s
);
1906 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1907 tmp
= get_address(s
, 0, b2
, d2
);
1908 tmp2
= tcg_temp_new_i64();
1909 tmp32_1
= tcg_temp_new_i32();
1910 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1911 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1912 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1913 tcg_temp_free_i64(tmp
);
1914 tcg_temp_free_i64(tmp2
);
1915 tcg_temp_free_i32(tmp32_1
);
1917 case 0x21: /* IPTE R1,R2 [RRE] */
1918 /* Invalidate PTE */
1919 check_privileged(s
);
1920 r1
= (insn
>> 4) & 0xf;
1923 tmp2
= load_reg(r2
);
1924 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1925 tcg_temp_free_i64(tmp
);
1926 tcg_temp_free_i64(tmp2
);
1928 case 0x29: /* ISKE R1,R2 [RRE] */
1929 /* Insert Storage Key Extended */
1930 check_privileged(s
);
1931 r1
= (insn
>> 4) & 0xf;
1934 tmp2
= tcg_temp_new_i64();
1935 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1936 store_reg(r1
, tmp2
);
1937 tcg_temp_free_i64(tmp
);
1938 tcg_temp_free_i64(tmp2
);
1940 case 0x2a: /* RRBE R1,R2 [RRE] */
1941 /* Set Storage Key Extended */
1942 check_privileged(s
);
1943 r1
= (insn
>> 4) & 0xf;
1945 tmp32_1
= load_reg32(r1
);
1947 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1949 tcg_temp_free_i32(tmp32_1
);
1950 tcg_temp_free_i64(tmp
);
1952 case 0x2b: /* SSKE R1,R2 [RRE] */
1953 /* Set Storage Key Extended */
1954 check_privileged(s
);
1955 r1
= (insn
>> 4) & 0xf;
1957 tmp32_1
= load_reg32(r1
);
1959 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1960 tcg_temp_free_i32(tmp32_1
);
1961 tcg_temp_free_i64(tmp
);
1963 case 0x34: /* STCH ? */
1964 /* Store Subchannel */
1965 check_privileged(s
);
/* no channel subsystem emulated: report "not operational" */
1966 gen_op_movi_cc(s
, 3);
1968 case 0x46: /* STURA R1,R2 [RRE] */
1969 /* Store Using Real Address */
1970 check_privileged(s
);
1971 r1
= (insn
>> 4) & 0xf;
1973 tmp32_1
= load_reg32(r1
);
1975 potential_page_fault(s
);
1976 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1977 tcg_temp_free_i32(tmp32_1
);
1978 tcg_temp_free_i64(tmp
);
1980 case 0x50: /* CSP R1,R2 [RRE] */
1981 /* Compare And Swap And Purge */
1982 check_privileged(s
);
1983 r1
= (insn
>> 4) & 0xf;
1985 tmp32_1
= tcg_const_i32(r1
);
1986 tmp32_2
= tcg_const_i32(r2
);
1987 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1989 tcg_temp_free_i32(tmp32_1
);
1990 tcg_temp_free_i32(tmp32_2
);
1992 case 0x5f: /* CHSC ? */
1993 /* Channel Subsystem Call */
1994 check_privileged(s
);
1995 gen_op_movi_cc(s
, 3);
1997 case 0x78: /* STCKE D2(B2) [S] */
1998 /* Store Clock Extended */
1999 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2000 tmp
= get_address(s
, 0, b2
, d2
);
2001 potential_page_fault(s
);
2002 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
2004 tcg_temp_free_i64(tmp
);
2006 case 0x79: /* SACF D2(B2) [S] */
2007 /* Set Address Space Control Fast */
2008 check_privileged(s
);
2009 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2010 tmp
= get_address(s
, 0, b2
, d2
);
2011 potential_page_fault(s
);
2012 gen_helper_sacf(cpu_env
, tmp
);
2013 tcg_temp_free_i64(tmp
);
2014 /* addressing mode has changed, so end the block */
2017 s
->is_jmp
= DISAS_JUMP
;
2019 case 0x7d: /* STSI D2,(B2) [S] */
2020 check_privileged(s
);
2021 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2022 tmp
= get_address(s
, 0, b2
, d2
);
2023 tmp32_1
= load_reg32(0);
2024 tmp32_2
= load_reg32(1);
2025 potential_page_fault(s
);
2026 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
2028 tcg_temp_free_i64(tmp
);
2029 tcg_temp_free_i32(tmp32_1
);
2030 tcg_temp_free_i32(tmp32_2
);
2032 case 0x9d: /* LFPC D2(B2) [S] */
2033 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2034 tmp
= get_address(s
, 0, b2
, d2
);
2035 tmp2
= tcg_temp_new_i64();
2036 tmp32_1
= tcg_temp_new_i32();
2037 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2038 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
2039 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2040 tcg_temp_free_i64(tmp
);
2041 tcg_temp_free_i64(tmp2
);
2042 tcg_temp_free_i32(tmp32_1
);
2044 case 0xb1: /* STFL D2(B2) [S] */
2045 /* Store Facility List (CPU features) at 200 */
2046 check_privileged(s
);
2047 tmp2
= tcg_const_i64(0xc0000000);
2048 tmp
= tcg_const_i64(200);
2049 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
2050 tcg_temp_free_i64(tmp2
);
2051 tcg_temp_free_i64(tmp
);
2053 case 0xb2: /* LPSWE D2(B2) [S] */
2054 /* Load PSW Extended */
2055 check_privileged(s
);
2056 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2057 tmp
= get_address(s
, 0, b2
, d2
);
2058 tmp2
= tcg_temp_new_i64();
2059 tmp3
= tcg_temp_new_i64();
/* load the 16-byte PSW (mask + address) from memory */
2060 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
2061 tcg_gen_addi_i64(tmp
, tmp
, 8);
2062 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
2063 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
2064 /* we need to keep cc_op intact */
2065 s
->is_jmp
= DISAS_JUMP
;
2066 tcg_temp_free_i64(tmp
);
2067 tcg_temp_free_i64(tmp2
);
2068 tcg_temp_free_i64(tmp3
);
2070 case 0x20: /* SERVC R1,R2 [RRE] */
2071 /* SCLP Service call (PV hypercall) */
2072 check_privileged(s
);
2073 potential_page_fault(s
);
2074 tmp32_1
= load_reg32(r2
);
2076 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
2078 tcg_temp_free_i32(tmp32_1
);
2079 tcg_temp_free_i64(tmp
);
/* unknown minor opcode */
2083 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
2084 gen_illegal_opcode(s
);
/*
 * NOTE(review): lossily extracted text — statements are split across
 * lines, leading integers are stale line numbers, and structural lines
 * (switch/break, braces, inner if/switch selectors) were dropped.
 * Code kept byte-identical; comments only.
 *
 * disas_b3: translate one 0xB3-prefixed instruction (binary floating
 * point, register-register forms).  Most cases delegate to per-insn
 * helpers via the FP_HELPER/FP_HELPER_CC macros below; FP_HELPER_CC
 * additionally routes the helper's condition-code result into cc_op.
 * For RRF-format cases the field mapping is remapped (see the comment
 * at case 0xe below).
 */
2089 static void disas_b3(CPUS390XState
*env
, DisasContext
*s
, int op
, int m3
,
2093 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
;
2094 LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op
, m3
, r1
, r2
);
/* call gen_helper_<i>(cpu_env, r1, r2) with boxed register numbers */
2095 #define FP_HELPER(i) \
2096 tmp32_1 = tcg_const_i32(r1); \
2097 tmp32_2 = tcg_const_i32(r2); \
2098 gen_helper_ ## i(cpu_env, tmp32_1, tmp32_2); \
2099 tcg_temp_free_i32(tmp32_1); \
2100 tcg_temp_free_i32(tmp32_2);
/* same, but the helper also produces the condition code in cc_op */
2102 #define FP_HELPER_CC(i) \
2103 tmp32_1 = tcg_const_i32(r1); \
2104 tmp32_2 = tcg_const_i32(r2); \
2105 gen_helper_ ## i(cc_op, cpu_env, tmp32_1, tmp32_2); \
2107 tcg_temp_free_i32(tmp32_1); \
2108 tcg_temp_free_i32(tmp32_2);
2111 case 0x0: /* LPEBR R1,R2 [RRE] */
2112 FP_HELPER_CC(lpebr
);
2114 case 0x2: /* LTEBR R1,R2 [RRE] */
2115 FP_HELPER_CC(ltebr
);
2117 case 0x3: /* LCEBR R1,R2 [RRE] */
2118 FP_HELPER_CC(lcebr
);
/* the bodies of the following cases were lost in extraction;
   presumably FP_HELPER/FP_HELPER_CC invocations — restore upstream */
2120 case 0x4: /* LDEBR R1,R2 [RRE] */
2123 case 0x5: /* LXDBR R1,R2 [RRE] */
2126 case 0x9: /* CEBR R1,R2 [RRE] */
2129 case 0xa: /* AEBR R1,R2 [RRE] */
2132 case 0xb: /* SEBR R1,R2 [RRE] */
2135 case 0xd: /* DEBR R1,R2 [RRE] */
2138 case 0x10: /* LPDBR R1,R2 [RRE] */
2139 FP_HELPER_CC(lpdbr
);
2141 case 0x12: /* LTDBR R1,R2 [RRE] */
2142 FP_HELPER_CC(ltdbr
);
2144 case 0x13: /* LCDBR R1,R2 [RRE] */
2145 FP_HELPER_CC(lcdbr
);
2147 case 0x15: /* SQBDR R1,R2 [RRE] */
2150 case 0x17: /* MEEBR R1,R2 [RRE] */
2153 case 0x19: /* CDBR R1,R2 [RRE] */
2156 case 0x1a: /* ADBR R1,R2 [RRE] */
2159 case 0x1b: /* SDBR R1,R2 [RRE] */
2162 case 0x1c: /* MDBR R1,R2 [RRE] */
2165 case 0x1d: /* DDBR R1,R2 [RRE] */
2168 case 0xe: /* MAEBR R1,R3,R2 [RRF] */
2169 case 0x1e: /* MADBR R1,R3,R2 [RRF] */
2170 case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
2171 /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
2172 tmp32_1
= tcg_const_i32(m3
);
2173 tmp32_2
= tcg_const_i32(r2
);
2174 tmp32_3
= tcg_const_i32(r1
);
2177 gen_helper_maebr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2180 gen_helper_madbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2183 gen_helper_msdbr(cpu_env
, tmp32_1
, tmp32_3
, tmp32_2
);
2188 tcg_temp_free_i32(tmp32_1
);
2189 tcg_temp_free_i32(tmp32_2
);
2190 tcg_temp_free_i32(tmp32_3
);
2192 case 0x40: /* LPXBR R1,R2 [RRE] */
2193 FP_HELPER_CC(lpxbr
);
2195 case 0x42: /* LTXBR R1,R2 [RRE] */
2196 FP_HELPER_CC(ltxbr
);
2198 case 0x43: /* LCXBR R1,R2 [RRE] */
2199 FP_HELPER_CC(lcxbr
);
2201 case 0x44: /* LEDBR R1,R2 [RRE] */
2204 case 0x45: /* LDXBR R1,R2 [RRE] */
2207 case 0x46: /* LEXBR R1,R2 [RRE] */
2210 case 0x49: /* CXBR R1,R2 [RRE] */
2213 case 0x4a: /* AXBR R1,R2 [RRE] */
2216 case 0x4b: /* SXBR R1,R2 [RRE] */
2219 case 0x4c: /* MXBR R1,R2 [RRE] */
2222 case 0x4d: /* DXBR R1,R2 [RRE] */
2225 case 0x65: /* LXR R1,R2 [RRE] */
/* copy the 128-bit FP register pair (r2,r2+2) -> (r1,r1+2) */
2226 tmp
= load_freg(r2
);
2227 store_freg(r1
, tmp
);
2228 tcg_temp_free_i64(tmp
);
2229 tmp
= load_freg(r2
+ 2);
2230 store_freg(r1
+ 2, tmp
);
2231 tcg_temp_free_i64(tmp
);
2233 case 0x74: /* LZER R1 [RRE] */
2234 tmp32_1
= tcg_const_i32(r1
);
2235 gen_helper_lzer(cpu_env
, tmp32_1
);
2236 tcg_temp_free_i32(tmp32_1
);
2238 case 0x75: /* LZDR R1 [RRE] */
2239 tmp32_1
= tcg_const_i32(r1
);
2240 gen_helper_lzdr(cpu_env
, tmp32_1
);
2241 tcg_temp_free_i32(tmp32_1
);
2243 case 0x76: /* LZXR R1 [RRE] */
2244 tmp32_1
= tcg_const_i32(r1
);
2245 gen_helper_lzxr(cpu_env
, tmp32_1
);
2246 tcg_temp_free_i32(tmp32_1
);
2248 case 0x84: /* SFPC R1 [RRE] */
2249 tmp32_1
= load_reg32(r1
);
2250 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2251 tcg_temp_free_i32(tmp32_1
);
2253 case 0x8c: /* EFPC R1 [RRE] */
2254 tmp32_1
= tcg_temp_new_i32();
2255 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2256 store_reg32(r1
, tmp32_1
);
2257 tcg_temp_free_i32(tmp32_1
);
2259 case 0x94: /* CEFBR R1,R2 [RRE] */
2260 case 0x95: /* CDFBR R1,R2 [RRE] */
2261 case 0x96: /* CXFBR R1,R2 [RRE] */
2262 tmp32_1
= tcg_const_i32(r1
);
2263 tmp32_2
= load_reg32(r2
);
/* the op-selecting switch lines were lost; one of the three
   helpers below runs per case */
2266 gen_helper_cefbr(cpu_env
, tmp32_1
, tmp32_2
);
2269 gen_helper_cdfbr(cpu_env
, tmp32_1
, tmp32_2
);
2272 gen_helper_cxfbr(cpu_env
, tmp32_1
, tmp32_2
);
2277 tcg_temp_free_i32(tmp32_1
);
2278 tcg_temp_free_i32(tmp32_2
);
2280 case 0x98: /* CFEBR R1,R2 [RRE] */
2281 case 0x99: /* CFDBR R1,R2 [RRE] */
2282 case 0x9a: /* CFXBR R1,R2 [RRE] */
2283 tmp32_1
= tcg_const_i32(r1
);
2284 tmp32_2
= tcg_const_i32(r2
);
2285 tmp32_3
= tcg_const_i32(m3
);
2288 gen_helper_cfebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2291 gen_helper_cfdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2294 gen_helper_cfxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2300 tcg_temp_free_i32(tmp32_1
);
2301 tcg_temp_free_i32(tmp32_2
);
2302 tcg_temp_free_i32(tmp32_3
);
2304 case 0xa4: /* CEGBR R1,R2 [RRE] */
2305 case 0xa5: /* CDGBR R1,R2 [RRE] */
2306 tmp32_1
= tcg_const_i32(r1
);
2310 gen_helper_cegbr(cpu_env
, tmp32_1
, tmp
);
2313 gen_helper_cdgbr(cpu_env
, tmp32_1
, tmp
);
2318 tcg_temp_free_i32(tmp32_1
);
2319 tcg_temp_free_i64(tmp
);
2321 case 0xa6: /* CXGBR R1,R2 [RRE] */
2322 tmp32_1
= tcg_const_i32(r1
);
2324 gen_helper_cxgbr(cpu_env
, tmp32_1
, tmp
);
2325 tcg_temp_free_i32(tmp32_1
);
2326 tcg_temp_free_i64(tmp
);
2328 case 0xa8: /* CGEBR R1,R2 [RRE] */
2329 tmp32_1
= tcg_const_i32(r1
);
2330 tmp32_2
= tcg_const_i32(r2
);
2331 tmp32_3
= tcg_const_i32(m3
);
2332 gen_helper_cgebr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2334 tcg_temp_free_i32(tmp32_1
);
2335 tcg_temp_free_i32(tmp32_2
);
2336 tcg_temp_free_i32(tmp32_3
);
2338 case 0xa9: /* CGDBR R1,R2 [RRE] */
2339 tmp32_1
= tcg_const_i32(r1
);
2340 tmp32_2
= tcg_const_i32(r2
);
2341 tmp32_3
= tcg_const_i32(m3
);
2342 gen_helper_cgdbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2344 tcg_temp_free_i32(tmp32_1
);
2345 tcg_temp_free_i32(tmp32_2
);
2346 tcg_temp_free_i32(tmp32_3
);
2348 case 0xaa: /* CGXBR R1,R2 [RRE] */
2349 tmp32_1
= tcg_const_i32(r1
);
2350 tmp32_2
= tcg_const_i32(r2
);
2351 tmp32_3
= tcg_const_i32(m3
);
2352 gen_helper_cgxbr(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp32_3
);
2354 tcg_temp_free_i32(tmp32_1
);
2355 tcg_temp_free_i32(tmp32_2
);
2356 tcg_temp_free_i32(tmp32_3
);
/* unknown minor opcode */
2359 LOG_DISAS("illegal b3 operation 0x%x\n", op
);
2360 gen_illegal_opcode(s
);
/*
 * NOTE(review): lossily extracted text — statements are split across
 * lines, leading integers are stale line numbers, and structural lines
 * (rest of the signature, switch/break, braces) were dropped.  Code
 * kept byte-identical; comments only.
 *
 * disas_b9: translate one 0xB9-prefixed RRE instruction.  Visible
 * cases: LLGTR (load 31-bit value zero-extended to 64 bits), LRVGR /
 * LRVR (64/32-bit byte reversal), FLOGR (find leftmost one, cc set
 * by helper).
 */
2368 static void disas_b9(CPUS390XState
*env
, DisasContext
*s
, int op
, int r1
,
2374 LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
2376 case 0x17: /* LLGTR R1,R2 [RRE] */
2377 tmp32_1
= load_reg32(r2
);
2378 tmp
= tcg_temp_new_i64();
/* clear bit 32 (the 31-bit addressing tag), then widen */
2379 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, 0x7fffffffUL
);
2380 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2382 tcg_temp_free_i32(tmp32_1
);
2383 tcg_temp_free_i64(tmp
);
2385 case 0x0f: /* LRVGR R1,R2 [RRE] */
2386 tcg_gen_bswap64_i64(regs
[r1
], regs
[r2
]);
2388 case 0x1f: /* LRVR R1,R2 [RRE] */
2389 tmp32_1
= load_reg32(r2
);
2390 tcg_gen_bswap32_i32(tmp32_1
, tmp32_1
);
2391 store_reg32(r1
, tmp32_1
);
2392 tcg_temp_free_i32(tmp32_1
);
2394 case 0x83: /* FLOGR R1,R2 [RRE] */
2396 tmp32_1
= tcg_const_i32(r1
);
2397 gen_helper_flogr(cc_op
, cpu_env
, tmp32_1
, tmp
);
2399 tcg_temp_free_i64(tmp
);
2400 tcg_temp_free_i32(tmp32_1
);
/* unknown minor opcode */
2403 LOG_DISAS("illegal b9 operation 0x%x\n", op
);
2404 gen_illegal_opcode(s
);
2409 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
2411 TCGv_i64 tmp
, tmp2
, tmp3
, tmp4
;
2412 TCGv_i32 tmp32_1
, tmp32_2
, tmp32_3
, tmp32_4
;
2415 int op
, r1
, r2
, r3
, d1
, d2
, x2
, b1
, b2
, i
, i2
, r1b
;
2418 opc
= cpu_ldub_code(env
, s
->pc
);
2419 LOG_DISAS("opc 0x%x\n", opc
);
2422 #ifndef CONFIG_USER_ONLY
2423 case 0x01: /* SAM */
2424 insn
= ld_code2(env
, s
->pc
);
2425 /* set addressing mode, but we only do 64bit anyways */
2428 case 0xa: /* SVC I [RR] */
2429 insn
= ld_code2(env
, s
->pc
);
2434 tmp32_1
= tcg_const_i32(i
);
2435 tmp32_2
= tcg_const_i32(s
->next_pc
- s
->pc
);
2436 tcg_gen_st_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
2437 tcg_gen_st_i32(tmp32_2
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
2438 gen_exception(EXCP_SVC
);
2439 s
->is_jmp
= DISAS_EXCP
;
2440 tcg_temp_free_i32(tmp32_1
);
2441 tcg_temp_free_i32(tmp32_2
);
2443 case 0xe: /* MVCL R1,R2 [RR] */
2444 insn
= ld_code2(env
, s
->pc
);
2445 decode_rr(s
, insn
, &r1
, &r2
);
2446 tmp32_1
= tcg_const_i32(r1
);
2447 tmp32_2
= tcg_const_i32(r2
);
2448 potential_page_fault(s
);
2449 gen_helper_mvcl(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
2451 tcg_temp_free_i32(tmp32_1
);
2452 tcg_temp_free_i32(tmp32_2
);
2454 case 0x28: /* LDR R1,R2 [RR] */
2455 insn
= ld_code2(env
, s
->pc
);
2456 decode_rr(s
, insn
, &r1
, &r2
);
2457 tmp
= load_freg(r2
);
2458 store_freg(r1
, tmp
);
2459 tcg_temp_free_i64(tmp
);
2461 case 0x38: /* LER R1,R2 [RR] */
2462 insn
= ld_code2(env
, s
->pc
);
2463 decode_rr(s
, insn
, &r1
, &r2
);
2464 tmp32_1
= load_freg32(r2
);
2465 store_freg32(r1
, tmp32_1
);
2466 tcg_temp_free_i32(tmp32_1
);
2468 case 0x43: /* IC R1,D2(X2,B2) [RX] */
2469 insn
= ld_code4(env
, s
->pc
);
2470 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2471 tmp2
= tcg_temp_new_i64();
2472 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2473 store_reg8(r1
, tmp2
);
2474 tcg_temp_free_i64(tmp
);
2475 tcg_temp_free_i64(tmp2
);
2477 case 0x44: /* EX R1,D2(X2,B2) [RX] */
2478 insn
= ld_code4(env
, s
->pc
);
2479 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2480 tmp2
= load_reg(r1
);
2481 tmp3
= tcg_const_i64(s
->pc
+ 4);
2484 gen_helper_ex(cc_op
, cpu_env
, cc_op
, tmp2
, tmp
, tmp3
);
2486 tcg_temp_free_i64(tmp
);
2487 tcg_temp_free_i64(tmp2
);
2488 tcg_temp_free_i64(tmp3
);
2490 case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
2491 insn
= ld_code4(env
, s
->pc
);
2492 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2493 tmp2
= tcg_temp_new_i64();
2494 tmp32_1
= tcg_temp_new_i32();
2495 tcg_gen_trunc_i64_i32(tmp32_1
, regs
[r1
]);
2496 gen_helper_cvd(tmp2
, tmp32_1
);
2497 tcg_gen_qemu_st64(tmp2
, tmp
, get_mem_index(s
));
2498 tcg_temp_free_i64(tmp
);
2499 tcg_temp_free_i64(tmp2
);
2500 tcg_temp_free_i32(tmp32_1
);
2502 case 0x60: /* STD R1,D2(X2,B2) [RX] */
2503 insn
= ld_code4(env
, s
->pc
);
2504 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2505 tmp2
= load_freg(r1
);
2506 tcg_gen_qemu_st64(tmp2
, tmp
, get_mem_index(s
));
2507 tcg_temp_free_i64(tmp
);
2508 tcg_temp_free_i64(tmp2
);
2510 case 0x68: /* LD R1,D2(X2,B2) [RX] */
2511 insn
= ld_code4(env
, s
->pc
);
2512 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2513 tmp2
= tcg_temp_new_i64();
2514 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
2515 store_freg(r1
, tmp2
);
2516 tcg_temp_free_i64(tmp
);
2517 tcg_temp_free_i64(tmp2
);
2519 case 0x70: /* STE R1,D2(X2,B2) [RX] */
2520 insn
= ld_code4(env
, s
->pc
);
2521 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2522 tmp2
= tcg_temp_new_i64();
2523 tmp32_1
= load_freg32(r1
);
2524 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
2525 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
2526 tcg_temp_free_i64(tmp
);
2527 tcg_temp_free_i64(tmp2
);
2528 tcg_temp_free_i32(tmp32_1
);
2530 case 0x78: /* LE R1,D2(X2,B2) [RX] */
2531 insn
= ld_code4(env
, s
->pc
);
2532 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2533 tmp2
= tcg_temp_new_i64();
2534 tmp32_1
= tcg_temp_new_i32();
2535 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2536 tcg_gen_trunc_i64_i32(tmp32_1
, tmp2
);
2537 store_freg32(r1
, tmp32_1
);
2538 tcg_temp_free_i64(tmp
);
2539 tcg_temp_free_i64(tmp2
);
2540 tcg_temp_free_i32(tmp32_1
);
2542 #ifndef CONFIG_USER_ONLY
2543 case 0x80: /* SSM D2(B2) [S] */
2544 /* Set System Mask */
2545 check_privileged(s
);
2546 insn
= ld_code4(env
, s
->pc
);
2547 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2548 tmp
= get_address(s
, 0, b2
, d2
);
2549 tmp2
= tcg_temp_new_i64();
2550 tmp3
= tcg_temp_new_i64();
2551 tcg_gen_andi_i64(tmp3
, psw_mask
, ~0xff00000000000000ULL
);
2552 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2553 tcg_gen_shli_i64(tmp2
, tmp2
, 56);
2554 tcg_gen_or_i64(psw_mask
, tmp3
, tmp2
);
2555 tcg_temp_free_i64(tmp
);
2556 tcg_temp_free_i64(tmp2
);
2557 tcg_temp_free_i64(tmp3
);
2559 case 0x82: /* LPSW D2(B2) [S] */
2561 check_privileged(s
);
2562 insn
= ld_code4(env
, s
->pc
);
2563 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2564 tmp
= get_address(s
, 0, b2
, d2
);
2565 tmp2
= tcg_temp_new_i64();
2566 tmp3
= tcg_temp_new_i64();
2567 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2568 tcg_gen_addi_i64(tmp
, tmp
, 4);
2569 tcg_gen_qemu_ld32u(tmp3
, tmp
, get_mem_index(s
));
2570 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2571 tcg_gen_shli_i64(tmp2
, tmp2
, 32);
2572 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
2573 tcg_temp_free_i64(tmp
);
2574 tcg_temp_free_i64(tmp2
);
2575 tcg_temp_free_i64(tmp3
);
2576 /* we need to keep cc_op intact */
2577 s
->is_jmp
= DISAS_JUMP
;
2579 case 0x83: /* DIAG R1,R3,D2 [RS] */
2580 /* Diagnose call (KVM hypercall) */
2581 check_privileged(s
);
2582 potential_page_fault(s
);
2583 insn
= ld_code4(env
, s
->pc
);
2584 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2585 tmp32_1
= tcg_const_i32(insn
& 0xfff);
2588 gen_helper_diag(tmp2
, cpu_env
, tmp32_1
, tmp2
, tmp3
);
2590 tcg_temp_free_i32(tmp32_1
);
2591 tcg_temp_free_i64(tmp2
);
2592 tcg_temp_free_i64(tmp3
);
2595 case 0x88: /* SRL R1,D2(B2) [RS] */
2596 case 0x89: /* SLL R1,D2(B2) [RS] */
2597 case 0x8a: /* SRA R1,D2(B2) [RS] */
2598 insn
= ld_code4(env
, s
->pc
);
2599 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2600 tmp
= get_address(s
, 0, b2
, d2
);
2601 tmp32_1
= load_reg32(r1
);
2602 tmp32_2
= tcg_temp_new_i32();
2603 tcg_gen_trunc_i64_i32(tmp32_2
, tmp
);
2604 tcg_gen_andi_i32(tmp32_2
, tmp32_2
, 0x3f);
2607 tcg_gen_shr_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2610 tcg_gen_shl_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2613 tcg_gen_sar_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2614 set_cc_s32(s
, tmp32_1
);
2619 store_reg32(r1
, tmp32_1
);
2620 tcg_temp_free_i64(tmp
);
2621 tcg_temp_free_i32(tmp32_1
);
2622 tcg_temp_free_i32(tmp32_2
);
2624 case 0x8c: /* SRDL R1,D2(B2) [RS] */
2625 case 0x8d: /* SLDL R1,D2(B2) [RS] */
2626 case 0x8e: /* SRDA R1,D2(B2) [RS] */
2627 insn
= ld_code4(env
, s
->pc
);
2628 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2629 tmp
= get_address(s
, 0, b2
, d2
); /* shift */
2630 tmp2
= tcg_temp_new_i64();
2631 tmp32_1
= load_reg32(r1
);
2632 tmp32_2
= load_reg32(r1
+ 1);
2633 tcg_gen_concat_i32_i64(tmp2
, tmp32_2
, tmp32_1
); /* operand */
2636 tcg_gen_shr_i64(tmp2
, tmp2
, tmp
);
2639 tcg_gen_shl_i64(tmp2
, tmp2
, tmp
);
2642 tcg_gen_sar_i64(tmp2
, tmp2
, tmp
);
2643 set_cc_s64(s
, tmp2
);
2646 tcg_gen_shri_i64(tmp
, tmp2
, 32);
2647 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
2648 store_reg32(r1
, tmp32_1
);
2649 tcg_gen_trunc_i64_i32(tmp32_2
, tmp2
);
2650 store_reg32(r1
+ 1, tmp32_2
);
2651 tcg_temp_free_i64(tmp
);
2652 tcg_temp_free_i64(tmp2
);
2654 case 0x98: /* LM R1,R3,D2(B2) [RS] */
2655 case 0x90: /* STM R1,R3,D2(B2) [RS] */
2656 insn
= ld_code4(env
, s
->pc
);
2657 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2659 tmp
= get_address(s
, 0, b2
, d2
);
2660 tmp2
= tcg_temp_new_i64();
2661 tmp3
= tcg_const_i64(4);
2662 tmp4
= tcg_const_i64(0xffffffff00000000ULL
);
2663 for (i
= r1
;; i
= (i
+ 1) % 16) {
2665 tcg_gen_qemu_ld32u(tmp2
, tmp
, get_mem_index(s
));
2666 tcg_gen_and_i64(regs
[i
], regs
[i
], tmp4
);
2667 tcg_gen_or_i64(regs
[i
], regs
[i
], tmp2
);
2669 tcg_gen_qemu_st32(regs
[i
], tmp
, get_mem_index(s
));
2674 tcg_gen_add_i64(tmp
, tmp
, tmp3
);
2676 tcg_temp_free_i64(tmp
);
2677 tcg_temp_free_i64(tmp2
);
2678 tcg_temp_free_i64(tmp3
);
2679 tcg_temp_free_i64(tmp4
);
2681 case 0x91: /* TM D1(B1),I2 [SI] */
2682 insn
= ld_code4(env
, s
->pc
);
2683 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2684 tmp2
= tcg_const_i64(i2
);
2685 tcg_gen_qemu_ld8u(tmp
, tmp
, get_mem_index(s
));
2686 cmp_64(s
, tmp
, tmp2
, CC_OP_TM_32
);
2687 tcg_temp_free_i64(tmp
);
2688 tcg_temp_free_i64(tmp2
);
2690 case 0x92: /* MVI D1(B1),I2 [SI] */
2691 insn
= ld_code4(env
, s
->pc
);
2692 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2693 tmp2
= tcg_const_i64(i2
);
2694 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2695 tcg_temp_free_i64(tmp
);
2696 tcg_temp_free_i64(tmp2
);
2698 case 0x94: /* NI D1(B1),I2 [SI] */
2699 case 0x96: /* OI D1(B1),I2 [SI] */
2700 case 0x97: /* XI D1(B1),I2 [SI] */
2701 insn
= ld_code4(env
, s
->pc
);
2702 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2703 tmp2
= tcg_temp_new_i64();
2704 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2707 tcg_gen_andi_i64(tmp2
, tmp2
, i2
);
2710 tcg_gen_ori_i64(tmp2
, tmp2
, i2
);
2713 tcg_gen_xori_i64(tmp2
, tmp2
, i2
);
2718 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2719 set_cc_nz_u64(s
, tmp2
);
2720 tcg_temp_free_i64(tmp
);
2721 tcg_temp_free_i64(tmp2
);
2723 case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
2724 insn
= ld_code4(env
, s
->pc
);
2725 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2726 tmp
= get_address(s
, 0, b2
, d2
);
2727 tmp32_1
= tcg_const_i32(r1
);
2728 tmp32_2
= tcg_const_i32(r3
);
2729 potential_page_fault(s
);
2730 gen_helper_lam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2731 tcg_temp_free_i64(tmp
);
2732 tcg_temp_free_i32(tmp32_1
);
2733 tcg_temp_free_i32(tmp32_2
);
2735 case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
2736 insn
= ld_code4(env
, s
->pc
);
2737 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2738 tmp
= get_address(s
, 0, b2
, d2
);
2739 tmp32_1
= tcg_const_i32(r1
);
2740 tmp32_2
= tcg_const_i32(r3
);
2741 potential_page_fault(s
);
2742 gen_helper_stam(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2743 tcg_temp_free_i64(tmp
);
2744 tcg_temp_free_i32(tmp32_1
);
2745 tcg_temp_free_i32(tmp32_2
);
2748 insn
= ld_code4(env
, s
->pc
);
2749 r1
= (insn
>> 20) & 0xf;
2750 op
= (insn
>> 16) & 0xf;
2752 disas_a7(env
, s
, op
, r1
, i2
);
2754 case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
2755 insn
= ld_code4(env
, s
->pc
);
2756 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2757 tmp
= get_address(s
, 0, b2
, d2
);
2758 tmp32_1
= tcg_const_i32(r1
);
2759 tmp32_2
= tcg_const_i32(r3
);
2760 potential_page_fault(s
);
2761 gen_helper_mvcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2763 tcg_temp_free_i64(tmp
);
2764 tcg_temp_free_i32(tmp32_1
);
2765 tcg_temp_free_i32(tmp32_2
);
2767 case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
2768 insn
= ld_code4(env
, s
->pc
);
2769 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2770 tmp
= get_address(s
, 0, b2
, d2
);
2771 tmp32_1
= tcg_const_i32(r1
);
2772 tmp32_2
= tcg_const_i32(r3
);
2773 potential_page_fault(s
);
2774 gen_helper_clcle(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2776 tcg_temp_free_i64(tmp
);
2777 tcg_temp_free_i32(tmp32_1
);
2778 tcg_temp_free_i32(tmp32_2
);
2780 #ifndef CONFIG_USER_ONLY
2781 case 0xac: /* STNSM D1(B1),I2 [SI] */
2782 case 0xad: /* STOSM D1(B1),I2 [SI] */
2783 check_privileged(s
);
2784 insn
= ld_code4(env
, s
->pc
);
2785 tmp
= decode_si(s
, insn
, &i2
, &b1
, &d1
);
2786 tmp2
= tcg_temp_new_i64();
2787 tcg_gen_shri_i64(tmp2
, psw_mask
, 56);
2788 tcg_gen_qemu_st8(tmp2
, tmp
, get_mem_index(s
));
2790 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2791 ((uint64_t)i2
<< 56) | 0x00ffffffffffffffULL
);
2793 tcg_gen_ori_i64(psw_mask
, psw_mask
, (uint64_t)i2
<< 56);
2795 tcg_temp_free_i64(tmp
);
2796 tcg_temp_free_i64(tmp2
);
2798 case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
2799 check_privileged(s
);
2800 insn
= ld_code4(env
, s
->pc
);
2801 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2802 tmp
= get_address(s
, 0, b2
, d2
);
2803 tmp2
= load_reg(r3
);
2804 tmp32_1
= tcg_const_i32(r1
);
2805 potential_page_fault(s
);
2806 gen_helper_sigp(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp2
);
2808 tcg_temp_free_i64(tmp
);
2809 tcg_temp_free_i64(tmp2
);
2810 tcg_temp_free_i32(tmp32_1
);
2812 case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
2813 check_privileged(s
);
2814 insn
= ld_code4(env
, s
->pc
);
2815 tmp
= decode_rx(s
, insn
, &r1
, &x2
, &b2
, &d2
);
2816 tmp32_1
= tcg_const_i32(r1
);
2817 potential_page_fault(s
);
2818 gen_helper_lra(cc_op
, cpu_env
, tmp
, tmp32_1
);
2820 tcg_temp_free_i64(tmp
);
2821 tcg_temp_free_i32(tmp32_1
);
2825 insn
= ld_code4(env
, s
->pc
);
2826 op
= (insn
>> 16) & 0xff;
2828 case 0x9c: /* STFPC D2(B2) [S] */
2830 b2
= (insn
>> 12) & 0xf;
2831 tmp32_1
= tcg_temp_new_i32();
2832 tmp
= tcg_temp_new_i64();
2833 tmp2
= get_address(s
, 0, b2
, d2
);
2834 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2835 tcg_gen_extu_i32_i64(tmp
, tmp32_1
);
2836 tcg_gen_qemu_st32(tmp
, tmp2
, get_mem_index(s
));
2837 tcg_temp_free_i32(tmp32_1
);
2838 tcg_temp_free_i64(tmp
);
2839 tcg_temp_free_i64(tmp2
);
2842 disas_b2(env
, s
, op
, insn
);
2847 insn
= ld_code4(env
, s
->pc
);
2848 op
= (insn
>> 16) & 0xff;
2849 r3
= (insn
>> 12) & 0xf; /* aka m3 */
2850 r1
= (insn
>> 4) & 0xf;
2852 disas_b3(env
, s
, op
, r3
, r1
, r2
);
2854 #ifndef CONFIG_USER_ONLY
2855 case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
2857 check_privileged(s
);
2858 insn
= ld_code4(env
, s
->pc
);
2859 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2860 tmp
= get_address(s
, 0, b2
, d2
);
2861 tmp32_1
= tcg_const_i32(r1
);
2862 tmp32_2
= tcg_const_i32(r3
);
2863 potential_page_fault(s
);
2864 gen_helper_stctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2865 tcg_temp_free_i64(tmp
);
2866 tcg_temp_free_i32(tmp32_1
);
2867 tcg_temp_free_i32(tmp32_2
);
2869 case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
2871 check_privileged(s
);
2872 insn
= ld_code4(env
, s
->pc
);
2873 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2874 tmp
= get_address(s
, 0, b2
, d2
);
2875 tmp32_1
= tcg_const_i32(r1
);
2876 tmp32_2
= tcg_const_i32(r3
);
2877 potential_page_fault(s
);
2878 gen_helper_lctl(cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2879 tcg_temp_free_i64(tmp
);
2880 tcg_temp_free_i32(tmp32_1
);
2881 tcg_temp_free_i32(tmp32_2
);
2885 insn
= ld_code4(env
, s
->pc
);
2886 r1
= (insn
>> 4) & 0xf;
2888 op
= (insn
>> 16) & 0xff;
2889 disas_b9(env
, s
, op
, r1
, r2
);
2891 case 0xba: /* CS R1,R3,D2(B2) [RS] */
2892 insn
= ld_code4(env
, s
->pc
);
2893 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2894 tmp
= get_address(s
, 0, b2
, d2
);
2895 tmp32_1
= tcg_const_i32(r1
);
2896 tmp32_2
= tcg_const_i32(r3
);
2897 potential_page_fault(s
);
2898 gen_helper_cs(cc_op
, cpu_env
, tmp32_1
, tmp
, tmp32_2
);
2900 tcg_temp_free_i64(tmp
);
2901 tcg_temp_free_i32(tmp32_1
);
2902 tcg_temp_free_i32(tmp32_2
);
2904 case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
2905 insn
= ld_code4(env
, s
->pc
);
2906 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2907 tmp
= get_address(s
, 0, b2
, d2
);
2908 tmp32_1
= load_reg32(r1
);
2909 tmp32_2
= tcg_const_i32(r3
);
2910 potential_page_fault(s
);
2911 gen_helper_clm(cc_op
, cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2913 tcg_temp_free_i64(tmp
);
2914 tcg_temp_free_i32(tmp32_1
);
2915 tcg_temp_free_i32(tmp32_2
);
2917 case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
2918 insn
= ld_code4(env
, s
->pc
);
2919 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2920 tmp
= get_address(s
, 0, b2
, d2
);
2921 tmp32_1
= load_reg32(r1
);
2922 tmp32_2
= tcg_const_i32(r3
);
2923 potential_page_fault(s
);
2924 gen_helper_stcm(cpu_env
, tmp32_1
, tmp32_2
, tmp
);
2925 tcg_temp_free_i64(tmp
);
2926 tcg_temp_free_i32(tmp32_1
);
2927 tcg_temp_free_i32(tmp32_2
);
2929 case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
2930 insn
= ld_code4(env
, s
->pc
);
2931 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
2933 /* effectively a 32-bit load */
2934 tmp
= get_address(s
, 0, b2
, d2
);
2935 tmp32_1
= tcg_temp_new_i32();
2936 tmp32_2
= tcg_const_i32(r3
);
2937 tcg_gen_qemu_ld32u(tmp
, tmp
, get_mem_index(s
));
2938 store_reg32_i64(r1
, tmp
);
2939 tcg_gen_trunc_i64_i32(tmp32_1
, tmp
);
2940 set_cc_icm(s
, tmp32_2
, tmp32_1
);
2941 tcg_temp_free_i64(tmp
);
2942 tcg_temp_free_i32(tmp32_1
);
2943 tcg_temp_free_i32(tmp32_2
);
2945 uint32_t mask
= 0x00ffffffUL
;
2946 uint32_t shift
= 24;
2948 tmp
= get_address(s
, 0, b2
, d2
);
2949 tmp2
= tcg_temp_new_i64();
2950 tmp32_1
= load_reg32(r1
);
2951 tmp32_2
= tcg_temp_new_i32();
2952 tmp32_3
= tcg_const_i32(r3
);
2953 tmp32_4
= tcg_const_i32(0);
2956 tcg_gen_qemu_ld8u(tmp2
, tmp
, get_mem_index(s
));
2957 tcg_gen_trunc_i64_i32(tmp32_2
, tmp2
);
2959 tcg_gen_shli_i32(tmp32_2
, tmp32_2
, shift
);
2961 tcg_gen_andi_i32(tmp32_1
, tmp32_1
, mask
);
2962 tcg_gen_or_i32(tmp32_1
, tmp32_1
, tmp32_2
);
2963 tcg_gen_or_i32(tmp32_4
, tmp32_4
, tmp32_2
);
2964 tcg_gen_addi_i64(tmp
, tmp
, 1);
2966 m3
= (m3
<< 1) & 0xf;
2967 mask
= (mask
>> 8) | 0xff000000UL
;
2970 store_reg32(r1
, tmp32_1
);
2971 set_cc_icm(s
, tmp32_3
, tmp32_4
);
2972 tcg_temp_free_i64(tmp
);
2973 tcg_temp_free_i64(tmp2
);
2974 tcg_temp_free_i32(tmp32_1
);
2975 tcg_temp_free_i32(tmp32_2
);
2976 tcg_temp_free_i32(tmp32_3
);
2977 tcg_temp_free_i32(tmp32_4
);
2979 /* i.e. env->cc = 0 */
2980 gen_op_movi_cc(s
, 0);
2983 case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
2984 case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
2985 case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
2986 case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
2987 case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
2988 case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
2989 case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
2990 insn
= ld_code6(env
, s
->pc
);
2991 vl
= tcg_const_i32((insn
>> 32) & 0xff);
2992 b1
= (insn
>> 28) & 0xf;
2993 b2
= (insn
>> 12) & 0xf;
2994 d1
= (insn
>> 16) & 0xfff;
2996 tmp
= get_address(s
, 0, b1
, d1
);
2997 tmp2
= get_address(s
, 0, b2
, d2
);
3000 gen_op_mvc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
3003 potential_page_fault(s
);
3004 gen_helper_nc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
3008 gen_op_clc(s
, (insn
>> 32) & 0xff, tmp
, tmp2
);
3011 potential_page_fault(s
);
3012 gen_helper_oc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
3016 potential_page_fault(s
);
3017 gen_helper_xc(cc_op
, cpu_env
, vl
, tmp
, tmp2
);
3021 potential_page_fault(s
);
3022 gen_helper_tr(cpu_env
, vl
, tmp
, tmp2
);
3026 potential_page_fault(s
);
3027 gen_helper_unpk(cpu_env
, vl
, tmp
, tmp2
);
3032 tcg_temp_free_i64(tmp
);
3033 tcg_temp_free_i64(tmp2
);
3035 #ifndef CONFIG_USER_ONLY
3036 case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
3037 case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
3038 check_privileged(s
);
3039 potential_page_fault(s
);
3040 insn
= ld_code6(env
, s
->pc
);
3041 r1
= (insn
>> 36) & 0xf;
3042 r3
= (insn
>> 32) & 0xf;
3043 b1
= (insn
>> 28) & 0xf;
3044 d1
= (insn
>> 16) & 0xfff;
3045 b2
= (insn
>> 12) & 0xf;
3049 tmp2
= get_address(s
, 0, b1
, d1
);
3050 tmp3
= get_address(s
, 0, b2
, d2
);
3052 gen_helper_mvcp(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
3054 gen_helper_mvcs(cc_op
, cpu_env
, tmp
, tmp2
, tmp3
);
3057 tcg_temp_free_i64(tmp
);
3058 tcg_temp_free_i64(tmp2
);
3059 tcg_temp_free_i64(tmp3
);
3063 insn
= ld_code6(env
, s
->pc
);
3066 r1
= (insn
>> 36) & 0xf;
3067 x2
= (insn
>> 32) & 0xf;
3068 b2
= (insn
>> 28) & 0xf;
3069 d2
= ((int)((((insn
>> 16) & 0xfff)
3070 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
3071 disas_e3(env
, s
, op
, r1
, x2
, b2
, d2
);
3073 #ifndef CONFIG_USER_ONLY
3075 /* Test Protection */
3076 check_privileged(s
);
3077 insn
= ld_code6(env
, s
->pc
);
3079 disas_e5(env
, s
, insn
);
3083 insn
= ld_code6(env
, s
->pc
);
3086 r1
= (insn
>> 36) & 0xf;
3087 r3
= (insn
>> 32) & 0xf;
3088 b2
= (insn
>> 28) & 0xf;
3089 d2
= ((int)((((insn
>> 16) & 0xfff)
3090 | ((insn
<< 4) & 0xff000)) << 12)) >> 12;
3091 disas_eb(env
, s
, op
, r1
, r3
, b2
, d2
);
3094 insn
= ld_code6(env
, s
->pc
);
3097 r1
= (insn
>> 36) & 0xf;
3098 x2
= (insn
>> 32) & 0xf;
3099 b2
= (insn
>> 28) & 0xf;
3100 d2
= (short)((insn
>> 16) & 0xfff);
3101 r1b
= (insn
>> 12) & 0xf;
3102 disas_ed(env
, s
, op
, r1
, x2
, b2
, d2
, r1b
);
3105 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
3106 gen_illegal_opcode(s
);
3111 /* ====================================================================== */
3112 /* Define the insn format enumeration. */
3113 #define F0(N) FMT_##N,
3114 #define F1(N, X1) F0(N)
3115 #define F2(N, X1, X2) F0(N)
3116 #define F3(N, X1, X2, X3) F0(N)
3117 #define F4(N, X1, X2, X3, X4) F0(N)
3118 #define F5(N, X1, X2, X3, X4, X5) F0(N)
3121 #include "insn-format.def"
3131 /* Define a structure to hold the decoded fields. We'll store each inside
3132 an array indexed by an enum. In order to conserve memory, we'll arrange
3133 for fields that do not exist at the same time to overlap, thus the "C"
3134 for compact. For checking purposes there is an "O" for original index
3135 as well that will be applied to availability bitmaps. */
3137 enum DisasFieldIndexO
{
3160 enum DisasFieldIndexC
{
3191 struct DisasFields
{
3194 unsigned presentC
:16;
3195 unsigned int presentO
;
3199 /* This is the way fields are to be accessed out of DisasFields. */
3200 #define have_field(S, F) have_field1((S), FLD_O_##F)
3201 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
3203 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
3205 return (f
->presentO
>> c
) & 1;
3208 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
3209 enum DisasFieldIndexC c
)
3211 assert(have_field1(f
, o
));
3215 /* Describe the layout of each field in each format. */
3216 typedef struct DisasField
{
3218 unsigned int size
:8;
3219 unsigned int type
:2;
3220 unsigned int indexC
:6;
3221 enum DisasFieldIndexO indexO
:8;
3224 typedef struct DisasFormatInfo
{
3225 DisasField op
[NUM_C_FIELD
];
3228 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
3229 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
3230 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3231 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
3232 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3233 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3234 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
3235 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3236 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3237 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
3238 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
3239 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
3240 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
3241 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
3243 #define F0(N) { { } },
3244 #define F1(N, X1) { { X1 } },
3245 #define F2(N, X1, X2) { { X1, X2 } },
3246 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
3247 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
3248 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
3250 static const DisasFormatInfo format_info
[] = {
3251 #include "insn-format.def"
3269 /* Generally, we'll extract operands into this structures, operate upon
3270 them, and store them back. See the "in1", "in2", "prep", "wout" sets
3271 of routines below for more details. */
3273 bool g_out
, g_out2
, g_in1
, g_in2
;
3274 TCGv_i64 out
, out2
, in1
, in2
;
3278 /* Return values from translate_one, indicating the state of the TB. */
3280 /* Continue the TB. */
3282 /* We have emitted one or more goto_tb. No fixup required. */
3284 /* We are not using a goto_tb (for whatever reason), but have updated
3285 the PC (for whatever reason), so there's no need to do it again on
3288 /* We are exiting the TB, but have neither emitted a goto_tb, nor
3289 updated the PC for the next instruction to be executed. */
3291 /* We are ending the TB with a noreturn function call, e.g. longjmp.
3292 No following code will be executed. */
3296 typedef enum DisasFacility
{
3297 FAC_Z
, /* zarch (default) */
3298 FAC_CASS
, /* compare and swap and store */
3299 FAC_CASS2
, /* compare and swap and store 2*/
3300 FAC_DFP
, /* decimal floating point */
3301 FAC_DFPR
, /* decimal floating point rounding */
3302 FAC_DO
, /* distinct operands */
3303 FAC_EE
, /* execute extensions */
3304 FAC_EI
, /* extended immediate */
3305 FAC_FPE
, /* floating point extension */
3306 FAC_FPSSH
, /* floating point support sign handling */
3307 FAC_FPRGR
, /* FPR-GR transfer */
3308 FAC_GIE
, /* general instructions extension */
3309 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
3310 FAC_HW
, /* high-word */
3311 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
3312 FAC_LOC
, /* load/store on condition */
3313 FAC_LD
, /* long displacement */
3314 FAC_PC
, /* population count */
3315 FAC_SCF
, /* store clock fast */
3316 FAC_SFLE
, /* store facility list extended */
3322 DisasFacility fac
:6;
3326 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
3327 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
3328 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
3329 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
3330 void (*help_cout
)(DisasContext
*, DisasOps
*);
3331 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
3336 /* ====================================================================== */
3337 /* Miscelaneous helpers, used by several operations. */
3339 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
3341 if (dest
== s
->next_pc
) {
3344 if (use_goto_tb(s
, dest
)) {
3345 gen_update_cc_op(s
);
3347 tcg_gen_movi_i64(psw_addr
, dest
);
3348 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
3349 return EXIT_GOTO_TB
;
3351 tcg_gen_movi_i64(psw_addr
, dest
);
3352 return EXIT_PC_UPDATED
;
3356 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
3357 bool is_imm
, int imm
, TCGv_i64 cdest
)
3360 uint64_t dest
= s
->pc
+ 2 * imm
;
3363 /* Take care of the special cases first. */
3364 if (c
->cond
== TCG_COND_NEVER
) {
3369 if (dest
== s
->next_pc
) {
3370 /* Branch to next. */
3374 if (c
->cond
== TCG_COND_ALWAYS
) {
3375 ret
= help_goto_direct(s
, dest
);
3379 if (TCGV_IS_UNUSED_I64(cdest
)) {
3380 /* E.g. bcr %r0 -> no branch. */
3384 if (c
->cond
== TCG_COND_ALWAYS
) {
3385 tcg_gen_mov_i64(psw_addr
, cdest
);
3386 ret
= EXIT_PC_UPDATED
;
3391 if (use_goto_tb(s
, s
->next_pc
)) {
3392 if (is_imm
&& use_goto_tb(s
, dest
)) {
3393 /* Both exits can use goto_tb. */
3394 gen_update_cc_op(s
);
3396 lab
= gen_new_label();
3398 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3400 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3403 /* Branch not taken. */
3405 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3406 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3411 tcg_gen_movi_i64(psw_addr
, dest
);
3412 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
3416 /* Fallthru can use goto_tb, but taken branch cannot. */
3417 /* Store taken branch destination before the brcond. This
3418 avoids having to allocate a new local temp to hold it.
3419 We'll overwrite this in the not taken case anyway. */
3421 tcg_gen_mov_i64(psw_addr
, cdest
);
3424 lab
= gen_new_label();
3426 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
3428 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
3431 /* Branch not taken. */
3432 gen_update_cc_op(s
);
3434 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
3435 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
3439 tcg_gen_movi_i64(psw_addr
, dest
);
3441 ret
= EXIT_PC_UPDATED
;
3444 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
3445 Most commonly we're single-stepping or some other condition that
3446 disables all use of goto_tb. Just update the PC and exit. */
3448 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
3450 cdest
= tcg_const_i64(dest
);
3454 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
3457 TCGv_i32 t0
= tcg_temp_new_i32();
3458 TCGv_i64 t1
= tcg_temp_new_i64();
3459 TCGv_i64 z
= tcg_const_i64(0);
3460 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
3461 tcg_gen_extu_i32_i64(t1
, t0
);
3462 tcg_temp_free_i32(t0
);
3463 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
3464 tcg_temp_free_i64(t1
);
3465 tcg_temp_free_i64(z
);
3469 tcg_temp_free_i64(cdest
);
3471 tcg_temp_free_i64(next
);
3473 ret
= EXIT_PC_UPDATED
;
3481 /* ====================================================================== */
3482 /* The operations. These perform the bulk of the work for any insn,
3483 usually after the operands have been loaded and output initialized. */
3485 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
3487 gen_helper_abs_i64(o
->out
, o
->in2
);
3491 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
3493 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3497 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
3501 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3503 /* XXX possible optimization point */
3505 cc
= tcg_temp_new_i64();
3506 tcg_gen_extu_i32_i64(cc
, cc_op
);
3507 tcg_gen_shri_i64(cc
, cc
, 1);
3509 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3510 tcg_temp_free_i64(cc
);
3514 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
3516 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
3520 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
3522 int shift
= s
->insn
->data
& 0xff;
3523 int size
= s
->insn
->data
>> 8;
3524 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3527 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3528 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3529 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
3531 /* Produce the CC from only the bits manipulated. */
3532 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3533 set_cc_nz_u64(s
, cc_dst
);
3537 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
3539 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3540 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
3541 tcg_gen_mov_i64(psw_addr
, o
->in2
);
3542 return EXIT_PC_UPDATED
;
3548 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
3550 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
3551 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
3554 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
3556 int m1
= get_field(s
->fields
, m1
);
3557 bool is_imm
= have_field(s
->fields
, i2
);
3558 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3561 disas_jcc(s
, &c
, m1
);
3562 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3565 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
3567 int r1
= get_field(s
->fields
, r1
);
3568 bool is_imm
= have_field(s
->fields
, i2
);
3569 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3573 c
.cond
= TCG_COND_NE
;
3578 t
= tcg_temp_new_i64();
3579 tcg_gen_subi_i64(t
, regs
[r1
], 1);
3580 store_reg32_i64(r1
, t
);
3581 c
.u
.s32
.a
= tcg_temp_new_i32();
3582 c
.u
.s32
.b
= tcg_const_i32(0);
3583 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
3584 tcg_temp_free_i64(t
);
3586 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3589 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
3591 int r1
= get_field(s
->fields
, r1
);
3592 bool is_imm
= have_field(s
->fields
, i2
);
3593 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
3596 c
.cond
= TCG_COND_NE
;
3601 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
3602 c
.u
.s64
.a
= regs
[r1
];
3603 c
.u
.s64
.b
= tcg_const_i64(0);
3605 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
3608 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
3610 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3611 return_low128(o
->out
);
3615 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
3617 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3618 return_low128(o
->out
);
3622 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
3624 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
3625 return_low128(o
->out
);
3629 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
3631 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3632 return_low128(o
->out
);
3636 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
3638 int shift
= s
->insn
->data
& 0xff;
3639 int size
= s
->insn
->data
>> 8;
3640 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
3644 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
3646 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
3650 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
3652 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
3656 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
3658 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
3662 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
3664 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
3668 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
3670 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
3674 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
3676 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
3680 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
3682 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
3686 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
3689 o
->g_out
= o
->g_in2
;
3690 TCGV_UNUSED_I64(o
->in2
);
3695 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3697 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3701 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3703 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3704 return_low128(o
->out2
);
3708 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3710 gen_helper_nabs_i64(o
->out
, o
->in2
);
3714 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3716 tcg_gen_neg_i64(o
->out
, o
->in2
);
3720 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3722 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3726 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3728 int shift
= s
->insn
->data
& 0xff;
3729 int size
= s
->insn
->data
>> 8;
3730 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3733 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3734 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3736 /* Produce the CC from only the bits manipulated. */
3737 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3738 set_cc_nz_u64(s
, cc_dst
);
3742 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3744 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3748 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3750 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3754 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3756 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3760 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3762 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3766 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3768 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3772 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3777 tcg_gen_not_i64(o
->in2
, o
->in2
);
3778 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3780 /* XXX possible optimization point */
3782 cc
= tcg_temp_new_i64();
3783 tcg_gen_extu_i32_i64(cc
, cc_op
);
3784 tcg_gen_shri_i64(cc
, cc
, 1);
3785 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3786 tcg_temp_free_i64(cc
);
3790 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3792 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3796 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3798 int shift
= s
->insn
->data
& 0xff;
3799 int size
= s
->insn
->data
>> 8;
3800 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3803 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3804 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3806 /* Produce the CC from only the bits manipulated. */
3807 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3808 set_cc_nz_u64(s
, cc_dst
);
3812 /* ====================================================================== */
3813 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3814 the original inputs), update the various cc data structures in order to
3815 be able to compute the new condition code. */
3817 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3819 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3822 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3824 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3827 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3829 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3832 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3834 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3837 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3839 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3842 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3844 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3847 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3849 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3852 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3854 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3857 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3859 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3862 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3864 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3867 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3869 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3872 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3874 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3877 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3879 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3882 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3884 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3887 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3889 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3892 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3894 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3897 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3899 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3900 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3903 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3905 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3908 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3910 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3913 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3915 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
/* Record CC for a signed 32-bit subtraction: both sources and the result
   are saved so the CC helper can detect overflow as well as sign.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

/* Record CC for a signed 64-bit subtraction.  */
static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* Record CC for an unsigned (logical) 32-bit subtraction.  */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

/* Record CC for an unsigned (logical) 64-bit subtraction.  */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

/* Record CC for a 32-bit subtract-with-borrow.  */
static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

/* Record CC for a 64-bit subtract-with-borrow.  */
static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}
/* ====================================================================== */
/* The "PREPeration" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

/* Allocate a fresh temporary for the single output value.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}

/* Allocate fresh temporaries for an output register Pair.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
3965 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3967 o
->out
= regs
[get_field(f
, r1
)];
3971 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3973 /* ??? Specification exception: r1 must be even. */
3974 int r1
= get_field(f
, r1
);
3976 o
->out2
= regs
[(r1
+ 1) & 15];
3977 o
->g_out
= o
->g_out2
= true;
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

/* Store the 64-bit result into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}

/* Store the low 32 bits of the result into register r1,
   leaving the high half of the register untouched.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
/* Store a pair of 32-bit results into the even/odd register pair
   r1 / r1+1 (out goes to r1, out2 to r1+1).  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64((r1 + 1) & 15, o->out2);
}

/* Store a single 64-bit result split as a Doubleword across the register
   pair: low 32 bits to r1+1, high 32 bits to r1.  Note this clobbers
   o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    store_reg32_i64((r1 + 1) & 15, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
/* Store the 32-bit result into r1, but only when r1 and r2 differ
   (when they are the same register the write would be a no-op and
   is skipped entirely).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}

/* Store the low 32 bits of the result to the memory operand at addr1.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}

/* Store the full 64-bit result to the memory operand at addr1.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

/* Load general register r1 into a fresh temporary as operand 1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
4038 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4040 o
->in1
= regs
[get_field(f
, r1
)];
/* Load the odd register of the r1 pair (r1+1) as operand 1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = load_reg((r1 + 1) & 15);
}

/* Load register r1+1 sign-extended from 32 bits as operand 1.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
}

/* Load register r1+1 zero-extended from 32 bits as operand 1.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
}
/* Build a 64-bit operand from the 32-bit register pair: the low half of
   r1 becomes the high word, the low half of r1+1 the low word
   (tcg_gen_concat32_i64 places its second argument in the high part).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* ??? Specification exception: r1 must be even.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
/* Load general register r2 as operand 1.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}

/* Load general register r3 as operand 1.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
/* Compute the effective address of the first memory operand from the
   b1 base register and d1 displacement (no index register) into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
4090 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4093 o
->in1
= tcg_temp_new_i64();
4094 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4097 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4100 o
->in1
= tcg_temp_new_i64();
4101 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4104 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4107 o
->in1
= tcg_temp_new_i64();
4108 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4111 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4114 o
->in1
= tcg_temp_new_i64();
4115 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4118 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4121 o
->in1
= tcg_temp_new_i64();
4122 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4125 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4128 o
->in1
= tcg_temp_new_i64();
4129 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

/* Load general register r2 into a fresh temporary as operand 2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
4140 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4142 o
->in2
= regs
[get_field(f
, r2
)];
4146 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4148 int r2
= get_field(f
, r2
);
4150 o
->in2
= load_reg(r2
);
/* Load register r2 sign-extended from 8 bits as operand 2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load register r2 zero-extended from 8 bits as operand 2.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load register r2 sign-extended from 16 bits as operand 2.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load register r2 zero-extended from 16 bits as operand 2.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
/* Load general register r3 as operand 2.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
/* Load register r2 sign-extended from 32 bits as operand 2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}

/* Load register r2 zero-extended from 32 bits as operand 2.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
/* Compute the effective address x2 + b2 + d2 as operand 2.  The index
   field x2 is optional in some formats, in which case it contributes 0.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
/* Compute a PC-relative address as operand 2: current insn address plus
   the signed halfword-scaled immediate i2 (i2 counts halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
4206 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4209 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4212 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4215 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4218 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4221 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4224 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4227 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4230 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4233 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4236 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4239 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4242 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4245 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4248 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4251 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
/* Use the immediate field i2 directly as operand 2 (sign handling as
   declared in the format table).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}

/* Use i2 truncated to an unsigned 8-bit value as operand 2.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}

/* Use i2 truncated to an unsigned 16-bit value as operand 2.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}

/* Use i2 truncated to an unsigned 32-bit value as operand 2.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
/* Use unsigned 16-bit i2 shifted left by the per-insn data amount as
   operand 2.  The shift count comes from the insn table's data field;
   i2 is widened to uint64_t first so the shift cannot overflow.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}

/* Use unsigned 32-bit i2 shifted left by the per-insn data amount as
   operand 2.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
4286 /* ====================================================================== */
4288 /* Find opc within the table of insns. This is formulated as a switch
4289 statement so that (1) we get compile-time notice of cut-paste errors
4290 for duplicated opcodes, and (2) the compiler generates the binary
4291 search tree, rather than us having to post-process the table. */
4293 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4294 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4296 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4298 enum DisasInsnEnum
{
4299 #include "insn-data.def"
4303 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4308 .help_in1 = in1_##I1, \
4309 .help_in2 = in2_##I2, \
4310 .help_prep = prep_##P, \
4311 .help_wout = wout_##W, \
4312 .help_cout = cout_##CC, \
4313 .help_op = op_##OP, \
4317 /* Allow 0 to be used for NULL in the table below. */
4325 static const DisasInsn insn_info
[] = {
4326 #include "insn-data.def"
4330 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4331 case OPC: return &insn_info[insn_ ## NM];
4333 static const DisasInsn
*lookup_opc(uint16_t opc
)
4336 #include "insn-data.def"
4345 /* Extract a field from the insn. The INSN should be left-aligned in
4346 the uint64_t so that we can more easily utilize the big-bit-endian
4347 definitions we extract from the Principals of Operation. */
4349 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4357 /* Zero extract the field from the insn. */
4358 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4360 /* Sign-extend, or un-swap the field as necessary. */
4362 case 0: /* unsigned */
4364 case 1: /* signed */
4365 assert(f
->size
<= 32);
4366 m
= 1u << (f
->size
- 1);
4369 case 2: /* dl+dh split, signed 20 bit. */
4370 r
= ((int8_t)r
<< 12) | (r
>> 8);
4376 /* Validate that the "compressed" encoding we selected above is valid.
4377 I.e. we havn't make two different original fields overlap. */
4378 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4379 o
->presentC
|= 1 << f
->indexC
;
4380 o
->presentO
|= 1 << f
->indexO
;
4382 o
->c
[f
->indexC
] = r
;
4385 /* Lookup the insn at the current PC, extracting the operands into O and
4386 returning the info struct for the insn. Returns NULL for invalid insn. */
4388 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4391 uint64_t insn
, pc
= s
->pc
;
4393 const DisasInsn
*info
;
4395 insn
= ld_code2(env
, pc
);
4396 op
= (insn
>> 8) & 0xff;
4397 ilen
= get_ilen(op
);
4398 s
->next_pc
= s
->pc
+ ilen
;
4405 insn
= ld_code4(env
, pc
) << 32;
4408 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4414 /* We can't actually determine the insn format until we've looked up
4415 the full insn opcode. Which we can't do without locating the
4416 secondary opcode. Assume by default that OP2 is at bit 40; for
4417 those smaller insns that don't actually have a secondary opcode
4418 this will correctly result in OP2 = 0. */
4424 case 0xb2: /* S, RRF, RRE */
4425 case 0xb3: /* RRE, RRD, RRF */
4426 case 0xb9: /* RRE, RRF */
4427 case 0xe5: /* SSE, SIL */
4428 op2
= (insn
<< 8) >> 56;
4432 case 0xc0: /* RIL */
4433 case 0xc2: /* RIL */
4434 case 0xc4: /* RIL */
4435 case 0xc6: /* RIL */
4436 case 0xc8: /* SSF */
4437 case 0xcc: /* RIL */
4438 op2
= (insn
<< 12) >> 60;
4440 case 0xd0 ... 0xdf: /* SS */
4446 case 0xee ... 0xf3: /* SS */
4447 case 0xf8 ... 0xfd: /* SS */
4451 op2
= (insn
<< 40) >> 56;
4455 memset(f
, 0, sizeof(*f
));
4459 /* Lookup the instruction. */
4460 info
= lookup_opc(op
<< 8 | op2
);
4462 /* If we found it, extract the operands. */
4464 DisasFormat fmt
= info
->fmt
;
4467 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4468 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4474 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4476 const DisasInsn
*insn
;
4477 ExitStatus ret
= NO_EXIT
;
4481 insn
= extract_insn(env
, s
, &f
);
4483 /* If not found, try the old interpreter. This includes ILLOPC. */
4485 disas_s390_insn(env
, s
);
4486 switch (s
->is_jmp
) {
4494 ret
= EXIT_PC_UPDATED
;
4497 ret
= EXIT_NORETURN
;
4507 /* Set up the strutures we use to communicate with the helpers. */
4510 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4511 TCGV_UNUSED_I64(o
.out
);
4512 TCGV_UNUSED_I64(o
.out2
);
4513 TCGV_UNUSED_I64(o
.in1
);
4514 TCGV_UNUSED_I64(o
.in2
);
4515 TCGV_UNUSED_I64(o
.addr1
);
4517 /* Implement the instruction. */
4518 if (insn
->help_in1
) {
4519 insn
->help_in1(s
, &f
, &o
);
4521 if (insn
->help_in2
) {
4522 insn
->help_in2(s
, &f
, &o
);
4524 if (insn
->help_prep
) {
4525 insn
->help_prep(s
, &f
, &o
);
4527 if (insn
->help_op
) {
4528 ret
= insn
->help_op(s
, &o
);
4530 if (insn
->help_wout
) {
4531 insn
->help_wout(s
, &f
, &o
);
4533 if (insn
->help_cout
) {
4534 insn
->help_cout(s
, &o
);
4537 /* Free any temporaries created by the helpers. */
4538 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4539 tcg_temp_free_i64(o
.out
);
4541 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4542 tcg_temp_free_i64(o
.out2
);
4544 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4545 tcg_temp_free_i64(o
.in1
);
4547 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4548 tcg_temp_free_i64(o
.in2
);
4550 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4551 tcg_temp_free_i64(o
.addr1
);
4554 /* Advance to the next instruction. */
4559 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4560 TranslationBlock
*tb
,
4564 target_ulong pc_start
;
4565 uint64_t next_page_start
;
4566 uint16_t *gen_opc_end
;
4568 int num_insns
, max_insns
;
4576 if (!(tb
->flags
& FLAG_MASK_64
)) {
4577 pc_start
&= 0x7fffffff;
4582 dc
.cc_op
= CC_OP_DYNAMIC
;
4583 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4584 dc
.is_jmp
= DISAS_NEXT
;
4586 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4588 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4591 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4592 if (max_insns
== 0) {
4593 max_insns
= CF_COUNT_MASK
;
4600 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4604 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4607 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4608 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4609 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4610 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4612 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4616 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4617 tcg_gen_debug_insn_start(dc
.pc
);
4621 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4622 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4623 if (bp
->pc
== dc
.pc
) {
4624 status
= EXIT_PC_STALE
;
4630 if (status
== NO_EXIT
) {
4631 status
= translate_one(env
, &dc
);
4634 /* If we reach a page boundary, are single stepping,
4635 or exhaust instruction count, stop generation. */
4636 if (status
== NO_EXIT
4637 && (dc
.pc
>= next_page_start
4638 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4639 || num_insns
>= max_insns
4641 || env
->singlestep_enabled
)) {
4642 status
= EXIT_PC_STALE
;
4644 } while (status
== NO_EXIT
);
4646 if (tb
->cflags
& CF_LAST_IO
) {
4655 update_psw_addr(&dc
);
4657 case EXIT_PC_UPDATED
:
4658 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4659 gen_op_calc_cc(&dc
);
4661 /* Next TB starts off with CC_OP_DYNAMIC,
4662 so make sure the cc op type is in env */
4663 gen_op_set_cc_op(&dc
);
4666 gen_exception(EXCP_DEBUG
);
4668 /* Generate the return instruction */
4676 gen_icount_end(tb
, num_insns
);
4677 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4679 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4682 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4685 tb
->size
= dc
.pc
- pc_start
;
4686 tb
->icount
= num_insns
;
4689 #if defined(S390X_DEBUG_DISAS)
4690 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4691 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4692 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
/* Translate a TB without recording PC-to-opcode mapping info.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

/* Translate a TB while recording PC-to-opcode mapping info, used for
   restoring CPU state at an arbitrary instruction within the TB.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4708 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4711 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4712 cc_op
= gen_opc_cc_op
[pc_pos
];
4713 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {