/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Trace the raw instruction word when verbose disas logging is enabled
   (LOG_DISAS is a no-op unless S390X_DEBUG_DISAS_VERBOSE is defined). */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
490 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
493 static inline void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
495 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
498 static inline void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
500 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
503 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
506 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
509 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
512 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
515 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
517 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
520 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
522 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
525 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp
= tcg_const_i32(v2
);
529 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
530 tcg_temp_free_i32(tmp
);
533 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
535 TCGv_i32 tmp
= tcg_const_i32(v2
);
536 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
537 tcg_temp_free_i32(tmp
);
540 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
542 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
545 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
547 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
550 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
552 TCGv_i64 tmp
= tcg_const_i64(v2
);
554 tcg_temp_free_i64(tmp
);
557 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
559 TCGv_i64 tmp
= tcg_const_i64(v2
);
561 tcg_temp_free_i64(tmp
);
564 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
566 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
569 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
571 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext
*s
)
577 tcg_gen_discard_i64(cc_src
);
578 tcg_gen_discard_i64(cc_dst
);
579 tcg_gen_discard_i64(cc_vr
);
580 s
->cc_op
= CC_OP_STATIC
;
583 static inline void gen_op_set_cc_op(DisasContext
*s
)
585 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
586 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
590 static inline void gen_update_cc_op(DisasContext
*s
)
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext
*s
)
598 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
599 TCGv_i64 dummy
= tcg_const_i64(0);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
625 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
630 case CC_OP_LTUGTU_32
:
631 case CC_OP_LTUGTU_64
:
638 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
653 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
664 tcg_temp_free_i64(dummy
);
666 /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
674 *r1
= (insn
>> 4) & 0xf;
678 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
679 int *x2
, int *b2
, int *d2
)
683 *r1
= (insn
>> 20) & 0xf;
684 *x2
= (insn
>> 16) & 0xf;
685 *b2
= (insn
>> 12) & 0xf;
688 return get_address(s
, *x2
, *b2
, *d2
);
691 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
696 *r1
= (insn
>> 20) & 0xf;
698 *r3
= (insn
>> 16) & 0xf;
699 *b2
= (insn
>> 12) & 0xf;
703 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
708 *i2
= (insn
>> 16) & 0xff;
709 *b1
= (insn
>> 12) & 0xf;
712 return get_address(s
, 0, *b1
, *d1
);
715 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
719 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
720 && !s
->singlestep_enabled
721 && !(s
->tb
->cflags
& CF_LAST_IO
));
724 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
728 if (use_goto_tb(s
, pc
)) {
729 tcg_gen_goto_tb(tb_num
);
730 tcg_gen_movi_i64(psw_addr
, pc
);
731 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr
, pc
);
739 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss
[cc_op
]++;
746 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit
[cc_op
]++;
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond
[16] = {
757 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
758 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
759 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
760 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
761 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
762 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
763 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
764 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond
[16] = {
771 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
773 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
775 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
785 enum cc_op old_cc_op
= s
->cc_op
;
787 if (mask
== 15 || mask
== 0) {
788 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
791 c
->g1
= c
->g2
= true;
796 /* Find the TCG condition for the mask + cc op. */
802 cond
= ltgt_cond
[mask
];
803 if (cond
== TCG_COND_NEVER
) {
806 account_inline_branch(s
, old_cc_op
);
809 case CC_OP_LTUGTU_32
:
810 case CC_OP_LTUGTU_64
:
811 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
819 cond
= nz_cond
[mask
];
820 if (cond
== TCG_COND_NEVER
) {
823 account_inline_branch(s
, old_cc_op
);
838 account_inline_branch(s
, old_cc_op
);
853 account_inline_branch(s
, old_cc_op
);
857 switch (mask
& 0xa) {
858 case 8: /* src == 0 -> no one bit found */
861 case 2: /* src != 0 -> one bit found */
867 account_inline_branch(s
, old_cc_op
);
872 /* Calculate cc value. */
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s
, old_cc_op
);
880 old_cc_op
= CC_OP_STATIC
;
881 cond
= TCG_COND_NEVER
;
885 /* Load up the arguments of the comparison. */
887 c
->g1
= c
->g2
= false;
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
893 c
->u
.s32
.b
= tcg_const_i32(0);
896 case CC_OP_LTUGTU_32
:
898 c
->u
.s32
.a
= tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
900 c
->u
.s32
.b
= tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
908 c
->u
.s64
.b
= tcg_const_i64(0);
912 case CC_OP_LTUGTU_64
:
915 c
->g1
= c
->g2
= true;
921 c
->u
.s64
.a
= tcg_temp_new_i64();
922 c
->u
.s64
.b
= tcg_const_i64(0);
923 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
933 c
->u
.s32
.b
= tcg_const_i32(3);
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
941 c
->u
.s32
.b
= tcg_const_i32(1);
943 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x8 | 0x4: /* cc < 2 */
952 c
->u
.s32
.b
= tcg_const_i32(2);
954 case 0x8: /* cc == 0 */
956 c
->u
.s32
.b
= tcg_const_i32(0);
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
960 c
->u
.s32
.b
= tcg_const_i32(0);
962 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
965 c
->u
.s32
.a
= tcg_temp_new_i32();
966 c
->u
.s32
.b
= tcg_const_i32(0);
967 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
969 case 0x4: /* cc == 1 */
971 c
->u
.s32
.b
= tcg_const_i32(1);
973 case 0x2 | 0x1: /* cc > 1 */
975 c
->u
.s32
.b
= tcg_const_i32(1);
977 case 0x2: /* cc == 2 */
979 c
->u
.s32
.b
= tcg_const_i32(2);
981 case 0x1: /* cc == 3 */
983 c
->u
.s32
.b
= tcg_const_i32(3);
986 /* CC is masked by something else: (8 >> cc) & mask. */
989 c
->u
.s32
.a
= tcg_const_i32(8);
990 c
->u
.s32
.b
= tcg_const_i32(0);
991 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
992 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1003 static void free_compare(DisasCompare
*c
)
1007 tcg_temp_free_i64(c
->u
.s64
.a
);
1009 tcg_temp_free_i32(c
->u
.s32
.a
);
1014 tcg_temp_free_i64(c
->u
.s64
.b
);
1016 tcg_temp_free_i32(c
->u
.s32
.b
);
1021 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp
, tmp2
, tmp3
;
1026 TCGv_i32 tmp32_1
, tmp32_2
;
1030 r1
= (insn
>> 4) & 0xf;
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1036 case 0x06: /* SCKC D2(B2) [S] */
1037 /* Set Clock Comparator */
1038 check_privileged(s
);
1039 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1040 tmp
= get_address(s
, 0, b2
, d2
);
1041 potential_page_fault(s
);
1042 gen_helper_sckc(cpu_env
, tmp
);
1043 tcg_temp_free_i64(tmp
);
1045 case 0x07: /* STCKC D2(B2) [S] */
1046 /* Store Clock Comparator */
1047 check_privileged(s
);
1048 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1049 tmp
= get_address(s
, 0, b2
, d2
);
1050 potential_page_fault(s
);
1051 gen_helper_stckc(cpu_env
, tmp
);
1052 tcg_temp_free_i64(tmp
);
1054 case 0x08: /* SPT D2(B2) [S] */
1056 check_privileged(s
);
1057 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1058 tmp
= get_address(s
, 0, b2
, d2
);
1059 potential_page_fault(s
);
1060 gen_helper_spt(cpu_env
, tmp
);
1061 tcg_temp_free_i64(tmp
);
1063 case 0x09: /* STPT D2(B2) [S] */
1064 /* Store CPU Timer */
1065 check_privileged(s
);
1066 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1067 tmp
= get_address(s
, 0, b2
, d2
);
1068 potential_page_fault(s
);
1069 gen_helper_stpt(cpu_env
, tmp
);
1070 tcg_temp_free_i64(tmp
);
1072 case 0x0a: /* SPKA D2(B2) [S] */
1073 /* Set PSW Key from Address */
1074 check_privileged(s
);
1075 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1076 tmp
= get_address(s
, 0, b2
, d2
);
1077 tmp2
= tcg_temp_new_i64();
1078 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1079 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1080 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1081 tcg_temp_free_i64(tmp2
);
1082 tcg_temp_free_i64(tmp
);
1084 case 0x0d: /* PTLB [S] */
1086 check_privileged(s
);
1087 gen_helper_ptlb(cpu_env
);
1089 case 0x10: /* SPX D2(B2) [S] */
1090 /* Set Prefix Register */
1091 check_privileged(s
);
1092 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1093 tmp
= get_address(s
, 0, b2
, d2
);
1094 potential_page_fault(s
);
1095 gen_helper_spx(cpu_env
, tmp
);
1096 tcg_temp_free_i64(tmp
);
1098 case 0x11: /* STPX D2(B2) [S] */
1100 check_privileged(s
);
1101 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1102 tmp
= get_address(s
, 0, b2
, d2
);
1103 tmp2
= tcg_temp_new_i64();
1104 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1105 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1106 tcg_temp_free_i64(tmp
);
1107 tcg_temp_free_i64(tmp2
);
1109 case 0x12: /* STAP D2(B2) [S] */
1110 /* Store CPU Address */
1111 check_privileged(s
);
1112 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1113 tmp
= get_address(s
, 0, b2
, d2
);
1114 tmp2
= tcg_temp_new_i64();
1115 tmp32_1
= tcg_temp_new_i32();
1116 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1117 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1118 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1119 tcg_temp_free_i64(tmp
);
1120 tcg_temp_free_i64(tmp2
);
1121 tcg_temp_free_i32(tmp32_1
);
1123 case 0x21: /* IPTE R1,R2 [RRE] */
1124 /* Invalidate PTE */
1125 check_privileged(s
);
1126 r1
= (insn
>> 4) & 0xf;
1129 tmp2
= load_reg(r2
);
1130 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1131 tcg_temp_free_i64(tmp
);
1132 tcg_temp_free_i64(tmp2
);
1134 case 0x29: /* ISKE R1,R2 [RRE] */
1135 /* Insert Storage Key Extended */
1136 check_privileged(s
);
1137 r1
= (insn
>> 4) & 0xf;
1140 tmp2
= tcg_temp_new_i64();
1141 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1142 store_reg(r1
, tmp2
);
1143 tcg_temp_free_i64(tmp
);
1144 tcg_temp_free_i64(tmp2
);
1146 case 0x2a: /* RRBE R1,R2 [RRE] */
1147 /* Set Storage Key Extended */
1148 check_privileged(s
);
1149 r1
= (insn
>> 4) & 0xf;
1151 tmp32_1
= load_reg32(r1
);
1153 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1155 tcg_temp_free_i32(tmp32_1
);
1156 tcg_temp_free_i64(tmp
);
1158 case 0x2b: /* SSKE R1,R2 [RRE] */
1159 /* Set Storage Key Extended */
1160 check_privileged(s
);
1161 r1
= (insn
>> 4) & 0xf;
1163 tmp32_1
= load_reg32(r1
);
1165 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1166 tcg_temp_free_i32(tmp32_1
);
1167 tcg_temp_free_i64(tmp
);
1169 case 0x34: /* STCH ? */
1170 /* Store Subchannel */
1171 check_privileged(s
);
1172 gen_op_movi_cc(s
, 3);
1174 case 0x46: /* STURA R1,R2 [RRE] */
1175 /* Store Using Real Address */
1176 check_privileged(s
);
1177 r1
= (insn
>> 4) & 0xf;
1179 tmp32_1
= load_reg32(r1
);
1181 potential_page_fault(s
);
1182 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1183 tcg_temp_free_i32(tmp32_1
);
1184 tcg_temp_free_i64(tmp
);
1186 case 0x50: /* CSP R1,R2 [RRE] */
1187 /* Compare And Swap And Purge */
1188 check_privileged(s
);
1189 r1
= (insn
>> 4) & 0xf;
1191 tmp32_1
= tcg_const_i32(r1
);
1192 tmp32_2
= tcg_const_i32(r2
);
1193 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1195 tcg_temp_free_i32(tmp32_1
);
1196 tcg_temp_free_i32(tmp32_2
);
1198 case 0x5f: /* CHSC ? */
1199 /* Channel Subsystem Call */
1200 check_privileged(s
);
1201 gen_op_movi_cc(s
, 3);
1203 case 0x78: /* STCKE D2(B2) [S] */
1204 /* Store Clock Extended */
1205 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1206 tmp
= get_address(s
, 0, b2
, d2
);
1207 potential_page_fault(s
);
1208 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1210 tcg_temp_free_i64(tmp
);
1212 case 0x79: /* SACF D2(B2) [S] */
1213 /* Set Address Space Control Fast */
1214 check_privileged(s
);
1215 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1216 tmp
= get_address(s
, 0, b2
, d2
);
1217 potential_page_fault(s
);
1218 gen_helper_sacf(cpu_env
, tmp
);
1219 tcg_temp_free_i64(tmp
);
1220 /* addressing mode has changed, so end the block */
1223 s
->is_jmp
= DISAS_JUMP
;
1225 case 0x7d: /* STSI D2,(B2) [S] */
1226 check_privileged(s
);
1227 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1228 tmp
= get_address(s
, 0, b2
, d2
);
1229 tmp32_1
= load_reg32(0);
1230 tmp32_2
= load_reg32(1);
1231 potential_page_fault(s
);
1232 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1234 tcg_temp_free_i64(tmp
);
1235 tcg_temp_free_i32(tmp32_1
);
1236 tcg_temp_free_i32(tmp32_2
);
1238 case 0xb1: /* STFL D2(B2) [S] */
1239 /* Store Facility List (CPU features) at 200 */
1240 check_privileged(s
);
1241 tmp2
= tcg_const_i64(0xc0000000);
1242 tmp
= tcg_const_i64(200);
1243 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1244 tcg_temp_free_i64(tmp2
);
1245 tcg_temp_free_i64(tmp
);
1247 case 0xb2: /* LPSWE D2(B2) [S] */
1248 /* Load PSW Extended */
1249 check_privileged(s
);
1250 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1251 tmp
= get_address(s
, 0, b2
, d2
);
1252 tmp2
= tcg_temp_new_i64();
1253 tmp3
= tcg_temp_new_i64();
1254 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1255 tcg_gen_addi_i64(tmp
, tmp
, 8);
1256 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1257 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1258 /* we need to keep cc_op intact */
1259 s
->is_jmp
= DISAS_JUMP
;
1260 tcg_temp_free_i64(tmp
);
1261 tcg_temp_free_i64(tmp2
);
1262 tcg_temp_free_i64(tmp3
);
1264 case 0x20: /* SERVC R1,R2 [RRE] */
1265 /* SCLP Service call (PV hypercall) */
1266 check_privileged(s
);
1267 potential_page_fault(s
);
1268 tmp32_1
= load_reg32(r2
);
1270 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1272 tcg_temp_free_i32(tmp32_1
);
1273 tcg_temp_free_i64(tmp
);
1277 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1278 gen_illegal_opcode(s
);
1279 #ifndef CONFIG_USER_ONLY
1285 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1291 opc
= cpu_ldub_code(env
, s
->pc
);
1292 LOG_DISAS("opc 0x%x\n", opc
);
1296 insn
= ld_code4(env
, s
->pc
);
1297 op
= (insn
>> 16) & 0xff;
1298 disas_b2(env
, s
, op
, insn
);
1301 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1302 gen_illegal_opcode(s
);
1307 /* ====================================================================== */
1308 /* Define the insn format enumeration. */
1309 #define F0(N) FMT_##N,
1310 #define F1(N, X1) F0(N)
1311 #define F2(N, X1, X2) F0(N)
1312 #define F3(N, X1, X2, X3) F0(N)
1313 #define F4(N, X1, X2, X3, X4) F0(N)
1314 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1317 #include "insn-format.def"
1327 /* Define a structure to hold the decoded fields. We'll store each inside
1328 an array indexed by an enum. In order to conserve memory, we'll arrange
1329 for fields that do not exist at the same time to overlap, thus the "C"
1330 for compact. For checking purposes there is an "O" for original index
1331 as well that will be applied to availability bitmaps. */
1333 enum DisasFieldIndexO
{
1356 enum DisasFieldIndexC
{
1387 struct DisasFields
{
1390 unsigned presentC
:16;
1391 unsigned int presentO
;
1395 /* This is the way fields are to be accessed out of DisasFields. */
1396 #define have_field(S, F) have_field1((S), FLD_O_##F)
1397 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1399 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1401 return (f
->presentO
>> c
) & 1;
1404 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1405 enum DisasFieldIndexC c
)
1407 assert(have_field1(f
, o
));
1411 /* Describe the layout of each field in each format. */
1412 typedef struct DisasField
{
1414 unsigned int size
:8;
1415 unsigned int type
:2;
1416 unsigned int indexC
:6;
1417 enum DisasFieldIndexO indexO
:8;
1420 typedef struct DisasFormatInfo
{
1421 DisasField op
[NUM_C_FIELD
];
1424 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1425 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1426 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1427 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1428 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1429 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1430 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1431 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1432 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1433 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1434 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1435 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1436 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1437 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1439 #define F0(N) { { } },
1440 #define F1(N, X1) { { X1 } },
1441 #define F2(N, X1, X2) { { X1, X2 } },
1442 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1443 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1444 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1446 static const DisasFormatInfo format_info
[] = {
1447 #include "insn-format.def"
1465 /* Generally, we'll extract operands into this structures, operate upon
1466 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1467 of routines below for more details. */
1469 bool g_out
, g_out2
, g_in1
, g_in2
;
1470 TCGv_i64 out
, out2
, in1
, in2
;
1474 /* Return values from translate_one, indicating the state of the TB. */
1476 /* Continue the TB. */
1478 /* We have emitted one or more goto_tb. No fixup required. */
1480 /* We are not using a goto_tb (for whatever reason), but have updated
1481 the PC (for whatever reason), so there's no need to do it again on
1484 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1485 updated the PC for the next instruction to be executed. */
1487 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1488 No following code will be executed. */
1492 typedef enum DisasFacility
{
1493 FAC_Z
, /* zarch (default) */
1494 FAC_CASS
, /* compare and swap and store */
1495 FAC_CASS2
, /* compare and swap and store 2*/
1496 FAC_DFP
, /* decimal floating point */
1497 FAC_DFPR
, /* decimal floating point rounding */
1498 FAC_DO
, /* distinct operands */
1499 FAC_EE
, /* execute extensions */
1500 FAC_EI
, /* extended immediate */
1501 FAC_FPE
, /* floating point extension */
1502 FAC_FPSSH
, /* floating point support sign handling */
1503 FAC_FPRGR
, /* FPR-GR transfer */
1504 FAC_GIE
, /* general instructions extension */
1505 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1506 FAC_HW
, /* high-word */
1507 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1508 FAC_LOC
, /* load/store on condition */
1509 FAC_LD
, /* long displacement */
1510 FAC_PC
, /* population count */
1511 FAC_SCF
, /* store clock fast */
1512 FAC_SFLE
, /* store facility list extended */
1518 DisasFacility fac
:6;
1522 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1523 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1524 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1525 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1526 void (*help_cout
)(DisasContext
*, DisasOps
*);
1527 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1532 /* ====================================================================== */
1533 /* Miscelaneous helpers, used by several operations. */
1535 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1536 DisasOps
*o
, int mask
)
1538 int b2
= get_field(f
, b2
);
1539 int d2
= get_field(f
, d2
);
1542 o
->in2
= tcg_const_i64(d2
& mask
);
1544 o
->in2
= get_address(s
, 0, b2
, d2
);
1545 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1549 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1551 if (dest
== s
->next_pc
) {
1554 if (use_goto_tb(s
, dest
)) {
1555 gen_update_cc_op(s
);
1557 tcg_gen_movi_i64(psw_addr
, dest
);
1558 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1559 return EXIT_GOTO_TB
;
1561 tcg_gen_movi_i64(psw_addr
, dest
);
1562 return EXIT_PC_UPDATED
;
1566 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1567 bool is_imm
, int imm
, TCGv_i64 cdest
)
1570 uint64_t dest
= s
->pc
+ 2 * imm
;
1573 /* Take care of the special cases first. */
1574 if (c
->cond
== TCG_COND_NEVER
) {
1579 if (dest
== s
->next_pc
) {
1580 /* Branch to next. */
1584 if (c
->cond
== TCG_COND_ALWAYS
) {
1585 ret
= help_goto_direct(s
, dest
);
1589 if (TCGV_IS_UNUSED_I64(cdest
)) {
1590 /* E.g. bcr %r0 -> no branch. */
1594 if (c
->cond
== TCG_COND_ALWAYS
) {
1595 tcg_gen_mov_i64(psw_addr
, cdest
);
1596 ret
= EXIT_PC_UPDATED
;
1601 if (use_goto_tb(s
, s
->next_pc
)) {
1602 if (is_imm
&& use_goto_tb(s
, dest
)) {
1603 /* Both exits can use goto_tb. */
1604 gen_update_cc_op(s
);
1606 lab
= gen_new_label();
1608 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1610 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1613 /* Branch not taken. */
1615 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1616 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1621 tcg_gen_movi_i64(psw_addr
, dest
);
1622 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1626 /* Fallthru can use goto_tb, but taken branch cannot. */
1627 /* Store taken branch destination before the brcond. This
1628 avoids having to allocate a new local temp to hold it.
1629 We'll overwrite this in the not taken case anyway. */
1631 tcg_gen_mov_i64(psw_addr
, cdest
);
1634 lab
= gen_new_label();
1636 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1638 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1641 /* Branch not taken. */
1642 gen_update_cc_op(s
);
1644 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1645 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1649 tcg_gen_movi_i64(psw_addr
, dest
);
1651 ret
= EXIT_PC_UPDATED
;
1654 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1655 Most commonly we're single-stepping or some other condition that
1656 disables all use of goto_tb. Just update the PC and exit. */
1658 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1660 cdest
= tcg_const_i64(dest
);
1664 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1667 TCGv_i32 t0
= tcg_temp_new_i32();
1668 TCGv_i64 t1
= tcg_temp_new_i64();
1669 TCGv_i64 z
= tcg_const_i64(0);
1670 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1671 tcg_gen_extu_i32_i64(t1
, t0
);
1672 tcg_temp_free_i32(t0
);
1673 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1674 tcg_temp_free_i64(t1
);
1675 tcg_temp_free_i64(z
);
1679 tcg_temp_free_i64(cdest
);
1681 tcg_temp_free_i64(next
);
1683 ret
= EXIT_PC_UPDATED
;
1691 /* ====================================================================== */
1692 /* The operations. These perform the bulk of the work for any insn,
1693 usually after the operands have been loaded and output initialized. */
1695 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1697 gen_helper_abs_i64(o
->out
, o
->in2
);
1701 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1703 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1707 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1709 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1713 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1715 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1716 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1720 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1722 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1726 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1730 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1732 /* XXX possible optimization point */
1734 cc
= tcg_temp_new_i64();
1735 tcg_gen_extu_i32_i64(cc
, cc_op
);
1736 tcg_gen_shri_i64(cc
, cc
, 1);
1738 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1739 tcg_temp_free_i64(cc
);
1743 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1745 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1749 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1751 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1755 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1757 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1758 return_low128(o
->out2
);
1762 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1764 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1768 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1770 int shift
= s
->insn
->data
& 0xff;
1771 int size
= s
->insn
->data
>> 8;
1772 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1775 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1776 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1777 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1779 /* Produce the CC from only the bits manipulated. */
1780 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1781 set_cc_nz_u64(s
, cc_dst
);
1785 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1787 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1788 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1789 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1790 return EXIT_PC_UPDATED
;
1796 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1798 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1799 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1802 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1804 int m1
= get_field(s
->fields
, m1
);
1805 bool is_imm
= have_field(s
->fields
, i2
);
1806 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1809 disas_jcc(s
, &c
, m1
);
1810 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1813 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1815 int r1
= get_field(s
->fields
, r1
);
1816 bool is_imm
= have_field(s
->fields
, i2
);
1817 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1821 c
.cond
= TCG_COND_NE
;
1826 t
= tcg_temp_new_i64();
1827 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1828 store_reg32_i64(r1
, t
);
1829 c
.u
.s32
.a
= tcg_temp_new_i32();
1830 c
.u
.s32
.b
= tcg_const_i32(0);
1831 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1832 tcg_temp_free_i64(t
);
1834 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1837 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1839 int r1
= get_field(s
->fields
, r1
);
1840 bool is_imm
= have_field(s
->fields
, i2
);
1841 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1844 c
.cond
= TCG_COND_NE
;
1849 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1850 c
.u
.s64
.a
= regs
[r1
];
1851 c
.u
.s64
.b
= tcg_const_i64(0);
1853 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1856 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1858 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1863 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1865 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1870 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1872 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1877 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1879 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1880 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1881 tcg_temp_free_i32(m3
);
1882 gen_set_cc_nz_f32(s
, o
->in2
);
1886 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1888 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1889 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1890 tcg_temp_free_i32(m3
);
1891 gen_set_cc_nz_f64(s
, o
->in2
);
1895 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1897 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1898 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1899 tcg_temp_free_i32(m3
);
1900 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1904 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1906 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1907 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1908 tcg_temp_free_i32(m3
);
1909 gen_set_cc_nz_f32(s
, o
->in2
);
1913 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1915 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1916 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1917 tcg_temp_free_i32(m3
);
1918 gen_set_cc_nz_f64(s
, o
->in2
);
1922 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1924 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1925 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1926 tcg_temp_free_i32(m3
);
1927 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1931 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1933 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1934 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1935 tcg_temp_free_i32(m3
);
1939 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1941 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1942 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1943 tcg_temp_free_i32(m3
);
1947 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1949 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1950 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1951 tcg_temp_free_i32(m3
);
1952 return_low128(o
->out2
);
1956 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1958 int r2
= get_field(s
->fields
, r2
);
1959 TCGv_i64 len
= tcg_temp_new_i64();
1961 potential_page_fault(s
);
1962 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1964 return_low128(o
->out
);
1966 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1967 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1968 tcg_temp_free_i64(len
);
1973 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1975 int l
= get_field(s
->fields
, l1
);
1980 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1981 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1984 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1985 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1988 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1989 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1992 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1993 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1996 potential_page_fault(s
);
1997 vl
= tcg_const_i32(l
);
1998 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1999 tcg_temp_free_i32(vl
);
2003 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
2007 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
2009 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2010 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2011 potential_page_fault(s
);
2012 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2013 tcg_temp_free_i32(r1
);
2014 tcg_temp_free_i32(r3
);
2019 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2021 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2022 TCGv_i32 t1
= tcg_temp_new_i32();
2023 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2024 potential_page_fault(s
);
2025 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2027 tcg_temp_free_i32(t1
);
2028 tcg_temp_free_i32(m3
);
2032 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
2034 potential_page_fault(s
);
2035 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2037 return_low128(o
->in2
);
2041 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2043 int r3
= get_field(s
->fields
, r3
);
2044 potential_page_fault(s
);
2045 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2050 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2052 int r3
= get_field(s
->fields
, r3
);
2053 potential_page_fault(s
);
2054 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2059 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2061 int r3
= get_field(s
->fields
, r3
);
2062 TCGv_i64 in3
= tcg_temp_new_i64();
2063 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2064 potential_page_fault(s
);
2065 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2066 tcg_temp_free_i64(in3
);
2071 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2073 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2074 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2075 potential_page_fault(s
);
2076 /* XXX rewrite in tcg */
2077 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2082 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2084 TCGv_i64 t1
= tcg_temp_new_i64();
2085 TCGv_i32 t2
= tcg_temp_new_i32();
2086 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2087 gen_helper_cvd(t1
, t2
);
2088 tcg_temp_free_i32(t2
);
2089 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2090 tcg_temp_free_i64(t1
);
2094 #ifndef CONFIG_USER_ONLY
2095 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2099 check_privileged(s
);
2100 potential_page_fault(s
);
2102 /* We pretend the format is RX_a so that D2 is the field we want. */
2103 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2104 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2105 tcg_temp_free_i32(tmp
);
2110 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2112 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2113 return_low128(o
->out
);
2117 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2119 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2120 return_low128(o
->out
);
2124 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2126 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2127 return_low128(o
->out
);
2131 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2133 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2134 return_low128(o
->out
);
2138 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2140 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2144 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2146 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2150 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2152 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2153 return_low128(o
->out2
);
2157 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2159 int r2
= get_field(s
->fields
, r2
);
2160 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2164 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2166 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2170 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2172 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2173 tb->flags, (ab)use the tb->cs_base field as the address of
2174 the template in memory, and grab 8 bits of tb->flags/cflags for
2175 the contents of the register. We would then recognize all this
2176 in gen_intermediate_code_internal, generating code for exactly
2177 one instruction. This new TB then gets executed normally.
2179 On the other hand, this seems to be mostly used for modifying
2180 MVC inside of memcpy, which needs a helper call anyway. So
2181 perhaps this doesn't bear thinking about any further. */
2188 tmp
= tcg_const_i64(s
->next_pc
);
2189 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2190 tcg_temp_free_i64(tmp
);
2196 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2198 /* We'll use the original input for cc computation, since we get to
2199 compare that against 0, which ought to be better than comparing
2200 the real output against 64. It also lets cc_dst be a convenient
2201 temporary during our computation. */
2202 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2204 /* R1 = IN ? CLZ(IN) : 64. */
2205 gen_helper_clz(o
->out
, o
->in2
);
2207 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2208 value by 64, which is undefined. But since the shift is 64 iff the
2209 input is zero, we still get the correct result after and'ing. */
2210 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2211 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2212 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2216 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2218 int m3
= get_field(s
->fields
, m3
);
2219 int pos
, len
, base
= s
->insn
->data
;
2220 TCGv_i64 tmp
= tcg_temp_new_i64();
2225 /* Effectively a 32-bit load. */
2226 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2233 /* Effectively a 16-bit load. */
2234 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2242 /* Effectively an 8-bit load. */
2243 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2248 pos
= base
+ ctz32(m3
) * 8;
2249 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2250 ccm
= ((1ull << len
) - 1) << pos
;
2254 /* This is going to be a sequence of loads and inserts. */
2255 pos
= base
+ 32 - 8;
2259 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2260 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2261 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2264 m3
= (m3
<< 1) & 0xf;
2270 tcg_gen_movi_i64(tmp
, ccm
);
2271 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2272 tcg_temp_free_i64(tmp
);
2276 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2278 int shift
= s
->insn
->data
& 0xff;
2279 int size
= s
->insn
->data
>> 8;
2280 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2284 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2289 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2291 t1
= tcg_temp_new_i64();
2292 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2293 tcg_gen_shri_i64(t1
, t1
, 36);
2294 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2296 tcg_gen_extu_i32_i64(t1
, cc_op
);
2297 tcg_gen_shli_i64(t1
, t1
, 28);
2298 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2299 tcg_temp_free_i64(t1
);
2303 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2305 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2309 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2311 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2315 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2317 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2321 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2323 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2327 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2329 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2330 return_low128(o
->out2
);
2334 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2336 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2337 return_low128(o
->out2
);
2341 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2343 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2347 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2349 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2353 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2355 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2359 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2361 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2365 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2367 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2371 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2373 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2377 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2379 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2383 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2385 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2389 #ifndef CONFIG_USER_ONLY
2390 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2392 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2393 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2394 check_privileged(s
);
2395 potential_page_fault(s
);
2396 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2397 tcg_temp_free_i32(r1
);
2398 tcg_temp_free_i32(r3
);
2402 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2404 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2405 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2406 check_privileged(s
);
2407 potential_page_fault(s
);
2408 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2409 tcg_temp_free_i32(r1
);
2410 tcg_temp_free_i32(r3
);
2413 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2415 check_privileged(s
);
2416 potential_page_fault(s
);
2417 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2422 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2426 check_privileged(s
);
2428 t1
= tcg_temp_new_i64();
2429 t2
= tcg_temp_new_i64();
2430 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2431 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2432 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2433 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2434 tcg_gen_shli_i64(t1
, t1
, 32);
2435 gen_helper_load_psw(cpu_env
, t1
, t2
);
2436 tcg_temp_free_i64(t1
);
2437 tcg_temp_free_i64(t2
);
2438 return EXIT_NORETURN
;
2442 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2444 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2445 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2446 potential_page_fault(s
);
2447 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2448 tcg_temp_free_i32(r1
);
2449 tcg_temp_free_i32(r3
);
2453 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2455 int r1
= get_field(s
->fields
, r1
);
2456 int r3
= get_field(s
->fields
, r3
);
2457 TCGv_i64 t
= tcg_temp_new_i64();
2458 TCGv_i64 t4
= tcg_const_i64(4);
2461 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2462 store_reg32_i64(r1
, t
);
2466 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2470 tcg_temp_free_i64(t
);
2471 tcg_temp_free_i64(t4
);
2475 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2477 int r1
= get_field(s
->fields
, r1
);
2478 int r3
= get_field(s
->fields
, r3
);
2479 TCGv_i64 t
= tcg_temp_new_i64();
2480 TCGv_i64 t4
= tcg_const_i64(4);
2483 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2484 store_reg32h_i64(r1
, t
);
2488 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2492 tcg_temp_free_i64(t
);
2493 tcg_temp_free_i64(t4
);
2497 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2499 int r1
= get_field(s
->fields
, r1
);
2500 int r3
= get_field(s
->fields
, r3
);
2501 TCGv_i64 t8
= tcg_const_i64(8);
2504 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2508 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2512 tcg_temp_free_i64(t8
);
2516 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2519 o
->g_out
= o
->g_in2
;
2520 TCGV_UNUSED_I64(o
->in2
);
2525 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2529 o
->g_out
= o
->g_in1
;
2530 o
->g_out2
= o
->g_in2
;
2531 TCGV_UNUSED_I64(o
->in1
);
2532 TCGV_UNUSED_I64(o
->in2
);
2533 o
->g_in1
= o
->g_in2
= false;
2537 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2539 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2540 potential_page_fault(s
);
2541 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2542 tcg_temp_free_i32(l
);
2546 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2548 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2549 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2550 potential_page_fault(s
);
2551 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2552 tcg_temp_free_i32(r1
);
2553 tcg_temp_free_i32(r2
);
2558 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2560 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2561 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2562 potential_page_fault(s
);
2563 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2564 tcg_temp_free_i32(r1
);
2565 tcg_temp_free_i32(r3
);
2570 #ifndef CONFIG_USER_ONLY
2571 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2573 int r1
= get_field(s
->fields
, l1
);
2574 check_privileged(s
);
2575 potential_page_fault(s
);
2576 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2581 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2583 int r1
= get_field(s
->fields
, l1
);
2584 check_privileged(s
);
2585 potential_page_fault(s
);
2586 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2592 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2594 potential_page_fault(s
);
2595 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2600 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2602 potential_page_fault(s
);
2603 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2605 return_low128(o
->in2
);
2609 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2611 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2615 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2617 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2618 return_low128(o
->out2
);
2622 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2624 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2628 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2630 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2634 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2636 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2640 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2642 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2643 return_low128(o
->out2
);
2647 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2649 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2650 return_low128(o
->out2
);
2654 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2656 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2657 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2658 tcg_temp_free_i64(r3
);
2662 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2664 int r3
= get_field(s
->fields
, r3
);
2665 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2669 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2671 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2672 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2673 tcg_temp_free_i64(r3
);
2677 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2679 int r3
= get_field(s
->fields
, r3
);
2680 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2684 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2686 gen_helper_nabs_i64(o
->out
, o
->in2
);
2690 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2692 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2696 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2698 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2702 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2704 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2705 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2709 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2711 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2712 potential_page_fault(s
);
2713 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2714 tcg_temp_free_i32(l
);
2719 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2721 tcg_gen_neg_i64(o
->out
, o
->in2
);
2725 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2727 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2731 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2733 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2737 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2739 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2740 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2744 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2746 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2747 potential_page_fault(s
);
2748 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2749 tcg_temp_free_i32(l
);
2754 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2756 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2760 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2762 int shift
= s
->insn
->data
& 0xff;
2763 int size
= s
->insn
->data
>> 8;
2764 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2767 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2768 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2770 /* Produce the CC from only the bits manipulated. */
2771 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2772 set_cc_nz_u64(s
, cc_dst
);
2776 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2778 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2782 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2784 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2788 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2790 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2794 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2796 TCGv_i32 t1
= tcg_temp_new_i32();
2797 TCGv_i32 t2
= tcg_temp_new_i32();
2798 TCGv_i32 to
= tcg_temp_new_i32();
2799 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2800 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2801 tcg_gen_rotl_i32(to
, t1
, t2
);
2802 tcg_gen_extu_i32_i64(o
->out
, to
);
2803 tcg_temp_free_i32(t1
);
2804 tcg_temp_free_i32(t2
);
2805 tcg_temp_free_i32(to
);
2809 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2811 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2815 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2817 int r1
= get_field(s
->fields
, r1
);
2818 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2822 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2824 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2828 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2830 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2834 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2836 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2837 return_low128(o
->out2
);
2841 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2843 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2847 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2849 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2853 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2855 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2856 return_low128(o
->out2
);
2860 #ifndef CONFIG_USER_ONLY
2861 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2863 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2864 check_privileged(s
);
2865 potential_page_fault(s
);
2866 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2867 tcg_temp_free_i32(r1
);
/* Shift left arithmetic.  insn->data is the sign-bit position (31 for
   the 32-bit form, else 64-bit), which also selects the CC op.  The CC
   is computed from the pre-shift operands; the shift itself must leave
   the sign bit untouched, so it is masked out of the result and copied
   back from in1. */
2872 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2874 uint64_t sign
= 1ull << s
->insn
->data
;
2875 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2876 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2877 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2878 /* The arithmetic left shift is curious in that it does not affect
2879 the sign bit. Copy that over from the source unchanged. */
2880 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2881 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2882 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
/* Shift left logical: out = in1 << in2. */
2886 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2888 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
/* Shift right arithmetic: out = in1 >> in2 (sign-propagating). */
2892 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2894 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
/* Shift right logical: out = in1 >> in2 (zero-filling). */
2898 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2900 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2904 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2906 gen_helper_sfpc(cpu_env
, o
->in2
);
2910 #ifndef CONFIG_USER_ONLY
2911 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2913 check_privileged(s
);
2914 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2918 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2920 gen_helper_stck(o
->out
, cpu_env
);
2921 /* ??? We don't implement clock states. */
2922 gen_op_movi_cc(s
, 0);
2926 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2928 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2929 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2930 check_privileged(s
);
2931 potential_page_fault(s
);
2932 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2933 tcg_temp_free_i32(r1
);
2934 tcg_temp_free_i32(r3
);
2938 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2940 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2941 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2942 check_privileged(s
);
2943 potential_page_fault(s
);
2944 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2945 tcg_temp_free_i32(r1
);
2946 tcg_temp_free_i32(r3
);
2950 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2952 check_privileged(s
);
2953 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
/* STORE THEN {AND,OR} SYSTEM MASK (privileged).  First stores the
   current system-mask byte (psw_mask bits 63..56) to addr1, THEN
   modifies psw_mask: opcode 0xac ANDs the immediate into the top byte,
   otherwise ORs it in.  The store-before-modify ordering is deliberate
   (see comment below) so a faulting store restarts with the old mask. */
2957 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2959 uint64_t i2
= get_field(s
->fields
, i2
);
2962 check_privileged(s
);
2964 /* It is important to do what the instruction name says: STORE THEN.
2965 If we let the output hook perform the store then if we fault and
2966 restart, we'll have the wrong SYSTEM MASK in place. */
2967 t
= tcg_temp_new_i64();
2968 tcg_gen_shri_i64(t
, psw_mask
, 56);
2969 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2970 tcg_temp_free_i64(t
);
2972 if (s
->fields
->op
== 0xac) {
2973 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2974 (i2
<< 56) | 0x00ffffffffffffffull
);
2976 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2982 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2984 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2988 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2990 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2994 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2996 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3000 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3002 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3006 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3008 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3009 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3010 potential_page_fault(s
);
3011 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3012 tcg_temp_free_i32(r1
);
3013 tcg_temp_free_i32(r3
);
/* STORE CHARACTERS UNDER MASK.  m3 selects which bytes of in1 are
   stored to in2; insn->data gives the bit base of the source word.
   Contiguous masks become a single 32/16/8-bit store; otherwise a
   byte-by-byte shift-and-store loop is emitted, advancing in2 by one
   for each selected byte.  NOTE(review): the switch/loop framing lines
   are missing from this extraction — only the store sequences remain. */
3017 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3019 int m3
= get_field(s
->fields
, m3
);
3020 int pos
, base
= s
->insn
->data
;
3021 TCGv_i64 tmp
= tcg_temp_new_i64();
3023 pos
= base
+ ctz32(m3
) * 8;
3026 /* Effectively a 32-bit store. */
3027 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3028 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3034 /* Effectively a 16-bit store. */
3035 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3036 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3043 /* Effectively an 8-bit store. */
3044 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3045 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3049 /* This is going to be a sequence of shifts and stores. */
3050 pos
= base
+ 32 - 8;
3053 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3054 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3055 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3057 m3
= (m3
<< 1) & 0xf;
3062 tcg_temp_free_i64(tmp
);
/* STORE MULTIPLE: store registers r1..r3 to successive memory at in2.
   insn->data selects the element size (64- vs 32-bit store paths are
   both visible below); in2 is advanced by tsize per register.
   NOTE(review): the loop framing around the stores is not visible in
   this extraction. */
3066 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3068 int r1
= get_field(s
->fields
, r1
);
3069 int r3
= get_field(s
->fields
, r3
);
3070 int size
= s
->insn
->data
;
3071 TCGv_i64 tsize
= tcg_const_i64(size
);
3075 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3077 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3082 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3086 tcg_temp_free_i64(tsize
);
/* STORE MULTIPLE HIGH: for registers r1..r3, shift the register left
   by 32 (bringing the high word down to the store position) and store
   32 bits at in2, advancing in2 by 4 each time.  NOTE(review): the
   loop framing is not visible in this extraction. */
3090 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3092 int r1
= get_field(s
->fields
, r1
);
3093 int r3
= get_field(s
->fields
, r3
);
3094 TCGv_i64 t
= tcg_temp_new_i64();
3095 TCGv_i64 t4
= tcg_const_i64(4);
3096 TCGv_i64 t32
= tcg_const_i64(32);
3099 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3100 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3104 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3108 tcg_temp_free_i64(t
);
3109 tcg_temp_free_i64(t4
);
3110 tcg_temp_free_i64(t32
);
3114 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3116 potential_page_fault(s
);
3117 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3119 return_low128(o
->in2
);
/* Subtract: out = in1 - in2.  CC is handled by the cout hook. */
3123 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3125 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
/* Subtract with borrow: out = in1 + ~in2 + borrow, where the borrow
   is extracted from cc_op (CC bit 1, hence the shift right by 1)
   after the CC has been materialized.  NOTE(review): the call that
   flushes the CC before line 3139 is not visible in this extraction. */
3129 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3134 tcg_gen_not_i64(o
->in2
, o
->in2
);
3135 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3137 /* XXX possible optimization point */
3139 cc
= tcg_temp_new_i64();
3140 tcg_gen_extu_i32_i64(cc
, cc_op
);
3141 tcg_gen_shri_i64(cc
, cc
, 1);
3142 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3143 tcg_temp_free_i64(cc
);
/* SUPERVISOR CALL: record the SVC code (low byte of i1) and the
   instruction length in the CPU state, then raise EXCP_SVC.  The
   translation block ends here (EXIT_NORETURN). */
3147 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3154 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3155 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3156 tcg_temp_free_i32(t
);
3158 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3159 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3160 tcg_temp_free_i32(t
);
3162 gen_exception(EXCP_SVC
);
3163 return EXIT_NORETURN
;
3166 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3168 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3173 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3175 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3180 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3182 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3187 #ifndef CONFIG_USER_ONLY
3188 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3190 potential_page_fault(s
);
3191 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3197 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3199 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3200 potential_page_fault(s
);
3201 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3202 tcg_temp_free_i32(l
);
3207 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3209 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3210 potential_page_fault(s
);
3211 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3212 tcg_temp_free_i32(l
);
3216 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3218 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3219 potential_page_fault(s
);
3220 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3221 tcg_temp_free_i32(l
);
/* Bitwise exclusive-or: out = in1 ^ in2. */
3226 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3228 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
/* XOR IMMEDIATE against a sub-field of in1.  insn->data packs the
   field's bit offset (low byte) and width (next byte); in2 holds the
   immediate, shifted into position before the xor.  The CC is derived
   only from the bits covered by the mask. */
3232 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3234 int shift
= s
->insn
->data
& 0xff;
3235 int size
= s
->insn
->data
>> 8;
3236 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3239 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3240 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3242 /* Produce the CC from only the bits manipulated. */
3243 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3244 set_cc_nz_u64(s
, cc_dst
);
/* Produce the constant zero in out. */
3248 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3250 o
->out
= tcg_const_i64(0);
/* Produce zero in out (the out2 half is set in lines not visible in
   this extraction — compare op_zero above). */
3254 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3256 o
->out
= tcg_const_i64(0);
3262 /* ====================================================================== */
3263 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3264 the original inputs), update the various cc data structures in order to
3265 be able to compute the new condition code. */
3267 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3269 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3272 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3274 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3277 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3279 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3282 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3284 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3287 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3289 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3292 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3294 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3297 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3299 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3302 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3304 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3307 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3309 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3312 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3314 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3317 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3319 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3322 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3324 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3327 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3329 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3332 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3334 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3337 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3339 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3342 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3344 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3347 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3349 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3352 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3354 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3357 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3359 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
/* CC helper: non-zero test on the low 32 bits of the result.  The
   zero-extension into cc_dst avoids letting stale high bits affect
   the CC_OP_NZ computation. */
3362 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3364 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3365 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3368 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3370 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3373 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3375 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3378 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3380 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3383 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3385 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3388 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3390 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3393 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3395 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3398 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3400 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3403 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3405 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3408 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3410 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3413 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3415 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3418 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3420 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3423 /* ====================================================================== */
3424 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3425 with the TCG register to which we will write. Used in combination with
3426 the "wout" generators, in some cases we need a new temporary, and in
3427 some cases we can write to a TCG global. */
3429 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3431 o
->out
= tcg_temp_new_i64();
3434 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3436 o
->out
= tcg_temp_new_i64();
3437 o
->out2
= tcg_temp_new_i64();
3440 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3442 o
->out
= regs
[get_field(f
, r1
)];
3446 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3448 /* ??? Specification exception: r1 must be even. */
3449 int r1
= get_field(f
, r1
);
3451 o
->out2
= regs
[(r1
+ 1) & 15];
3452 o
->g_out
= o
->g_out2
= true;
3455 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3457 o
->out
= fregs
[get_field(f
, r1
)];
3461 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3463 /* ??? Specification exception: r1 must be < 14. */
3464 int r1
= get_field(f
, r1
);
3466 o
->out2
= fregs
[(r1
+ 2) & 15];
3467 o
->g_out
= o
->g_out2
= true;
3470 /* ====================================================================== */
3471 /* The "Write OUTput" generators. These generally perform some non-trivial
3472 copy of data to TCG globals, or to main memory. The trivial cases are
3473 generally handled by having a "prep" generator install the TCG global
3474 as the destination of the operation. */
3476 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3478 store_reg(get_field(f
, r1
), o
->out
);
3481 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3483 int r1
= get_field(f
, r1
);
3484 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3487 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3489 int r1
= get_field(f
, r1
);
3490 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3493 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3495 store_reg32_i64(get_field(f
, r1
), o
->out
);
3498 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3500 /* ??? Specification exception: r1 must be even. */
3501 int r1
= get_field(f
, r1
);
3502 store_reg32_i64(r1
, o
->out
);
3503 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
/* Write a 64-bit result to the even/odd register pair r1/r1+1:
   low 32 bits go to r1+1, high 32 bits to r1.  Note this clobbers
   o->out with the shifted value. */
3506 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3508 /* ??? Specification exception: r1 must be even. */
3509 int r1
= get_field(f
, r1
);
3510 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3511 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3512 store_reg32_i64(r1
, o
->out
);
3515 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3517 store_freg32_i64(get_field(f
, r1
), o
->out
);
3520 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3522 store_freg(get_field(f
, r1
), o
->out
);
3525 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3527 /* ??? Specification exception: r1 must be < 14. */
3528 int f1
= get_field(s
->fields
, r1
);
3529 store_freg(f1
, o
->out
);
3530 store_freg((f1
+ 2) & 15, o
->out2
);
3533 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3535 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3536 store_reg32_i64(get_field(f
, r1
), o
->out
);
3540 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3542 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3543 store_freg32_i64(get_field(f
, r1
), o
->out
);
3547 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3549 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3552 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3554 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3557 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3559 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3562 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3564 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3567 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3569 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3572 /* ====================================================================== */
3573 /* The "INput 1" generators. These load the first operand to an insn. */
3575 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3577 o
->in1
= load_reg(get_field(f
, r1
));
3580 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3582 o
->in1
= regs
[get_field(f
, r1
)];
3586 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3588 o
->in1
= tcg_temp_new_i64();
3589 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3592 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3594 o
->in1
= tcg_temp_new_i64();
3595 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3598 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3600 o
->in1
= tcg_temp_new_i64();
3601 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3604 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3606 /* ??? Specification exception: r1 must be even. */
3607 int r1
= get_field(f
, r1
);
3608 o
->in1
= load_reg((r1
+ 1) & 15);
3611 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3613 /* ??? Specification exception: r1 must be even. */
3614 int r1
= get_field(f
, r1
);
3615 o
->in1
= tcg_temp_new_i64();
3616 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3619 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3621 /* ??? Specification exception: r1 must be even. */
3622 int r1
= get_field(f
, r1
);
3623 o
->in1
= tcg_temp_new_i64();
3624 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
/* Load operand 1 as a 64-bit value assembled from the even/odd pair:
   high half from regs[r1], low half from regs[r1+1]. */
3627 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3629 /* ??? Specification exception: r1 must be even. */
3630 int r1
= get_field(f
, r1
);
3631 o
->in1
= tcg_temp_new_i64();
3632 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3635 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3637 o
->in1
= load_reg(get_field(f
, r2
));
3640 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3642 o
->in1
= load_reg(get_field(f
, r3
));
3645 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3647 o
->in1
= regs
[get_field(f
, r3
)];
3651 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3653 o
->in1
= tcg_temp_new_i64();
3654 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3657 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3659 o
->in1
= tcg_temp_new_i64();
3660 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3663 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3665 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3668 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3670 o
->in1
= fregs
[get_field(f
, r1
)];
3674 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3676 /* ??? Specification exception: r1 must be < 14. */
3677 int r1
= get_field(f
, r1
);
3679 o
->out2
= fregs
[(r1
+ 2) & 15];
3680 o
->g_out
= o
->g_out2
= true;
3683 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3685 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3688 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3690 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3691 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3694 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3697 o
->in1
= tcg_temp_new_i64();
3698 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3701 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3704 o
->in1
= tcg_temp_new_i64();
3705 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3708 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3711 o
->in1
= tcg_temp_new_i64();
3712 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3715 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3718 o
->in1
= tcg_temp_new_i64();
3719 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3722 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3725 o
->in1
= tcg_temp_new_i64();
3726 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3729 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3732 o
->in1
= tcg_temp_new_i64();
3733 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3736 /* ====================================================================== */
3737 /* The "INput 2" generators. These load the second operand to an insn. */
3739 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3741 o
->in2
= regs
[get_field(f
, r1
)];
3745 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3747 o
->in2
= tcg_temp_new_i64();
3748 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3751 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3753 o
->in2
= tcg_temp_new_i64();
3754 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3757 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3759 o
->in2
= load_reg(get_field(f
, r2
));
3762 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3764 o
->in2
= regs
[get_field(f
, r2
)];
3768 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3770 int r2
= get_field(f
, r2
);
3772 o
->in2
= load_reg(r2
);
3776 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3778 o
->in2
= tcg_temp_new_i64();
3779 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3782 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3784 o
->in2
= tcg_temp_new_i64();
3785 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3788 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3790 o
->in2
= tcg_temp_new_i64();
3791 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3794 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3796 o
->in2
= tcg_temp_new_i64();
3797 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3800 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3802 o
->in2
= load_reg(get_field(f
, r3
));
3805 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3807 o
->in2
= tcg_temp_new_i64();
3808 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3811 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3813 o
->in2
= tcg_temp_new_i64();
3814 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3817 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3819 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3822 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3824 o
->in2
= fregs
[get_field(f
, r2
)];
3828 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3830 /* ??? Specification exception: r1 must be < 14. */
3831 int r2
= get_field(f
, r2
);
3833 o
->in2
= fregs
[(r2
+ 2) & 15];
3834 o
->g_in1
= o
->g_in2
= true;
3837 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3839 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3842 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3844 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3845 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3848 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3850 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3853 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3855 help_l2_shift(s
, f
, o
, 31);
3858 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3860 help_l2_shift(s
, f
, o
, 63);
3863 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3866 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3869 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3872 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3875 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3878 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3881 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3884 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3887 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3890 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3893 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3899 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3902 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3905 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3908 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3911 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3914 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3917 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3920 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3923 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3925 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3928 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3930 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3933 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3935 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3938 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3940 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
/* Load operand 2 as the zero-extended 16-bit immediate i2, shifted
   left by insn->data bits. */
3943 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3945 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3946 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
/* Load operand 2 as the zero-extended 32-bit immediate i2, shifted
   left by insn->data bits. */
3949 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3951 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3952 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3955 /* ====================================================================== */
3957 /* Find opc within the table of insns. This is formulated as a switch
3958 statement so that (1) we get compile-time notice of cut-paste errors
3959 for duplicated opcodes, and (2) the compiler generates the binary
3960 search tree, rather than us having to post-process the table. */
3962 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3963 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3965 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3967 enum DisasInsnEnum
{
3968 #include "insn-data.def"
3972 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3977 .help_in1 = in1_##I1, \
3978 .help_in2 = in2_##I2, \
3979 .help_prep = prep_##P, \
3980 .help_wout = wout_##W, \
3981 .help_cout = cout_##CC, \
3982 .help_op = op_##OP, \
3986 /* Allow 0 to be used for NULL in the table below. */
3994 static const DisasInsn insn_info
[] = {
3995 #include "insn-data.def"
3999 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4000 case OPC: return &insn_info[insn_ ## NM];
4002 static const DisasInsn
*lookup_opc(uint16_t opc
)
4005 #include "insn-data.def"
4014 /* Extract a field from the insn. The INSN should be left-aligned in
4015 the uint64_t so that we can more easily utilize the big-bit-endian
4016 definitions we extract from the Principals of Operation. */
/* Extract one operand field F from the left-aligned insn word into the
   compressed field array o->c[], recording presence bits for both the
   original and compressed indices.  Field type 0 is unsigned, type 1
   sign-extends (<= 32 bits), type 2 reassembles the split dl+dh signed
   20-bit displacement.  NOTE(review): the switch header and some
   sign-extension lines are missing from this extraction. */
4018 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4026 /* Zero extract the field from the insn. */
4027 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4029 /* Sign-extend, or un-swap the field as necessary. */
4031 case 0: /* unsigned */
4033 case 1: /* signed */
4034 assert(f
->size
<= 32);
4035 m
= 1u << (f
->size
- 1);
4038 case 2: /* dl+dh split, signed 20 bit. */
4039 r
= ((int8_t)r
<< 12) | (r
>> 8);
4045 /* Validate that the "compressed" encoding we selected above is valid.
4046 I.e. we haven't made two different original fields overlap. */
4047 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4048 o
->presentC
|= 1 << f
->indexC
;
4049 o
->presentO
|= 1 << f
->indexO
;
4051 o
->c
[f
->indexC
] = r
;
4054 /* Lookup the insn at the current PC, extracting the operands into O and
4055 returning the info struct for the insn. Returns NULL for invalid insn. */
4057 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4060 uint64_t insn
, pc
= s
->pc
;
4062 const DisasInsn
*info
;
4064 insn
= ld_code2(env
, pc
);
4065 op
= (insn
>> 8) & 0xff;
4066 ilen
= get_ilen(op
);
4067 s
->next_pc
= s
->pc
+ ilen
;
4074 insn
= ld_code4(env
, pc
) << 32;
4077 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4083 /* We can't actually determine the insn format until we've looked up
4084 the full insn opcode. Which we can't do without locating the
4085 secondary opcode. Assume by default that OP2 is at bit 40; for
4086 those smaller insns that don't actually have a secondary opcode
4087 this will correctly result in OP2 = 0. */
4093 case 0xb2: /* S, RRF, RRE */
4094 case 0xb3: /* RRE, RRD, RRF */
4095 case 0xb9: /* RRE, RRF */
4096 case 0xe5: /* SSE, SIL */
4097 op2
= (insn
<< 8) >> 56;
4101 case 0xc0: /* RIL */
4102 case 0xc2: /* RIL */
4103 case 0xc4: /* RIL */
4104 case 0xc6: /* RIL */
4105 case 0xc8: /* SSF */
4106 case 0xcc: /* RIL */
4107 op2
= (insn
<< 12) >> 60;
4109 case 0xd0 ... 0xdf: /* SS */
4115 case 0xee ... 0xf3: /* SS */
4116 case 0xf8 ... 0xfd: /* SS */
4120 op2
= (insn
<< 40) >> 56;
4124 memset(f
, 0, sizeof(*f
));
4128 /* Lookup the instruction. */
4129 info
= lookup_opc(op
<< 8 | op2
);
4131 /* If we found it, extract the operands. */
4133 DisasFormat fmt
= info
->fmt
;
4136 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4137 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction at s->pc into TCG ops.
   Decodes via extract_insn(); if the insn is not in the new decode table,
   falls back to the legacy interpreter (disas_s390_insn).  Otherwise runs
   the insn's helper pipeline (in1/in2/prep/op/wout/cout) over a DisasOps
   struct and frees any non-global temporaries afterwards.
   NOTE(review): extraction dropped lines here (numbering jumps such as
   4146 -> 4150 and 4203 -> 4206); the declarations of f/o, several braces
   and the function tail are missing from this view — verify upstream. */
4143 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4145 const DisasInsn
*insn
;
4146 ExitStatus ret
= NO_EXIT
;
4150 insn
= extract_insn(env
, s
, &f
);
4152 /* If not found, try the old interpreter. This includes ILLOPC. */
4154 disas_s390_insn(env
, s
);
4155 switch (s
->is_jmp
) {
4163 ret
= EXIT_PC_UPDATED
;
4166 ret
= EXIT_NORETURN
;
4176 /* Set up the structures we use to communicate with the helpers. */
/* g_* flags mark operands that alias globals and must not be freed. */
4179 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4180 TCGV_UNUSED_I64(o
.out
);
4181 TCGV_UNUSED_I64(o
.out2
);
4182 TCGV_UNUSED_I64(o
.in1
);
4183 TCGV_UNUSED_I64(o
.in2
);
4184 TCGV_UNUSED_I64(o
.addr1
);
4186 /* Implement the instruction. */
/* Each stage is optional; only help_op produces the ExitStatus. */
4187 if (insn
->help_in1
) {
4188 insn
->help_in1(s
, &f
, &o
);
4190 if (insn
->help_in2
) {
4191 insn
->help_in2(s
, &f
, &o
);
4193 if (insn
->help_prep
) {
4194 insn
->help_prep(s
, &f
, &o
);
4196 if (insn
->help_op
) {
4197 ret
= insn
->help_op(s
, &o
);
4199 if (insn
->help_wout
) {
4200 insn
->help_wout(s
, &f
, &o
);
4202 if (insn
->help_cout
) {
4203 insn
->help_cout(s
, &o
);
4206 /* Free any temporaries created by the helpers. */
/* Skip operands left unused and operands that alias globals (g_*). */
4207 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4208 tcg_temp_free_i64(o
.out
);
4210 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4211 tcg_temp_free_i64(o
.out2
);
4213 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4214 tcg_temp_free_i64(o
.in1
);
4216 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4217 tcg_temp_free_i64(o
.in2
);
4219 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4220 tcg_temp_free_i64(o
.addr1
);
4223 /* Advance to the next instruction. */
4223 /* Advance to the next instruction. */
/* Main translation loop: translate guest insns starting at tb->pc into one
   TCG TranslationBlock, stopping at a page boundary, buffer exhaustion,
   instruction-count limit, single-step, or a control-flow change reported
   by translate_one().  The (not visible here) third parameter selects
   search_pc mode, which records per-op pc/cc_op bookkeeping for
   restore_state_to_opc().
   NOTE(review): extraction dropped many lines (numbering jumps such as
   4229 -> 4233, 4237 -> 4245, 4262 -> 4269); declarations of dc, j, lj,
   status, bp, the do{} head, several braces and the switch head near 4326
   are missing from this view — verify upstream before editing. */
4228 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4229 TranslationBlock
*tb
,
4233 target_ulong pc_start
;
4234 uint64_t next_page_start
;
4235 uint16_t *gen_opc_end
;
4237 int num_insns
, max_insns
;
/* In 31-bit mode only the low 31 bits of the PC are significant. */
4245 if (!(tb
->flags
& FLAG_MASK_64
)) {
4246 pc_start
&= 0x7fffffff;
4251 dc
.cc_op
= CC_OP_DYNAMIC
;
4252 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4253 dc
.is_jmp
= DISAS_NEXT
;
4255 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Translation never crosses a guest page boundary. */
4257 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4260 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4261 if (max_insns
== 0) {
4262 max_insns
= CF_COUNT_MASK
;
/* search_pc bookkeeping: pad skipped op slots, then record the pc,
   cc_op and icount for the op about to be generated. */
4269 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4273 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4276 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4277 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4278 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4279 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* The last insn of an icount TB with CF_LAST_IO may do I/O. */
4281 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4285 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4286 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop before an insn that has a debugger breakpoint on it. */
4290 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4291 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4292 if (bp
->pc
== dc
.pc
) {
4293 status
= EXIT_PC_STALE
;
4299 if (status
== NO_EXIT
) {
4300 status
= translate_one(env
, &dc
);
4303 /* If we reach a page boundary, are single stepping,
4304 or exhaust instruction count, stop generation. */
4305 if (status
== NO_EXIT
4306 && (dc
.pc
>= next_page_start
4307 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4308 || num_insns
>= max_insns
4310 || env
->singlestep_enabled
)) {
4311 status
= EXIT_PC_STALE
;
4313 } while (status
== NO_EXIT
);
4315 if (tb
->cflags
& CF_LAST_IO
) {
/* Exit handling (the switch head is missing from this view): a stale
   PC must be written back before leaving the TB. */
4324 update_psw_addr(&dc
);
4326 case EXIT_PC_UPDATED
:
4327 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4328 gen_op_calc_cc(&dc
);
4330 /* Next TB starts off with CC_OP_DYNAMIC,
4331 so make sure the cc op type is in env */
4332 gen_op_set_cc_op(&dc
);
/* Raise EXCP_DEBUG when gdb single-stepping requested it. */
4335 gen_exception(EXCP_DEBUG
);
4337 /* Generate the return instruction */
4345 gen_icount_end(tb
, num_insns
);
4346 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* search_pc mode: zero-pad the trailing op slots. */
4348 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4351 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4354 tb
->size
= dc
.pc
- pc_start
;
4355 tb
->icount
= num_insns
;
4358 #if defined(S390X_DEBUG_DISAS)
4359 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4360 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4361 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
/* Public entry point: translate a TB without per-op pc bookkeeping
   (search_pc = 0).  Thin wrapper over gen_intermediate_code_internal.
   NOTE(review): the surrounding braces were dropped by extraction. */
4367 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4369 gen_intermediate_code_internal(env
, tb
, 0);
/* Public entry point: translate a TB recording per-op pc/cc_op state
   (search_pc = 1) for later restore_state_to_opc().  Thin wrapper over
   gen_intermediate_code_internal.
   NOTE(review): the surrounding braces were dropped by extraction. */
4372 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4374 gen_intermediate_code_internal(env
, tb
, 1);
4377 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4380 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4381 cc_op
= gen_opc_cc_op
[pc_pos
];
4382 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {