4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static inline void debug_insn(uint64_t insn
)
84 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
/* Raise a program-check exception: store the pgm code and the
   instruction length (next_pc - pc) into the CPU state, raise EXCP_PGM,
   and mark the translation block as ended (DISAS_EXCP).
   NOTE(review): extraction appears to have dropped several lines of this
   body (e.g. the statements that advance s->pc past the instruction and
   save off the cc) — verify against the complete source.  */
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exception this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
/* Instruction length, for the hardware-style ILC reported with the
   interrupt.  */
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
))
;
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
/* End the TB: the helper above does not return to generated code. */
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
490 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
493 static inline void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
495 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
498 static inline void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
500 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
503 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
506 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
509 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
512 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
515 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
517 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
520 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
522 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
525 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp
= tcg_const_i32(v2
);
529 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
530 tcg_temp_free_i32(tmp
);
533 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
535 TCGv_i32 tmp
= tcg_const_i32(v2
);
536 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
537 tcg_temp_free_i32(tmp
);
540 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
542 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
545 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
547 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
550 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
552 TCGv_i64 tmp
= tcg_const_i64(v2
);
554 tcg_temp_free_i64(tmp
);
557 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
559 TCGv_i64 tmp
= tcg_const_i64(v2
);
561 tcg_temp_free_i64(tmp
);
564 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
566 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
569 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
571 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext
*s
)
577 tcg_gen_discard_i64(cc_src
);
578 tcg_gen_discard_i64(cc_dst
);
579 tcg_gen_discard_i64(cc_vr
);
580 s
->cc_op
= CC_OP_STATIC
;
583 static inline void gen_op_set_cc_op(DisasContext
*s
)
585 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
586 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
590 static inline void gen_update_cc_op(DisasContext
*s
)
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext
*s
)
598 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
599 TCGv_i64 dummy
= tcg_const_i64(0);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
625 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
630 case CC_OP_LTUGTU_32
:
631 case CC_OP_LTUGTU_64
:
638 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
653 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
664 tcg_temp_free_i64(dummy
);
666 /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
674 *r1
= (insn
>> 4) & 0xf;
678 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
679 int *x2
, int *b2
, int *d2
)
683 *r1
= (insn
>> 20) & 0xf;
684 *x2
= (insn
>> 16) & 0xf;
685 *b2
= (insn
>> 12) & 0xf;
688 return get_address(s
, *x2
, *b2
, *d2
);
691 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
696 *r1
= (insn
>> 20) & 0xf;
698 *r3
= (insn
>> 16) & 0xf;
699 *b2
= (insn
>> 12) & 0xf;
703 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
708 *i2
= (insn
>> 16) & 0xff;
709 *b1
= (insn
>> 12) & 0xf;
712 return get_address(s
, 0, *b1
, *d1
);
715 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
719 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
720 && !s
->singlestep_enabled
721 && !(s
->tb
->cflags
& CF_LAST_IO
));
724 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
728 if (use_goto_tb(s
, pc
)) {
729 tcg_gen_goto_tb(tb_num
);
730 tcg_gen_movi_i64(psw_addr
, pc
);
731 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr
, pc
);
739 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss
[cc_op
]++;
746 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit
[cc_op
]++;
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond
[16] = {
757 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
758 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
759 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
760 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
761 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
762 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
763 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
764 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond
[16] = {
771 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
773 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
775 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
785 enum cc_op old_cc_op
= s
->cc_op
;
787 if (mask
== 15 || mask
== 0) {
788 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
791 c
->g1
= c
->g2
= true;
796 /* Find the TCG condition for the mask + cc op. */
802 cond
= ltgt_cond
[mask
];
803 if (cond
== TCG_COND_NEVER
) {
806 account_inline_branch(s
, old_cc_op
);
809 case CC_OP_LTUGTU_32
:
810 case CC_OP_LTUGTU_64
:
811 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
819 cond
= nz_cond
[mask
];
820 if (cond
== TCG_COND_NEVER
) {
823 account_inline_branch(s
, old_cc_op
);
838 account_inline_branch(s
, old_cc_op
);
853 account_inline_branch(s
, old_cc_op
);
857 switch (mask
& 0xa) {
858 case 8: /* src == 0 -> no one bit found */
861 case 2: /* src != 0 -> one bit found */
867 account_inline_branch(s
, old_cc_op
);
872 /* Calculate cc value. */
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s
, old_cc_op
);
880 old_cc_op
= CC_OP_STATIC
;
881 cond
= TCG_COND_NEVER
;
885 /* Load up the arguments of the comparison. */
887 c
->g1
= c
->g2
= false;
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
893 c
->u
.s32
.b
= tcg_const_i32(0);
896 case CC_OP_LTUGTU_32
:
898 c
->u
.s32
.a
= tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
900 c
->u
.s32
.b
= tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
908 c
->u
.s64
.b
= tcg_const_i64(0);
912 case CC_OP_LTUGTU_64
:
915 c
->g1
= c
->g2
= true;
921 c
->u
.s64
.a
= tcg_temp_new_i64();
922 c
->u
.s64
.b
= tcg_const_i64(0);
923 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
933 c
->u
.s32
.b
= tcg_const_i32(3);
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
941 c
->u
.s32
.b
= tcg_const_i32(1);
943 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x8 | 0x4: /* cc < 2 */
952 c
->u
.s32
.b
= tcg_const_i32(2);
954 case 0x8: /* cc == 0 */
956 c
->u
.s32
.b
= tcg_const_i32(0);
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
960 c
->u
.s32
.b
= tcg_const_i32(0);
962 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
965 c
->u
.s32
.a
= tcg_temp_new_i32();
966 c
->u
.s32
.b
= tcg_const_i32(0);
967 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
969 case 0x4: /* cc == 1 */
971 c
->u
.s32
.b
= tcg_const_i32(1);
973 case 0x2 | 0x1: /* cc > 1 */
975 c
->u
.s32
.b
= tcg_const_i32(1);
977 case 0x2: /* cc == 2 */
979 c
->u
.s32
.b
= tcg_const_i32(2);
981 case 0x1: /* cc == 3 */
983 c
->u
.s32
.b
= tcg_const_i32(3);
986 /* CC is masked by something else: (8 >> cc) & mask. */
989 c
->u
.s32
.a
= tcg_const_i32(8);
990 c
->u
.s32
.b
= tcg_const_i32(0);
991 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
992 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1003 static void free_compare(DisasCompare
*c
)
1007 tcg_temp_free_i64(c
->u
.s64
.a
);
1009 tcg_temp_free_i32(c
->u
.s32
.a
);
1014 tcg_temp_free_i64(c
->u
.s64
.b
);
1016 tcg_temp_free_i32(c
->u
.s32
.b
);
1021 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp
, tmp2
, tmp3
;
1026 TCGv_i32 tmp32_1
, tmp32_2
;
1030 r1
= (insn
>> 4) & 0xf;
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1036 case 0x29: /* ISKE R1,R2 [RRE] */
1037 /* Insert Storage Key Extended */
1038 check_privileged(s
);
1039 r1
= (insn
>> 4) & 0xf;
1042 tmp2
= tcg_temp_new_i64();
1043 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1044 store_reg(r1
, tmp2
);
1045 tcg_temp_free_i64(tmp
);
1046 tcg_temp_free_i64(tmp2
);
1048 case 0x2a: /* RRBE R1,R2 [RRE] */
1049 /* Set Storage Key Extended */
1050 check_privileged(s
);
1051 r1
= (insn
>> 4) & 0xf;
1053 tmp32_1
= load_reg32(r1
);
1055 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1057 tcg_temp_free_i32(tmp32_1
);
1058 tcg_temp_free_i64(tmp
);
1060 case 0x2b: /* SSKE R1,R2 [RRE] */
1061 /* Set Storage Key Extended */
1062 check_privileged(s
);
1063 r1
= (insn
>> 4) & 0xf;
1065 tmp32_1
= load_reg32(r1
);
1067 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1068 tcg_temp_free_i32(tmp32_1
);
1069 tcg_temp_free_i64(tmp
);
1071 case 0x34: /* STCH ? */
1072 /* Store Subchannel */
1073 check_privileged(s
);
1074 gen_op_movi_cc(s
, 3);
1076 case 0x46: /* STURA R1,R2 [RRE] */
1077 /* Store Using Real Address */
1078 check_privileged(s
);
1079 r1
= (insn
>> 4) & 0xf;
1081 tmp32_1
= load_reg32(r1
);
1083 potential_page_fault(s
);
1084 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1085 tcg_temp_free_i32(tmp32_1
);
1086 tcg_temp_free_i64(tmp
);
1088 case 0x50: /* CSP R1,R2 [RRE] */
1089 /* Compare And Swap And Purge */
1090 check_privileged(s
);
1091 r1
= (insn
>> 4) & 0xf;
1093 tmp32_1
= tcg_const_i32(r1
);
1094 tmp32_2
= tcg_const_i32(r2
);
1095 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1097 tcg_temp_free_i32(tmp32_1
);
1098 tcg_temp_free_i32(tmp32_2
);
1100 case 0x5f: /* CHSC ? */
1101 /* Channel Subsystem Call */
1102 check_privileged(s
);
1103 gen_op_movi_cc(s
, 3);
1105 case 0x78: /* STCKE D2(B2) [S] */
1106 /* Store Clock Extended */
1107 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1108 tmp
= get_address(s
, 0, b2
, d2
);
1109 potential_page_fault(s
);
1110 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1112 tcg_temp_free_i64(tmp
);
1114 case 0x79: /* SACF D2(B2) [S] */
1115 /* Set Address Space Control Fast */
1116 check_privileged(s
);
1117 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1118 tmp
= get_address(s
, 0, b2
, d2
);
1119 potential_page_fault(s
);
1120 gen_helper_sacf(cpu_env
, tmp
);
1121 tcg_temp_free_i64(tmp
);
1122 /* addressing mode has changed, so end the block */
1125 s
->is_jmp
= DISAS_JUMP
;
1127 case 0x7d: /* STSI D2,(B2) [S] */
1128 check_privileged(s
);
1129 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1130 tmp
= get_address(s
, 0, b2
, d2
);
1131 tmp32_1
= load_reg32(0);
1132 tmp32_2
= load_reg32(1);
1133 potential_page_fault(s
);
1134 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1136 tcg_temp_free_i64(tmp
);
1137 tcg_temp_free_i32(tmp32_1
);
1138 tcg_temp_free_i32(tmp32_2
);
1140 case 0xb1: /* STFL D2(B2) [S] */
1141 /* Store Facility List (CPU features) at 200 */
1142 check_privileged(s
);
1143 tmp2
= tcg_const_i64(0xc0000000);
1144 tmp
= tcg_const_i64(200);
1145 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1146 tcg_temp_free_i64(tmp2
);
1147 tcg_temp_free_i64(tmp
);
1149 case 0xb2: /* LPSWE D2(B2) [S] */
1150 /* Load PSW Extended */
1151 check_privileged(s
);
1152 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1153 tmp
= get_address(s
, 0, b2
, d2
);
1154 tmp2
= tcg_temp_new_i64();
1155 tmp3
= tcg_temp_new_i64();
1156 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1157 tcg_gen_addi_i64(tmp
, tmp
, 8);
1158 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1159 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1160 /* we need to keep cc_op intact */
1161 s
->is_jmp
= DISAS_JUMP
;
1162 tcg_temp_free_i64(tmp
);
1163 tcg_temp_free_i64(tmp2
);
1164 tcg_temp_free_i64(tmp3
);
1166 case 0x20: /* SERVC R1,R2 [RRE] */
1167 /* SCLP Service call (PV hypercall) */
1168 check_privileged(s
);
1169 potential_page_fault(s
);
1170 tmp32_1
= load_reg32(r2
);
1172 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1174 tcg_temp_free_i32(tmp32_1
);
1175 tcg_temp_free_i64(tmp
);
1179 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1180 gen_illegal_opcode(s
);
1181 #ifndef CONFIG_USER_ONLY
1187 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1193 opc
= cpu_ldub_code(env
, s
->pc
);
1194 LOG_DISAS("opc 0x%x\n", opc
);
1198 insn
= ld_code4(env
, s
->pc
);
1199 op
= (insn
>> 16) & 0xff;
1200 disas_b2(env
, s
, op
, insn
);
1203 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1204 gen_illegal_opcode(s
);
1209 /* ====================================================================== */
1210 /* Define the insn format enumeration. */
1211 #define F0(N) FMT_##N,
1212 #define F1(N, X1) F0(N)
1213 #define F2(N, X1, X2) F0(N)
1214 #define F3(N, X1, X2, X3) F0(N)
1215 #define F4(N, X1, X2, X3, X4) F0(N)
1216 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1219 #include "insn-format.def"
1229 /* Define a structure to hold the decoded fields. We'll store each inside
1230 an array indexed by an enum. In order to conserve memory, we'll arrange
1231 for fields that do not exist at the same time to overlap, thus the "C"
1232 for compact. For checking purposes there is an "O" for original index
1233 as well that will be applied to availability bitmaps. */
1235 enum DisasFieldIndexO
{
1258 enum DisasFieldIndexC
{
1289 struct DisasFields
{
1292 unsigned presentC
:16;
1293 unsigned int presentO
;
1297 /* This is the way fields are to be accessed out of DisasFields. */
1298 #define have_field(S, F) have_field1((S), FLD_O_##F)
1299 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1301 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1303 return (f
->presentO
>> c
) & 1;
1306 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1307 enum DisasFieldIndexC c
)
1309 assert(have_field1(f
, o
));
1313 /* Describe the layout of each field in each format. */
1314 typedef struct DisasField
{
1316 unsigned int size
:8;
1317 unsigned int type
:2;
1318 unsigned int indexC
:6;
1319 enum DisasFieldIndexO indexO
:8;
1322 typedef struct DisasFormatInfo
{
1323 DisasField op
[NUM_C_FIELD
];
1326 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1327 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1328 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1329 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1330 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1331 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1332 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1333 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1334 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1335 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1336 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1337 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1338 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1339 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1341 #define F0(N) { { } },
1342 #define F1(N, X1) { { X1 } },
1343 #define F2(N, X1, X2) { { X1, X2 } },
1344 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1345 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1346 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1348 static const DisasFormatInfo format_info
[] = {
1349 #include "insn-format.def"
1367 /* Generally, we'll extract operands into this structures, operate upon
1368 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1369 of routines below for more details. */
1371 bool g_out
, g_out2
, g_in1
, g_in2
;
1372 TCGv_i64 out
, out2
, in1
, in2
;
1376 /* Return values from translate_one, indicating the state of the TB. */
1378 /* Continue the TB. */
1380 /* We have emitted one or more goto_tb. No fixup required. */
1382 /* We are not using a goto_tb (for whatever reason), but have updated
1383 the PC (for whatever reason), so there's no need to do it again on
1386 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1387 updated the PC for the next instruction to be executed. */
1389 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1390 No following code will be executed. */
1394 typedef enum DisasFacility
{
1395 FAC_Z
, /* zarch (default) */
1396 FAC_CASS
, /* compare and swap and store */
1397 FAC_CASS2
, /* compare and swap and store 2*/
1398 FAC_DFP
, /* decimal floating point */
1399 FAC_DFPR
, /* decimal floating point rounding */
1400 FAC_DO
, /* distinct operands */
1401 FAC_EE
, /* execute extensions */
1402 FAC_EI
, /* extended immediate */
1403 FAC_FPE
, /* floating point extension */
1404 FAC_FPSSH
, /* floating point support sign handling */
1405 FAC_FPRGR
, /* FPR-GR transfer */
1406 FAC_GIE
, /* general instructions extension */
1407 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1408 FAC_HW
, /* high-word */
1409 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1410 FAC_LOC
, /* load/store on condition */
1411 FAC_LD
, /* long displacement */
1412 FAC_PC
, /* population count */
1413 FAC_SCF
, /* store clock fast */
1414 FAC_SFLE
, /* store facility list extended */
1420 DisasFacility fac
:6;
1424 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1425 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1426 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1427 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1428 void (*help_cout
)(DisasContext
*, DisasOps
*);
1429 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1434 /* ====================================================================== */
1435 /* Miscelaneous helpers, used by several operations. */
1437 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1438 DisasOps
*o
, int mask
)
1440 int b2
= get_field(f
, b2
);
1441 int d2
= get_field(f
, d2
);
1444 o
->in2
= tcg_const_i64(d2
& mask
);
1446 o
->in2
= get_address(s
, 0, b2
, d2
);
1447 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1451 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1453 if (dest
== s
->next_pc
) {
1456 if (use_goto_tb(s
, dest
)) {
1457 gen_update_cc_op(s
);
1459 tcg_gen_movi_i64(psw_addr
, dest
);
1460 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1461 return EXIT_GOTO_TB
;
1463 tcg_gen_movi_i64(psw_addr
, dest
);
1464 return EXIT_PC_UPDATED
;
1468 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1469 bool is_imm
, int imm
, TCGv_i64 cdest
)
1472 uint64_t dest
= s
->pc
+ 2 * imm
;
1475 /* Take care of the special cases first. */
1476 if (c
->cond
== TCG_COND_NEVER
) {
1481 if (dest
== s
->next_pc
) {
1482 /* Branch to next. */
1486 if (c
->cond
== TCG_COND_ALWAYS
) {
1487 ret
= help_goto_direct(s
, dest
);
1491 if (TCGV_IS_UNUSED_I64(cdest
)) {
1492 /* E.g. bcr %r0 -> no branch. */
1496 if (c
->cond
== TCG_COND_ALWAYS
) {
1497 tcg_gen_mov_i64(psw_addr
, cdest
);
1498 ret
= EXIT_PC_UPDATED
;
1503 if (use_goto_tb(s
, s
->next_pc
)) {
1504 if (is_imm
&& use_goto_tb(s
, dest
)) {
1505 /* Both exits can use goto_tb. */
1506 gen_update_cc_op(s
);
1508 lab
= gen_new_label();
1510 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1512 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1515 /* Branch not taken. */
1517 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1518 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1523 tcg_gen_movi_i64(psw_addr
, dest
);
1524 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1528 /* Fallthru can use goto_tb, but taken branch cannot. */
1529 /* Store taken branch destination before the brcond. This
1530 avoids having to allocate a new local temp to hold it.
1531 We'll overwrite this in the not taken case anyway. */
1533 tcg_gen_mov_i64(psw_addr
, cdest
);
1536 lab
= gen_new_label();
1538 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1540 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1543 /* Branch not taken. */
1544 gen_update_cc_op(s
);
1546 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1547 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1551 tcg_gen_movi_i64(psw_addr
, dest
);
1553 ret
= EXIT_PC_UPDATED
;
1556 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1557 Most commonly we're single-stepping or some other condition that
1558 disables all use of goto_tb. Just update the PC and exit. */
1560 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1562 cdest
= tcg_const_i64(dest
);
1566 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1569 TCGv_i32 t0
= tcg_temp_new_i32();
1570 TCGv_i64 t1
= tcg_temp_new_i64();
1571 TCGv_i64 z
= tcg_const_i64(0);
1572 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1573 tcg_gen_extu_i32_i64(t1
, t0
);
1574 tcg_temp_free_i32(t0
);
1575 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1576 tcg_temp_free_i64(t1
);
1577 tcg_temp_free_i64(z
);
1581 tcg_temp_free_i64(cdest
);
1583 tcg_temp_free_i64(next
);
1585 ret
= EXIT_PC_UPDATED
;
1593 /* ====================================================================== */
1594 /* The operations. These perform the bulk of the work for any insn,
1595 usually after the operands have been loaded and output initialized. */
1597 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1599 gen_helper_abs_i64(o
->out
, o
->in2
);
1603 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1605 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1609 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1611 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1615 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1617 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1618 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1622 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1624 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1628 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1632 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1634 /* XXX possible optimization point */
1636 cc
= tcg_temp_new_i64();
1637 tcg_gen_extu_i32_i64(cc
, cc_op
);
1638 tcg_gen_shri_i64(cc
, cc
, 1);
1640 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1641 tcg_temp_free_i64(cc
);
1645 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1647 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1651 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1653 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1657 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1659 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1660 return_low128(o
->out2
);
1664 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1666 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1670 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1672 int shift
= s
->insn
->data
& 0xff;
1673 int size
= s
->insn
->data
>> 8;
1674 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1677 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1678 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1679 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1681 /* Produce the CC from only the bits manipulated. */
1682 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1683 set_cc_nz_u64(s
, cc_dst
);
1687 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1689 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1690 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1691 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1692 return EXIT_PC_UPDATED
;
1698 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1700 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1701 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1704 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1706 int m1
= get_field(s
->fields
, m1
);
1707 bool is_imm
= have_field(s
->fields
, i2
);
1708 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1711 disas_jcc(s
, &c
, m1
);
1712 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1715 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1717 int r1
= get_field(s
->fields
, r1
);
1718 bool is_imm
= have_field(s
->fields
, i2
);
1719 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1723 c
.cond
= TCG_COND_NE
;
1728 t
= tcg_temp_new_i64();
1729 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1730 store_reg32_i64(r1
, t
);
1731 c
.u
.s32
.a
= tcg_temp_new_i32();
1732 c
.u
.s32
.b
= tcg_const_i32(0);
1733 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1734 tcg_temp_free_i64(t
);
1736 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1739 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1741 int r1
= get_field(s
->fields
, r1
);
1742 bool is_imm
= have_field(s
->fields
, i2
);
1743 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1746 c
.cond
= TCG_COND_NE
;
1751 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1752 c
.u
.s64
.a
= regs
[r1
];
1753 c
.u
.s64
.b
= tcg_const_i64(0);
1755 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1758 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1760 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1765 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1767 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1772 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1774 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1779 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 gen_set_cc_nz_f32(s
, o
->in2
);
1788 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1790 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1791 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1792 tcg_temp_free_i32(m3
);
1793 gen_set_cc_nz_f64(s
, o
->in2
);
1797 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1801 tcg_temp_free_i32(m3
);
1802 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1806 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1808 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1809 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1810 tcg_temp_free_i32(m3
);
1811 gen_set_cc_nz_f32(s
, o
->in2
);
1815 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1817 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1818 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1819 tcg_temp_free_i32(m3
);
1820 gen_set_cc_nz_f64(s
, o
->in2
);
1824 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1826 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1827 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1828 tcg_temp_free_i32(m3
);
1829 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1833 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1835 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1836 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1837 tcg_temp_free_i32(m3
);
1841 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1843 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1844 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1845 tcg_temp_free_i32(m3
);
1849 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1851 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1852 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1853 tcg_temp_free_i32(m3
);
1854 return_low128(o
->out2
);
1858 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1860 int r2
= get_field(s
->fields
, r2
);
1861 TCGv_i64 len
= tcg_temp_new_i64();
1863 potential_page_fault(s
);
1864 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1866 return_low128(o
->out
);
1868 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1869 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1870 tcg_temp_free_i64(len
);
1875 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1877 int l
= get_field(s
->fields
, l1
);
1882 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1883 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1886 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1887 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1890 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1891 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1894 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1895 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1898 potential_page_fault(s
);
1899 vl
= tcg_const_i32(l
);
1900 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1901 tcg_temp_free_i32(vl
);
1905 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1909 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1911 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1912 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1913 potential_page_fault(s
);
1914 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1915 tcg_temp_free_i32(r1
);
1916 tcg_temp_free_i32(r3
);
1921 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1923 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1924 TCGv_i32 t1
= tcg_temp_new_i32();
1925 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1926 potential_page_fault(s
);
1927 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1929 tcg_temp_free_i32(t1
);
1930 tcg_temp_free_i32(m3
);
1934 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1936 potential_page_fault(s
);
1937 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1939 return_low128(o
->in2
);
1943 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1945 int r3
= get_field(s
->fields
, r3
);
1946 potential_page_fault(s
);
1947 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1952 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1954 int r3
= get_field(s
->fields
, r3
);
1955 potential_page_fault(s
);
1956 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1961 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1963 int r3
= get_field(s
->fields
, r3
);
1964 TCGv_i64 in3
= tcg_temp_new_i64();
1965 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1966 potential_page_fault(s
);
1967 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1968 tcg_temp_free_i64(in3
);
1973 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1975 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1976 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1977 potential_page_fault(s
);
1978 /* XXX rewrite in tcg */
1979 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1984 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1986 TCGv_i64 t1
= tcg_temp_new_i64();
1987 TCGv_i32 t2
= tcg_temp_new_i32();
1988 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1989 gen_helper_cvd(t1
, t2
);
1990 tcg_temp_free_i32(t2
);
1991 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1992 tcg_temp_free_i64(t1
);
1996 #ifndef CONFIG_USER_ONLY
1997 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2001 check_privileged(s
);
2002 potential_page_fault(s
);
2004 /* We pretend the format is RX_a so that D2 is the field we want. */
2005 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2006 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2007 tcg_temp_free_i32(tmp
);
2012 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2014 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2015 return_low128(o
->out
);
2019 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2021 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2022 return_low128(o
->out
);
2026 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2028 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2029 return_low128(o
->out
);
2033 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2035 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2036 return_low128(o
->out
);
2040 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2042 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2046 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2048 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2052 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2054 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2055 return_low128(o
->out2
);
2059 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2061 int r2
= get_field(s
->fields
, r2
);
2062 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2066 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2068 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2072 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2074 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2075 tb->flags, (ab)use the tb->cs_base field as the address of
2076 the template in memory, and grab 8 bits of tb->flags/cflags for
2077 the contents of the register. We would then recognize all this
2078 in gen_intermediate_code_internal, generating code for exactly
2079 one instruction. This new TB then gets executed normally.
2081 On the other hand, this seems to be mostly used for modifying
2082 MVC inside of memcpy, which needs a helper call anyway. So
2083 perhaps this doesn't bear thinking about any further. */
2090 tmp
= tcg_const_i64(s
->next_pc
);
2091 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2092 tcg_temp_free_i64(tmp
);
2098 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2100 /* We'll use the original input for cc computation, since we get to
2101 compare that against 0, which ought to be better than comparing
2102 the real output against 64. It also lets cc_dst be a convenient
2103 temporary during our computation. */
2104 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2106 /* R1 = IN ? CLZ(IN) : 64. */
2107 gen_helper_clz(o
->out
, o
->in2
);
2109 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2110 value by 64, which is undefined. But since the shift is 64 iff the
2111 input is zero, we still get the correct result after and'ing. */
2112 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2113 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2114 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2118 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2120 int m3
= get_field(s
->fields
, m3
);
2121 int pos
, len
, base
= s
->insn
->data
;
2122 TCGv_i64 tmp
= tcg_temp_new_i64();
2127 /* Effectively a 32-bit load. */
2128 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2135 /* Effectively a 16-bit load. */
2136 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2144 /* Effectively an 8-bit load. */
2145 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2150 pos
= base
+ ctz32(m3
) * 8;
2151 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2152 ccm
= ((1ull << len
) - 1) << pos
;
2156 /* This is going to be a sequence of loads and inserts. */
2157 pos
= base
+ 32 - 8;
2161 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2162 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2163 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2166 m3
= (m3
<< 1) & 0xf;
2172 tcg_gen_movi_i64(tmp
, ccm
);
2173 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2174 tcg_temp_free_i64(tmp
);
2178 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2180 int shift
= s
->insn
->data
& 0xff;
2181 int size
= s
->insn
->data
>> 8;
2182 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2186 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2191 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2193 t1
= tcg_temp_new_i64();
2194 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2195 tcg_gen_shri_i64(t1
, t1
, 36);
2196 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2198 tcg_gen_extu_i32_i64(t1
, cc_op
);
2199 tcg_gen_shli_i64(t1
, t1
, 28);
2200 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2201 tcg_temp_free_i64(t1
);
2205 #ifndef CONFIG_USER_ONLY
2206 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2208 check_privileged(s
);
2209 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2214 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2216 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2220 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2222 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2226 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2228 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2232 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2234 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2238 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2240 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2241 return_low128(o
->out2
);
2245 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2247 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2248 return_low128(o
->out2
);
2252 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2254 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2258 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2260 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2264 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2266 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2270 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2272 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2276 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2278 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2282 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2284 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2288 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2290 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2294 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2296 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2300 #ifndef CONFIG_USER_ONLY
2301 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2303 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2304 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2305 check_privileged(s
);
2306 potential_page_fault(s
);
2307 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2308 tcg_temp_free_i32(r1
);
2309 tcg_temp_free_i32(r3
);
2313 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2315 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2316 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2317 check_privileged(s
);
2318 potential_page_fault(s
);
2319 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2320 tcg_temp_free_i32(r1
);
2321 tcg_temp_free_i32(r3
);
2324 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2326 check_privileged(s
);
2327 potential_page_fault(s
);
2328 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2333 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2337 check_privileged(s
);
2339 t1
= tcg_temp_new_i64();
2340 t2
= tcg_temp_new_i64();
2341 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2342 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2343 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2344 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2345 tcg_gen_shli_i64(t1
, t1
, 32);
2346 gen_helper_load_psw(cpu_env
, t1
, t2
);
2347 tcg_temp_free_i64(t1
);
2348 tcg_temp_free_i64(t2
);
2349 return EXIT_NORETURN
;
2353 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2355 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2356 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2357 potential_page_fault(s
);
2358 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2359 tcg_temp_free_i32(r1
);
2360 tcg_temp_free_i32(r3
);
2364 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2366 int r1
= get_field(s
->fields
, r1
);
2367 int r3
= get_field(s
->fields
, r3
);
2368 TCGv_i64 t
= tcg_temp_new_i64();
2369 TCGv_i64 t4
= tcg_const_i64(4);
2372 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2373 store_reg32_i64(r1
, t
);
2377 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2381 tcg_temp_free_i64(t
);
2382 tcg_temp_free_i64(t4
);
2386 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2388 int r1
= get_field(s
->fields
, r1
);
2389 int r3
= get_field(s
->fields
, r3
);
2390 TCGv_i64 t
= tcg_temp_new_i64();
2391 TCGv_i64 t4
= tcg_const_i64(4);
2394 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2395 store_reg32h_i64(r1
, t
);
2399 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2403 tcg_temp_free_i64(t
);
2404 tcg_temp_free_i64(t4
);
2408 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2410 int r1
= get_field(s
->fields
, r1
);
2411 int r3
= get_field(s
->fields
, r3
);
2412 TCGv_i64 t8
= tcg_const_i64(8);
2415 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2419 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2423 tcg_temp_free_i64(t8
);
2427 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2430 o
->g_out
= o
->g_in2
;
2431 TCGV_UNUSED_I64(o
->in2
);
2436 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2440 o
->g_out
= o
->g_in1
;
2441 o
->g_out2
= o
->g_in2
;
2442 TCGV_UNUSED_I64(o
->in1
);
2443 TCGV_UNUSED_I64(o
->in2
);
2444 o
->g_in1
= o
->g_in2
= false;
2448 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2450 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2451 potential_page_fault(s
);
2452 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2453 tcg_temp_free_i32(l
);
2457 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2459 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2460 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2461 potential_page_fault(s
);
2462 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2463 tcg_temp_free_i32(r1
);
2464 tcg_temp_free_i32(r2
);
2469 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2471 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2472 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2473 potential_page_fault(s
);
2474 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2475 tcg_temp_free_i32(r1
);
2476 tcg_temp_free_i32(r3
);
2481 #ifndef CONFIG_USER_ONLY
2482 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2484 int r1
= get_field(s
->fields
, l1
);
2485 check_privileged(s
);
2486 potential_page_fault(s
);
2487 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2492 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2494 int r1
= get_field(s
->fields
, l1
);
2495 check_privileged(s
);
2496 potential_page_fault(s
);
2497 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2503 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2505 potential_page_fault(s
);
2506 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2511 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2513 potential_page_fault(s
);
2514 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2516 return_low128(o
->in2
);
2520 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2522 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2526 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2528 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2529 return_low128(o
->out2
);
2533 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2535 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2539 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2541 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2545 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2547 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2551 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2553 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2554 return_low128(o
->out2
);
2558 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2560 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2561 return_low128(o
->out2
);
2565 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2567 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2568 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2569 tcg_temp_free_i64(r3
);
2573 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2575 int r3
= get_field(s
->fields
, r3
);
2576 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2580 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2582 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2583 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2584 tcg_temp_free_i64(r3
);
2588 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2590 int r3
= get_field(s
->fields
, r3
);
2591 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2595 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2597 gen_helper_nabs_i64(o
->out
, o
->in2
);
2601 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2603 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2607 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2609 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2613 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2615 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2616 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2620 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2622 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2623 potential_page_fault(s
);
2624 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2625 tcg_temp_free_i32(l
);
2630 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2632 tcg_gen_neg_i64(o
->out
, o
->in2
);
2636 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2638 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2642 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2644 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2648 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2650 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2651 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2655 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2657 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2658 potential_page_fault(s
);
2659 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2660 tcg_temp_free_i32(l
);
2665 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2667 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2671 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2673 int shift
= s
->insn
->data
& 0xff;
2674 int size
= s
->insn
->data
>> 8;
2675 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2678 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2679 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2681 /* Produce the CC from only the bits manipulated. */
2682 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2683 set_cc_nz_u64(s
, cc_dst
);
2687 #ifndef CONFIG_USER_ONLY
2688 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2690 check_privileged(s
);
2691 gen_helper_ptlb(cpu_env
);
2696 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2698 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2702 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2704 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2708 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2710 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2714 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2716 TCGv_i32 t1
= tcg_temp_new_i32();
2717 TCGv_i32 t2
= tcg_temp_new_i32();
2718 TCGv_i32 to
= tcg_temp_new_i32();
2719 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2720 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2721 tcg_gen_rotl_i32(to
, t1
, t2
);
2722 tcg_gen_extu_i32_i64(o
->out
, to
);
2723 tcg_temp_free_i32(t1
);
2724 tcg_temp_free_i32(t2
);
2725 tcg_temp_free_i32(to
);
2729 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2731 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2735 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2737 int r1
= get_field(s
->fields
, r1
);
2738 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2742 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2744 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2748 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2750 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2754 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2756 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2757 return_low128(o
->out2
);
2761 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2763 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2767 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2769 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2773 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2775 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2776 return_low128(o
->out2
);
2780 #ifndef CONFIG_USER_ONLY
2781 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2783 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2784 check_privileged(s
);
2785 potential_page_fault(s
);
2786 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2787 tcg_temp_free_i32(r1
);
2792 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2794 uint64_t sign
= 1ull << s
->insn
->data
;
2795 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2796 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2797 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2798 /* The arithmetic left shift is curious in that it does not affect
2799 the sign bit. Copy that over from the source unchanged. */
2800 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2801 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2802 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2806 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2808 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2812 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2814 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2818 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2820 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2824 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2826 gen_helper_sfpc(cpu_env
, o
->in2
);
2830 #ifndef CONFIG_USER_ONLY
2831 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2833 check_privileged(s
);
2834 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2835 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2839 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2841 check_privileged(s
);
2842 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2846 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
2848 check_privileged(s
);
2849 /* ??? Surely cpu address != cpu number. In any case the previous
2850 version of this stored more than the required half-word, so it
2851 is unlikely this has ever been tested. */
2852 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2856 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2858 gen_helper_stck(o
->out
, cpu_env
);
2859 /* ??? We don't implement clock states. */
2860 gen_op_movi_cc(s
, 0);
2864 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2866 check_privileged(s
);
2867 gen_helper_sckc(cpu_env
, o
->in2
);
2871 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2873 check_privileged(s
);
2874 gen_helper_stckc(o
->out
, cpu_env
);
2878 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2880 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2881 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2882 check_privileged(s
);
2883 potential_page_fault(s
);
2884 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2885 tcg_temp_free_i32(r1
);
2886 tcg_temp_free_i32(r3
);
2890 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2892 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2893 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2894 check_privileged(s
);
2895 potential_page_fault(s
);
2896 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2897 tcg_temp_free_i32(r1
);
2898 tcg_temp_free_i32(r3
);
2902 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2904 check_privileged(s
);
2905 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2909 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2911 check_privileged(s
);
2912 gen_helper_spt(cpu_env
, o
->in2
);
2916 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2918 check_privileged(s
);
2919 gen_helper_stpt(o
->out
, cpu_env
);
2923 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
2925 check_privileged(s
);
2926 gen_helper_spx(cpu_env
, o
->in2
);
2930 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
2932 check_privileged(s
);
2933 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
2934 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
2938 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2940 uint64_t i2
= get_field(s
->fields
, i2
);
2943 check_privileged(s
);
2945 /* It is important to do what the instruction name says: STORE THEN.
2946 If we let the output hook perform the store then if we fault and
2947 restart, we'll have the wrong SYSTEM MASK in place. */
2948 t
= tcg_temp_new_i64();
2949 tcg_gen_shri_i64(t
, psw_mask
, 56);
2950 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2951 tcg_temp_free_i64(t
);
2953 if (s
->fields
->op
== 0xac) {
2954 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2955 (i2
<< 56) | 0x00ffffffffffffffull
);
2957 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2963 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2965 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2969 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2971 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2975 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2977 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2981 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2983 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2987 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2989 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2990 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2991 potential_page_fault(s
);
2992 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
2993 tcg_temp_free_i32(r1
);
2994 tcg_temp_free_i32(r3
);
2998 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3000 int m3
= get_field(s
->fields
, m3
);
3001 int pos
, base
= s
->insn
->data
;
3002 TCGv_i64 tmp
= tcg_temp_new_i64();
3004 pos
= base
+ ctz32(m3
) * 8;
3007 /* Effectively a 32-bit store. */
3008 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3009 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3015 /* Effectively a 16-bit store. */
3016 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3017 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3024 /* Effectively an 8-bit store. */
3025 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3026 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3030 /* This is going to be a sequence of shifts and stores. */
3031 pos
= base
+ 32 - 8;
3034 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3035 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3036 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3038 m3
= (m3
<< 1) & 0xf;
3043 tcg_temp_free_i64(tmp
);
3047 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3049 int r1
= get_field(s
->fields
, r1
);
3050 int r3
= get_field(s
->fields
, r3
);
3051 int size
= s
->insn
->data
;
3052 TCGv_i64 tsize
= tcg_const_i64(size
);
3056 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3058 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3063 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3067 tcg_temp_free_i64(tsize
);
3071 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3073 int r1
= get_field(s
->fields
, r1
);
3074 int r3
= get_field(s
->fields
, r3
);
3075 TCGv_i64 t
= tcg_temp_new_i64();
3076 TCGv_i64 t4
= tcg_const_i64(4);
3077 TCGv_i64 t32
= tcg_const_i64(32);
3080 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3081 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3085 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3089 tcg_temp_free_i64(t
);
3090 tcg_temp_free_i64(t4
);
3091 tcg_temp_free_i64(t32
);
3095 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3097 potential_page_fault(s
);
3098 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3100 return_low128(o
->in2
);
3104 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3106 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3110 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3115 tcg_gen_not_i64(o
->in2
, o
->in2
);
3116 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3118 /* XXX possible optimization point */
3120 cc
= tcg_temp_new_i64();
3121 tcg_gen_extu_i32_i64(cc
, cc_op
);
3122 tcg_gen_shri_i64(cc
, cc
, 1);
3123 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3124 tcg_temp_free_i64(cc
);
3128 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3135 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3136 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3137 tcg_temp_free_i32(t
);
3139 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3140 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3141 tcg_temp_free_i32(t
);
3143 gen_exception(EXCP_SVC
);
3144 return EXIT_NORETURN
;
3147 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3149 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3154 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3156 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3161 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3163 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3168 #ifndef CONFIG_USER_ONLY
3169 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3171 potential_page_fault(s
);
3172 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3178 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3180 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3181 potential_page_fault(s
);
3182 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3183 tcg_temp_free_i32(l
);
3188 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3190 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3191 potential_page_fault(s
);
3192 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3193 tcg_temp_free_i32(l
);
3197 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3199 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3200 potential_page_fault(s
);
3201 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3202 tcg_temp_free_i32(l
);
3207 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3209 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3213 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3215 int shift
= s
->insn
->data
& 0xff;
3216 int size
= s
->insn
->data
>> 8;
3217 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3220 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3221 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3223 /* Produce the CC from only the bits manipulated. */
3224 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3225 set_cc_nz_u64(s
, cc_dst
);
3229 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3231 o
->out
= tcg_const_i64(0);
3235 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3237 o
->out
= tcg_const_i64(0);
3243 /* ====================================================================== */
3244 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3245 the original inputs), update the various cc data structures in order to
3246 be able to compute the new condition code. */
3248 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3250 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3253 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3255 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3258 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3260 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3263 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3265 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3268 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3270 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3273 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3275 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3278 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3280 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3283 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3285 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3288 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3290 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3293 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3295 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3298 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3300 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3303 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3305 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3308 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3310 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3313 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3315 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3318 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3320 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3323 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3325 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3328 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3330 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3333 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3335 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3338 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3340 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3343 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3345 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3346 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3349 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3351 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3354 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3356 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3359 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3361 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3364 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3366 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3369 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3371 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3374 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3376 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3379 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3381 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3384 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3386 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3389 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3391 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3394 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3396 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3399 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3401 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3404 /* ====================================================================== */
3405 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3406 with the TCG register to which we will write. Used in combination with
3407 the "wout" generators, in some cases we need a new temporary, and in
3408 some cases we can write to a TCG global. */
3410 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3412 o
->out
= tcg_temp_new_i64();
3415 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3417 o
->out
= tcg_temp_new_i64();
3418 o
->out2
= tcg_temp_new_i64();
3421 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3423 o
->out
= regs
[get_field(f
, r1
)];
3427 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3429 /* ??? Specification exception: r1 must be even. */
3430 int r1
= get_field(f
, r1
);
3432 o
->out2
= regs
[(r1
+ 1) & 15];
3433 o
->g_out
= o
->g_out2
= true;
3436 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3438 o
->out
= fregs
[get_field(f
, r1
)];
3442 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3444 /* ??? Specification exception: r1 must be < 14. */
3445 int r1
= get_field(f
, r1
);
3447 o
->out2
= fregs
[(r1
+ 2) & 15];
3448 o
->g_out
= o
->g_out2
= true;
3451 /* ====================================================================== */
3452 /* The "Write OUTput" generators. These generally perform some non-trivial
3453 copy of data to TCG globals, or to main memory. The trivial cases are
3454 generally handled by having a "prep" generator install the TCG global
3455 as the destination of the operation. */
3457 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3459 store_reg(get_field(f
, r1
), o
->out
);
3462 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3464 int r1
= get_field(f
, r1
);
3465 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3468 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3470 int r1
= get_field(f
, r1
);
3471 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3474 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3476 store_reg32_i64(get_field(f
, r1
), o
->out
);
3479 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3481 /* ??? Specification exception: r1 must be even. */
3482 int r1
= get_field(f
, r1
);
3483 store_reg32_i64(r1
, o
->out
);
3484 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3487 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3489 /* ??? Specification exception: r1 must be even. */
3490 int r1
= get_field(f
, r1
);
3491 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3492 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3493 store_reg32_i64(r1
, o
->out
);
3496 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3498 store_freg32_i64(get_field(f
, r1
), o
->out
);
3501 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3503 store_freg(get_field(f
, r1
), o
->out
);
3506 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3508 /* ??? Specification exception: r1 must be < 14. */
3509 int f1
= get_field(s
->fields
, r1
);
3510 store_freg(f1
, o
->out
);
3511 store_freg((f1
+ 2) & 15, o
->out2
);
3514 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3516 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3517 store_reg32_i64(get_field(f
, r1
), o
->out
);
3521 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3524 store_freg32_i64(get_field(f
, r1
), o
->out
);
3528 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3530 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3533 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3535 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3538 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3540 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3543 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3545 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3548 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3550 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3553 /* ====================================================================== */
3554 /* The "INput 1" generators. These load the first operand to an insn. */
3556 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3558 o
->in1
= load_reg(get_field(f
, r1
));
3561 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3563 o
->in1
= regs
[get_field(f
, r1
)];
3567 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3569 o
->in1
= tcg_temp_new_i64();
3570 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3573 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3575 o
->in1
= tcg_temp_new_i64();
3576 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3579 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3581 o
->in1
= tcg_temp_new_i64();
3582 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3585 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3587 /* ??? Specification exception: r1 must be even. */
3588 int r1
= get_field(f
, r1
);
3589 o
->in1
= load_reg((r1
+ 1) & 15);
3592 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3594 /* ??? Specification exception: r1 must be even. */
3595 int r1
= get_field(f
, r1
);
3596 o
->in1
= tcg_temp_new_i64();
3597 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3600 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3602 /* ??? Specification exception: r1 must be even. */
3603 int r1
= get_field(f
, r1
);
3604 o
->in1
= tcg_temp_new_i64();
3605 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3608 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3610 /* ??? Specification exception: r1 must be even. */
3611 int r1
= get_field(f
, r1
);
3612 o
->in1
= tcg_temp_new_i64();
3613 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3616 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3618 o
->in1
= load_reg(get_field(f
, r2
));
3621 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3623 o
->in1
= load_reg(get_field(f
, r3
));
3626 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3628 o
->in1
= regs
[get_field(f
, r3
)];
3632 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3634 o
->in1
= tcg_temp_new_i64();
3635 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3638 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3640 o
->in1
= tcg_temp_new_i64();
3641 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3644 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3646 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3649 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3651 o
->in1
= fregs
[get_field(f
, r1
)];
3655 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3657 /* ??? Specification exception: r1 must be < 14. */
3658 int r1
= get_field(f
, r1
);
3660 o
->out2
= fregs
[(r1
+ 2) & 15];
3661 o
->g_out
= o
->g_out2
= true;
3664 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3666 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3669 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3671 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3672 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3675 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3678 o
->in1
= tcg_temp_new_i64();
3679 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3682 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3685 o
->in1
= tcg_temp_new_i64();
3686 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3689 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3692 o
->in1
= tcg_temp_new_i64();
3693 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3696 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3699 o
->in1
= tcg_temp_new_i64();
3700 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3703 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3706 o
->in1
= tcg_temp_new_i64();
3707 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3710 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3713 o
->in1
= tcg_temp_new_i64();
3714 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3717 /* ====================================================================== */
3718 /* The "INput 2" generators. These load the second operand to an insn. */
3720 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3722 o
->in2
= regs
[get_field(f
, r1
)];
3726 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3728 o
->in2
= tcg_temp_new_i64();
3729 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3732 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3734 o
->in2
= tcg_temp_new_i64();
3735 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3738 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3740 o
->in2
= load_reg(get_field(f
, r2
));
3743 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3745 o
->in2
= regs
[get_field(f
, r2
)];
3749 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3751 int r2
= get_field(f
, r2
);
3753 o
->in2
= load_reg(r2
);
3757 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3759 o
->in2
= tcg_temp_new_i64();
3760 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3763 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3765 o
->in2
= tcg_temp_new_i64();
3766 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3769 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3771 o
->in2
= tcg_temp_new_i64();
3772 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3775 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3777 o
->in2
= tcg_temp_new_i64();
3778 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3781 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3783 o
->in2
= load_reg(get_field(f
, r3
));
3786 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3788 o
->in2
= tcg_temp_new_i64();
3789 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3792 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3794 o
->in2
= tcg_temp_new_i64();
3795 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3798 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3800 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3803 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3805 o
->in2
= fregs
[get_field(f
, r2
)];
3809 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3811 /* ??? Specification exception: r1 must be < 14. */
3812 int r2
= get_field(f
, r2
);
3814 o
->in2
= fregs
[(r2
+ 2) & 15];
3815 o
->g_in1
= o
->g_in2
= true;
3818 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3823 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3825 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3826 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3829 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3834 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3836 help_l2_shift(s
, f
, o
, 31);
3839 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3841 help_l2_shift(s
, f
, o
, 63);
3844 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3847 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3850 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3853 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3856 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3859 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3862 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3865 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3868 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3871 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3874 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3877 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3880 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3883 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3886 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3889 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3892 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3895 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3898 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3901 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3904 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3906 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3909 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3911 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3914 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3916 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3919 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3921 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3924 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3926 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3927 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3930 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3932 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3933 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3936 /* ====================================================================== */
3938 /* Find opc within the table of insns. This is formulated as a switch
3939 statement so that (1) we get compile-time notice of cut-paste errors
3940 for duplicated opcodes, and (2) the compiler generates the binary
3941 search tree, rather than us having to post-process the table. */
3943 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3944 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3946 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3948 enum DisasInsnEnum
{
3949 #include "insn-data.def"
3953 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3958 .help_in1 = in1_##I1, \
3959 .help_in2 = in2_##I2, \
3960 .help_prep = prep_##P, \
3961 .help_wout = wout_##W, \
3962 .help_cout = cout_##CC, \
3963 .help_op = op_##OP, \
3967 /* Allow 0 to be used for NULL in the table below. */
3975 static const DisasInsn insn_info
[] = {
3976 #include "insn-data.def"
3980 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3981 case OPC: return &insn_info[insn_ ## NM];
3983 static const DisasInsn
*lookup_opc(uint16_t opc
)
3986 #include "insn-data.def"
3995 /* Extract a field from the insn. The INSN should be left-aligned in
3996 the uint64_t so that we can more easily utilize the big-bit-endian
3997 definitions we extract from the Principals of Operation. */
3999 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4007 /* Zero extract the field from the insn. */
4008 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4010 /* Sign-extend, or un-swap the field as necessary. */
4012 case 0: /* unsigned */
4014 case 1: /* signed */
4015 assert(f
->size
<= 32);
4016 m
= 1u << (f
->size
- 1);
4019 case 2: /* dl+dh split, signed 20 bit. */
4020 r
= ((int8_t)r
<< 12) | (r
>> 8);
4026 /* Validate that the "compressed" encoding we selected above is valid.
4027 I.e. we havn't make two different original fields overlap. */
4028 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4029 o
->presentC
|= 1 << f
->indexC
;
4030 o
->presentO
|= 1 << f
->indexO
;
4032 o
->c
[f
->indexC
] = r
;
4035 /* Lookup the insn at the current PC, extracting the operands into O and
4036 returning the info struct for the insn. Returns NULL for invalid insn. */
4038 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4041 uint64_t insn
, pc
= s
->pc
;
4043 const DisasInsn
*info
;
4045 insn
= ld_code2(env
, pc
);
4046 op
= (insn
>> 8) & 0xff;
4047 ilen
= get_ilen(op
);
4048 s
->next_pc
= s
->pc
+ ilen
;
4055 insn
= ld_code4(env
, pc
) << 32;
4058 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4064 /* We can't actually determine the insn format until we've looked up
4065 the full insn opcode. Which we can't do without locating the
4066 secondary opcode. Assume by default that OP2 is at bit 40; for
4067 those smaller insns that don't actually have a secondary opcode
4068 this will correctly result in OP2 = 0. */
4074 case 0xb2: /* S, RRF, RRE */
4075 case 0xb3: /* RRE, RRD, RRF */
4076 case 0xb9: /* RRE, RRF */
4077 case 0xe5: /* SSE, SIL */
4078 op2
= (insn
<< 8) >> 56;
4082 case 0xc0: /* RIL */
4083 case 0xc2: /* RIL */
4084 case 0xc4: /* RIL */
4085 case 0xc6: /* RIL */
4086 case 0xc8: /* SSF */
4087 case 0xcc: /* RIL */
4088 op2
= (insn
<< 12) >> 60;
4090 case 0xd0 ... 0xdf: /* SS */
4096 case 0xee ... 0xf3: /* SS */
4097 case 0xf8 ... 0xfd: /* SS */
4101 op2
= (insn
<< 40) >> 56;
4105 memset(f
, 0, sizeof(*f
));
4109 /* Lookup the instruction. */
4110 info
= lookup_opc(op
<< 8 | op2
);
4112 /* If we found it, extract the operands. */
4114 DisasFormat fmt
= info
->fmt
;
4117 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4118 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/*
 * Translate the single instruction at s->pc into TCG ops.
 *
 * The instruction is first decoded via extract_insn() into the DisasFields
 * referenced as &f.  If no entry exists in the new decode table, control
 * falls back to the legacy interpreter (disas_s390_insn) and its s->is_jmp
 * result is mapped onto an ExitStatus.  Otherwise the table-driven helper
 * pipeline runs in fixed order: help_in1 -> help_in2 -> help_prep ->
 * help_op -> help_wout -> help_cout, each stage optional (NULL-checked).
 * Finally, TCG temporaries the helpers created are freed, except those
 * flagged as TCG globals via the o.g_* booleans.
 *
 * Returns the ExitStatus that tells the main translation loop whether to
 * keep translating into the same TB.
 *
 * NOTE(review): several lines of this function are missing from this
 * extract (the local DisasFields f / DisasOps o declarations, the
 * "if (insn == NULL)" test guarding the legacy fallback, most of the
 * switch cases on s->is_jmp, and the closing braces of the if blocks).
 * The code below reproduces only what is visible; the comments describe
 * only that visible code.
 */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;

    /* Decode the instruction and extract its operand fields into f.  */
    insn = extract_insn(env, s, &f);

    /* If not found, try the old interpreter.  This includes ILLOPC.  */
    disas_s390_insn(env, s);
    switch (s->is_jmp) {
    /* The legacy core already updated psw.addr itself.  */
    ret = EXIT_PC_UPDATED;
    /* An exception was raised; nothing after it in this TB executes.  */
    ret = EXIT_NORETURN;

    /* Set up the structures we use to communicate with the helpers.  */
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each pipeline stage is optional and
       only invoked when the decode table provides it.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    if (insn->help_cout) {
        insn->help_cout(s, &o);

    /* Free any temporaries created by the helpers.  Values marked as
       TCG globals via the matching o.g_* flag must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    /* addr1 is always a temporary, so no g_* guard is needed.  */
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);

    /* Advance to the next instruction.  */
/*
 * Core translation loop: translate guest code starting at tb->pc into a
 * single TranslationBlock.
 *
 * Repeatedly calls translate_one() until it reports a non-NO_EXIT status,
 * a page boundary is crossed, the opcode buffer is nearly full, the
 * per-TB instruction budget (cflags & CF_COUNT_MASK) is exhausted, or
 * single-stepping is enabled.  Also records per-insn metadata
 * (gen_opc_pc / gen_opc_cc_op / gen_opc_icount) used by
 * restore_state_to_opc(), and honours guest breakpoints by stopping with
 * EXIT_PC_STALE and generating an EXCP_DEBUG exception.
 *
 * NOTE(review): many lines are missing from this extract — the third
 * parameter of the signature, the declarations of dc, status, do_debug,
 * bp, j and lj, the "do {" that opens the main loop, the switch on the
 * final status, and most closing braces.  The code below reproduces only
 * the visible lines; comments describe only those.
 */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int num_insns, max_insns;

    /* In 31-bit mode only the low 31 bits of the PSW address are valid.  */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;

    /* Start every TB with an unknown (dynamic) condition-code state.  */
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* Per-TB instruction budget; 0 in cflags means "no limit".  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;

    /* Record per-insn metadata for PC-search mode (restore_state_to_opc):
       pad skipped opcode slots with 0, then mark this insn's start.  */
    j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
    tcg_ctx.gen_opc_instr_start[lj++] = 0;
    tcg_ctx.gen_opc_pc[lj] = dc.pc;
    gen_opc_cc_op[lj] = dc.cc_op;
    tcg_ctx.gen_opc_instr_start[lj] = 1;
    tcg_ctx.gen_opc_icount[lj] = num_insns;

    /* The last instruction of an icount TB may do I/O.  */
    if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc.pc);

    /* Stop before an instruction that carries a guest breakpoint.  */
    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc.pc) {
                status = EXIT_PC_STALE;

    if (status == NO_EXIT) {
        status = translate_one(env, &dc);

    /* If we reach a page boundary, are single stepping,
       or exhaust instruction count, stop generation.  */
    if (status == NO_EXIT
        && (dc.pc >= next_page_start
            || tcg_ctx.gen_opc_ptr >= gen_opc_end
            || num_insns >= max_insns
            || env->singlestep_enabled)) {
        status = EXIT_PC_STALE;
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {

    /* The translated code did not update psw.addr; write it back now.  */
    update_psw_addr(&dc);
    case EXIT_PC_UPDATED:
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        /* Next TB starts off with CC_OP_DYNAMIC,
           so make sure the cc op type is in env */
        gen_op_set_cc_op(&dc);
    gen_exception(EXCP_DEBUG);

    /* Generate the return instruction */

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

    /* In PC-search mode, zero-fill the remaining instr_start slots;
       otherwise record the TB's final size and instruction count.  */
    j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
    tcg_ctx.gen_opc_instr_start[lj++] = 0;
    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4348 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4350 gen_intermediate_code_internal(env
, tb
, 0);
4353 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4355 gen_intermediate_code_internal(env
, tb
, 1);
4358 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4361 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4362 cc_op
= gen_opc_cc_op
[pc_pos
];
4363 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {