4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Trace one raw instruction word; no-op unless verbose disas logging is on. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exception this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
490 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
493 static inline void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
495 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
498 static inline void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
500 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
503 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
506 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
509 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
512 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
515 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
517 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
520 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
522 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
525 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp
= tcg_const_i32(v2
);
529 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
530 tcg_temp_free_i32(tmp
);
533 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
535 TCGv_i32 tmp
= tcg_const_i32(v2
);
536 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
537 tcg_temp_free_i32(tmp
);
540 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
542 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
545 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
547 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
550 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
552 TCGv_i64 tmp
= tcg_const_i64(v2
);
554 tcg_temp_free_i64(tmp
);
557 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
559 TCGv_i64 tmp
= tcg_const_i64(v2
);
561 tcg_temp_free_i64(tmp
);
564 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
566 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
569 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
571 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext
*s
)
577 tcg_gen_discard_i64(cc_src
);
578 tcg_gen_discard_i64(cc_dst
);
579 tcg_gen_discard_i64(cc_vr
);
580 s
->cc_op
= CC_OP_STATIC
;
583 static inline void gen_op_set_cc_op(DisasContext
*s
)
585 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
586 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
590 static inline void gen_update_cc_op(DisasContext
*s
)
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext
*s
)
598 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
599 TCGv_i64 dummy
= tcg_const_i64(0);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
625 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
630 case CC_OP_LTUGTU_32
:
631 case CC_OP_LTUGTU_64
:
638 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
653 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
664 tcg_temp_free_i64(dummy
);
666 /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
674 *r1
= (insn
>> 4) & 0xf;
678 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
679 int *x2
, int *b2
, int *d2
)
683 *r1
= (insn
>> 20) & 0xf;
684 *x2
= (insn
>> 16) & 0xf;
685 *b2
= (insn
>> 12) & 0xf;
688 return get_address(s
, *x2
, *b2
, *d2
);
691 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
696 *r1
= (insn
>> 20) & 0xf;
698 *r3
= (insn
>> 16) & 0xf;
699 *b2
= (insn
>> 12) & 0xf;
703 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
708 *i2
= (insn
>> 16) & 0xff;
709 *b1
= (insn
>> 12) & 0xf;
712 return get_address(s
, 0, *b1
, *d1
);
715 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
719 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
720 && !s
->singlestep_enabled
721 && !(s
->tb
->cflags
& CF_LAST_IO
));
724 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
728 if (use_goto_tb(s
, pc
)) {
729 tcg_gen_goto_tb(tb_num
);
730 tcg_gen_movi_i64(psw_addr
, pc
);
731 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr
, pc
);
739 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss
[cc_op
]++;
746 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit
[cc_op
]++;
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond
[16] = {
757 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
758 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
759 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
760 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
761 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
762 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
763 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
764 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond
[16] = {
771 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
773 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
775 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
785 enum cc_op old_cc_op
= s
->cc_op
;
787 if (mask
== 15 || mask
== 0) {
788 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
791 c
->g1
= c
->g2
= true;
796 /* Find the TCG condition for the mask + cc op. */
802 cond
= ltgt_cond
[mask
];
803 if (cond
== TCG_COND_NEVER
) {
806 account_inline_branch(s
, old_cc_op
);
809 case CC_OP_LTUGTU_32
:
810 case CC_OP_LTUGTU_64
:
811 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
819 cond
= nz_cond
[mask
];
820 if (cond
== TCG_COND_NEVER
) {
823 account_inline_branch(s
, old_cc_op
);
838 account_inline_branch(s
, old_cc_op
);
853 account_inline_branch(s
, old_cc_op
);
857 switch (mask
& 0xa) {
858 case 8: /* src == 0 -> no one bit found */
861 case 2: /* src != 0 -> one bit found */
867 account_inline_branch(s
, old_cc_op
);
872 /* Calculate cc value. */
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s
, old_cc_op
);
880 old_cc_op
= CC_OP_STATIC
;
881 cond
= TCG_COND_NEVER
;
885 /* Load up the arguments of the comparison. */
887 c
->g1
= c
->g2
= false;
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
893 c
->u
.s32
.b
= tcg_const_i32(0);
896 case CC_OP_LTUGTU_32
:
898 c
->u
.s32
.a
= tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
900 c
->u
.s32
.b
= tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
908 c
->u
.s64
.b
= tcg_const_i64(0);
912 case CC_OP_LTUGTU_64
:
915 c
->g1
= c
->g2
= true;
921 c
->u
.s64
.a
= tcg_temp_new_i64();
922 c
->u
.s64
.b
= tcg_const_i64(0);
923 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
933 c
->u
.s32
.b
= tcg_const_i32(3);
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
941 c
->u
.s32
.b
= tcg_const_i32(1);
943 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x8 | 0x4: /* cc < 2 */
952 c
->u
.s32
.b
= tcg_const_i32(2);
954 case 0x8: /* cc == 0 */
956 c
->u
.s32
.b
= tcg_const_i32(0);
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
960 c
->u
.s32
.b
= tcg_const_i32(0);
962 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
965 c
->u
.s32
.a
= tcg_temp_new_i32();
966 c
->u
.s32
.b
= tcg_const_i32(0);
967 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
969 case 0x4: /* cc == 1 */
971 c
->u
.s32
.b
= tcg_const_i32(1);
973 case 0x2 | 0x1: /* cc > 1 */
975 c
->u
.s32
.b
= tcg_const_i32(1);
977 case 0x2: /* cc == 2 */
979 c
->u
.s32
.b
= tcg_const_i32(2);
981 case 0x1: /* cc == 3 */
983 c
->u
.s32
.b
= tcg_const_i32(3);
986 /* CC is masked by something else: (8 >> cc) & mask. */
989 c
->u
.s32
.a
= tcg_const_i32(8);
990 c
->u
.s32
.b
= tcg_const_i32(0);
991 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
992 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1003 static void free_compare(DisasCompare
*c
)
1007 tcg_temp_free_i64(c
->u
.s64
.a
);
1009 tcg_temp_free_i32(c
->u
.s32
.a
);
1014 tcg_temp_free_i64(c
->u
.s64
.b
);
1016 tcg_temp_free_i32(c
->u
.s32
.b
);
1021 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp
, tmp2
, tmp3
;
1026 TCGv_i32 tmp32_1
, tmp32_2
;
1030 r1
= (insn
>> 4) & 0xf;
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1036 case 0x0d: /* PTLB [S] */
1038 check_privileged(s
);
1039 gen_helper_ptlb(cpu_env
);
1041 case 0x10: /* SPX D2(B2) [S] */
1042 /* Set Prefix Register */
1043 check_privileged(s
);
1044 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1045 tmp
= get_address(s
, 0, b2
, d2
);
1046 potential_page_fault(s
);
1047 gen_helper_spx(cpu_env
, tmp
);
1048 tcg_temp_free_i64(tmp
);
1050 case 0x11: /* STPX D2(B2) [S] */
1052 check_privileged(s
);
1053 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1054 tmp
= get_address(s
, 0, b2
, d2
);
1055 tmp2
= tcg_temp_new_i64();
1056 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1057 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1058 tcg_temp_free_i64(tmp
);
1059 tcg_temp_free_i64(tmp2
);
1061 case 0x12: /* STAP D2(B2) [S] */
1062 /* Store CPU Address */
1063 check_privileged(s
);
1064 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1065 tmp
= get_address(s
, 0, b2
, d2
);
1066 tmp2
= tcg_temp_new_i64();
1067 tmp32_1
= tcg_temp_new_i32();
1068 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1069 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1070 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1071 tcg_temp_free_i64(tmp
);
1072 tcg_temp_free_i64(tmp2
);
1073 tcg_temp_free_i32(tmp32_1
);
1075 case 0x21: /* IPTE R1,R2 [RRE] */
1076 /* Invalidate PTE */
1077 check_privileged(s
);
1078 r1
= (insn
>> 4) & 0xf;
1081 tmp2
= load_reg(r2
);
1082 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1083 tcg_temp_free_i64(tmp
);
1084 tcg_temp_free_i64(tmp2
);
1086 case 0x29: /* ISKE R1,R2 [RRE] */
1087 /* Insert Storage Key Extended */
1088 check_privileged(s
);
1089 r1
= (insn
>> 4) & 0xf;
1092 tmp2
= tcg_temp_new_i64();
1093 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1094 store_reg(r1
, tmp2
);
1095 tcg_temp_free_i64(tmp
);
1096 tcg_temp_free_i64(tmp2
);
1098 case 0x2a: /* RRBE R1,R2 [RRE] */
1099 /* Set Storage Key Extended */
1100 check_privileged(s
);
1101 r1
= (insn
>> 4) & 0xf;
1103 tmp32_1
= load_reg32(r1
);
1105 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1107 tcg_temp_free_i32(tmp32_1
);
1108 tcg_temp_free_i64(tmp
);
1110 case 0x2b: /* SSKE R1,R2 [RRE] */
1111 /* Set Storage Key Extended */
1112 check_privileged(s
);
1113 r1
= (insn
>> 4) & 0xf;
1115 tmp32_1
= load_reg32(r1
);
1117 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1118 tcg_temp_free_i32(tmp32_1
);
1119 tcg_temp_free_i64(tmp
);
1121 case 0x34: /* STCH ? */
1122 /* Store Subchannel */
1123 check_privileged(s
);
1124 gen_op_movi_cc(s
, 3);
1126 case 0x46: /* STURA R1,R2 [RRE] */
1127 /* Store Using Real Address */
1128 check_privileged(s
);
1129 r1
= (insn
>> 4) & 0xf;
1131 tmp32_1
= load_reg32(r1
);
1133 potential_page_fault(s
);
1134 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1135 tcg_temp_free_i32(tmp32_1
);
1136 tcg_temp_free_i64(tmp
);
1138 case 0x50: /* CSP R1,R2 [RRE] */
1139 /* Compare And Swap And Purge */
1140 check_privileged(s
);
1141 r1
= (insn
>> 4) & 0xf;
1143 tmp32_1
= tcg_const_i32(r1
);
1144 tmp32_2
= tcg_const_i32(r2
);
1145 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1147 tcg_temp_free_i32(tmp32_1
);
1148 tcg_temp_free_i32(tmp32_2
);
1150 case 0x5f: /* CHSC ? */
1151 /* Channel Subsystem Call */
1152 check_privileged(s
);
1153 gen_op_movi_cc(s
, 3);
1155 case 0x78: /* STCKE D2(B2) [S] */
1156 /* Store Clock Extended */
1157 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1158 tmp
= get_address(s
, 0, b2
, d2
);
1159 potential_page_fault(s
);
1160 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1162 tcg_temp_free_i64(tmp
);
1164 case 0x79: /* SACF D2(B2) [S] */
1165 /* Set Address Space Control Fast */
1166 check_privileged(s
);
1167 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1168 tmp
= get_address(s
, 0, b2
, d2
);
1169 potential_page_fault(s
);
1170 gen_helper_sacf(cpu_env
, tmp
);
1171 tcg_temp_free_i64(tmp
);
1172 /* addressing mode has changed, so end the block */
1175 s
->is_jmp
= DISAS_JUMP
;
1177 case 0x7d: /* STSI D2,(B2) [S] */
1178 check_privileged(s
);
1179 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1180 tmp
= get_address(s
, 0, b2
, d2
);
1181 tmp32_1
= load_reg32(0);
1182 tmp32_2
= load_reg32(1);
1183 potential_page_fault(s
);
1184 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1186 tcg_temp_free_i64(tmp
);
1187 tcg_temp_free_i32(tmp32_1
);
1188 tcg_temp_free_i32(tmp32_2
);
1190 case 0xb1: /* STFL D2(B2) [S] */
1191 /* Store Facility List (CPU features) at 200 */
1192 check_privileged(s
);
1193 tmp2
= tcg_const_i64(0xc0000000);
1194 tmp
= tcg_const_i64(200);
1195 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1196 tcg_temp_free_i64(tmp2
);
1197 tcg_temp_free_i64(tmp
);
1199 case 0xb2: /* LPSWE D2(B2) [S] */
1200 /* Load PSW Extended */
1201 check_privileged(s
);
1202 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1203 tmp
= get_address(s
, 0, b2
, d2
);
1204 tmp2
= tcg_temp_new_i64();
1205 tmp3
= tcg_temp_new_i64();
1206 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1207 tcg_gen_addi_i64(tmp
, tmp
, 8);
1208 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1209 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1210 /* we need to keep cc_op intact */
1211 s
->is_jmp
= DISAS_JUMP
;
1212 tcg_temp_free_i64(tmp
);
1213 tcg_temp_free_i64(tmp2
);
1214 tcg_temp_free_i64(tmp3
);
1216 case 0x20: /* SERVC R1,R2 [RRE] */
1217 /* SCLP Service call (PV hypercall) */
1218 check_privileged(s
);
1219 potential_page_fault(s
);
1220 tmp32_1
= load_reg32(r2
);
1222 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1224 tcg_temp_free_i32(tmp32_1
);
1225 tcg_temp_free_i64(tmp
);
1229 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1230 gen_illegal_opcode(s
);
1231 #ifndef CONFIG_USER_ONLY
1237 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1243 opc
= cpu_ldub_code(env
, s
->pc
);
1244 LOG_DISAS("opc 0x%x\n", opc
);
1248 insn
= ld_code4(env
, s
->pc
);
1249 op
= (insn
>> 16) & 0xff;
1250 disas_b2(env
, s
, op
, insn
);
1253 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1254 gen_illegal_opcode(s
);
1259 /* ====================================================================== */
1260 /* Define the insn format enumeration. */
1261 #define F0(N) FMT_##N,
1262 #define F1(N, X1) F0(N)
1263 #define F2(N, X1, X2) F0(N)
1264 #define F3(N, X1, X2, X3) F0(N)
1265 #define F4(N, X1, X2, X3, X4) F0(N)
1266 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1269 #include "insn-format.def"
1279 /* Define a structure to hold the decoded fields. We'll store each inside
1280 an array indexed by an enum. In order to conserve memory, we'll arrange
1281 for fields that do not exist at the same time to overlap, thus the "C"
1282 for compact. For checking purposes there is an "O" for original index
1283 as well that will be applied to availability bitmaps. */
1285 enum DisasFieldIndexO
{
1308 enum DisasFieldIndexC
{
1339 struct DisasFields
{
1342 unsigned presentC
:16;
1343 unsigned int presentO
;
1347 /* This is the way fields are to be accessed out of DisasFields. */
1348 #define have_field(S, F) have_field1((S), FLD_O_##F)
1349 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1351 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1353 return (f
->presentO
>> c
) & 1;
1356 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1357 enum DisasFieldIndexC c
)
1359 assert(have_field1(f
, o
));
1363 /* Describe the layout of each field in each format. */
1364 typedef struct DisasField
{
1366 unsigned int size
:8;
1367 unsigned int type
:2;
1368 unsigned int indexC
:6;
1369 enum DisasFieldIndexO indexO
:8;
1372 typedef struct DisasFormatInfo
{
1373 DisasField op
[NUM_C_FIELD
];
1376 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1377 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1378 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1379 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1380 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1381 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1382 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1383 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1384 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1385 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1386 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1387 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1388 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1389 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1391 #define F0(N) { { } },
1392 #define F1(N, X1) { { X1 } },
1393 #define F2(N, X1, X2) { { X1, X2 } },
1394 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1395 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1396 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1398 static const DisasFormatInfo format_info
[] = {
1399 #include "insn-format.def"
1417 /* Generally, we'll extract operands into this structures, operate upon
1418 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1419 of routines below for more details. */
1421 bool g_out
, g_out2
, g_in1
, g_in2
;
1422 TCGv_i64 out
, out2
, in1
, in2
;
1426 /* Return values from translate_one, indicating the state of the TB. */
1428 /* Continue the TB. */
1430 /* We have emitted one or more goto_tb. No fixup required. */
1432 /* We are not using a goto_tb (for whatever reason), but have updated
1433 the PC (for whatever reason), so there's no need to do it again on
1436 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1437 updated the PC for the next instruction to be executed. */
1439 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1440 No following code will be executed. */
1444 typedef enum DisasFacility
{
1445 FAC_Z
, /* zarch (default) */
1446 FAC_CASS
, /* compare and swap and store */
1447 FAC_CASS2
, /* compare and swap and store 2*/
1448 FAC_DFP
, /* decimal floating point */
1449 FAC_DFPR
, /* decimal floating point rounding */
1450 FAC_DO
, /* distinct operands */
1451 FAC_EE
, /* execute extensions */
1452 FAC_EI
, /* extended immediate */
1453 FAC_FPE
, /* floating point extension */
1454 FAC_FPSSH
, /* floating point support sign handling */
1455 FAC_FPRGR
, /* FPR-GR transfer */
1456 FAC_GIE
, /* general instructions extension */
1457 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1458 FAC_HW
, /* high-word */
1459 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1460 FAC_LOC
, /* load/store on condition */
1461 FAC_LD
, /* long displacement */
1462 FAC_PC
, /* population count */
1463 FAC_SCF
, /* store clock fast */
1464 FAC_SFLE
, /* store facility list extended */
1470 DisasFacility fac
:6;
1474 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1475 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1476 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1477 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1478 void (*help_cout
)(DisasContext
*, DisasOps
*);
1479 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1484 /* ====================================================================== */
1485 /* Miscellaneous helpers, used by several operations. */
1487 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1488 DisasOps
*o
, int mask
)
1490 int b2
= get_field(f
, b2
);
1491 int d2
= get_field(f
, d2
);
1494 o
->in2
= tcg_const_i64(d2
& mask
);
1496 o
->in2
= get_address(s
, 0, b2
, d2
);
1497 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1501 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1503 if (dest
== s
->next_pc
) {
1506 if (use_goto_tb(s
, dest
)) {
1507 gen_update_cc_op(s
);
1509 tcg_gen_movi_i64(psw_addr
, dest
);
1510 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1511 return EXIT_GOTO_TB
;
1513 tcg_gen_movi_i64(psw_addr
, dest
);
1514 return EXIT_PC_UPDATED
;
1518 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1519 bool is_imm
, int imm
, TCGv_i64 cdest
)
1522 uint64_t dest
= s
->pc
+ 2 * imm
;
1525 /* Take care of the special cases first. */
1526 if (c
->cond
== TCG_COND_NEVER
) {
1531 if (dest
== s
->next_pc
) {
1532 /* Branch to next. */
1536 if (c
->cond
== TCG_COND_ALWAYS
) {
1537 ret
= help_goto_direct(s
, dest
);
1541 if (TCGV_IS_UNUSED_I64(cdest
)) {
1542 /* E.g. bcr %r0 -> no branch. */
1546 if (c
->cond
== TCG_COND_ALWAYS
) {
1547 tcg_gen_mov_i64(psw_addr
, cdest
);
1548 ret
= EXIT_PC_UPDATED
;
1553 if (use_goto_tb(s
, s
->next_pc
)) {
1554 if (is_imm
&& use_goto_tb(s
, dest
)) {
1555 /* Both exits can use goto_tb. */
1556 gen_update_cc_op(s
);
1558 lab
= gen_new_label();
1560 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1562 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1565 /* Branch not taken. */
1567 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1568 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1573 tcg_gen_movi_i64(psw_addr
, dest
);
1574 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1578 /* Fallthru can use goto_tb, but taken branch cannot. */
1579 /* Store taken branch destination before the brcond. This
1580 avoids having to allocate a new local temp to hold it.
1581 We'll overwrite this in the not taken case anyway. */
1583 tcg_gen_mov_i64(psw_addr
, cdest
);
1586 lab
= gen_new_label();
1588 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1590 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1593 /* Branch not taken. */
1594 gen_update_cc_op(s
);
1596 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1597 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1601 tcg_gen_movi_i64(psw_addr
, dest
);
1603 ret
= EXIT_PC_UPDATED
;
1606 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1607 Most commonly we're single-stepping or some other condition that
1608 disables all use of goto_tb. Just update the PC and exit. */
1610 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1612 cdest
= tcg_const_i64(dest
);
1616 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1619 TCGv_i32 t0
= tcg_temp_new_i32();
1620 TCGv_i64 t1
= tcg_temp_new_i64();
1621 TCGv_i64 z
= tcg_const_i64(0);
1622 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1623 tcg_gen_extu_i32_i64(t1
, t0
);
1624 tcg_temp_free_i32(t0
);
1625 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1626 tcg_temp_free_i64(t1
);
1627 tcg_temp_free_i64(z
);
1631 tcg_temp_free_i64(cdest
);
1633 tcg_temp_free_i64(next
);
1635 ret
= EXIT_PC_UPDATED
;
1643 /* ====================================================================== */
1644 /* The operations. These perform the bulk of the work for any insn,
1645 usually after the operands have been loaded and output initialized. */
1647 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1649 gen_helper_abs_i64(o
->out
, o
->in2
);
1653 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1655 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1659 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1661 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1665 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1667 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1668 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1672 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1674 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1678 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1682 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1684 /* XXX possible optimization point */
1686 cc
= tcg_temp_new_i64();
1687 tcg_gen_extu_i32_i64(cc
, cc_op
);
1688 tcg_gen_shri_i64(cc
, cc
, 1);
1690 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1691 tcg_temp_free_i64(cc
);
1695 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1697 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1701 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1703 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1707 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1709 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1710 return_low128(o
->out2
);
1714 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1716 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1720 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1722 int shift
= s
->insn
->data
& 0xff;
1723 int size
= s
->insn
->data
>> 8;
1724 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1727 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1728 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1729 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1731 /* Produce the CC from only the bits manipulated. */
1732 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1733 set_cc_nz_u64(s
, cc_dst
);
1737 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1739 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1740 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1741 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1742 return EXIT_PC_UPDATED
;
1748 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1750 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1751 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1754 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1756 int m1
= get_field(s
->fields
, m1
);
1757 bool is_imm
= have_field(s
->fields
, i2
);
1758 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1761 disas_jcc(s
, &c
, m1
);
1762 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1765 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1767 int r1
= get_field(s
->fields
, r1
);
1768 bool is_imm
= have_field(s
->fields
, i2
);
1769 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1773 c
.cond
= TCG_COND_NE
;
1778 t
= tcg_temp_new_i64();
1779 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1780 store_reg32_i64(r1
, t
);
1781 c
.u
.s32
.a
= tcg_temp_new_i32();
1782 c
.u
.s32
.b
= tcg_const_i32(0);
1783 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1784 tcg_temp_free_i64(t
);
1786 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1789 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1791 int r1
= get_field(s
->fields
, r1
);
1792 bool is_imm
= have_field(s
->fields
, i2
);
1793 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1796 c
.cond
= TCG_COND_NE
;
1801 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1802 c
.u
.s64
.a
= regs
[r1
];
1803 c
.u
.s64
.b
= tcg_const_i64(0);
1805 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1808 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1810 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1815 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1817 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1822 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1824 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1829 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1831 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1832 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1833 tcg_temp_free_i32(m3
);
1834 gen_set_cc_nz_f32(s
, o
->in2
);
1838 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1840 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1841 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1842 tcg_temp_free_i32(m3
);
1843 gen_set_cc_nz_f64(s
, o
->in2
);
1847 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1849 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1850 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1851 tcg_temp_free_i32(m3
);
1852 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1856 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1858 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1859 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1860 tcg_temp_free_i32(m3
);
1861 gen_set_cc_nz_f32(s
, o
->in2
);
1865 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1867 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1868 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1869 tcg_temp_free_i32(m3
);
1870 gen_set_cc_nz_f64(s
, o
->in2
);
1874 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1876 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1877 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1878 tcg_temp_free_i32(m3
);
1879 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1883 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1885 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1886 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1887 tcg_temp_free_i32(m3
);
1891 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1893 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1894 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1895 tcg_temp_free_i32(m3
);
1899 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1901 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1902 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1903 tcg_temp_free_i32(m3
);
1904 return_low128(o
->out2
);
1908 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1910 int r2
= get_field(s
->fields
, r2
);
1911 TCGv_i64 len
= tcg_temp_new_i64();
1913 potential_page_fault(s
);
1914 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1916 return_low128(o
->out
);
1918 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1919 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1920 tcg_temp_free_i64(len
);
1925 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1927 int l
= get_field(s
->fields
, l1
);
1932 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1933 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1936 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1937 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1940 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1941 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1944 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1945 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1948 potential_page_fault(s
);
1949 vl
= tcg_const_i32(l
);
1950 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1951 tcg_temp_free_i32(vl
);
1955 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1959 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1961 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1962 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1963 potential_page_fault(s
);
1964 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1965 tcg_temp_free_i32(r1
);
1966 tcg_temp_free_i32(r3
);
1971 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1973 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1974 TCGv_i32 t1
= tcg_temp_new_i32();
1975 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1976 potential_page_fault(s
);
1977 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1979 tcg_temp_free_i32(t1
);
1980 tcg_temp_free_i32(m3
);
1984 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1986 potential_page_fault(s
);
1987 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1989 return_low128(o
->in2
);
1993 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1995 int r3
= get_field(s
->fields
, r3
);
1996 potential_page_fault(s
);
1997 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2002 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2004 int r3
= get_field(s
->fields
, r3
);
2005 potential_page_fault(s
);
2006 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2011 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2013 int r3
= get_field(s
->fields
, r3
);
2014 TCGv_i64 in3
= tcg_temp_new_i64();
2015 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2016 potential_page_fault(s
);
2017 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2018 tcg_temp_free_i64(in3
);
2023 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2025 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2026 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2027 potential_page_fault(s
);
2028 /* XXX rewrite in tcg */
2029 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2034 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2036 TCGv_i64 t1
= tcg_temp_new_i64();
2037 TCGv_i32 t2
= tcg_temp_new_i32();
2038 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2039 gen_helper_cvd(t1
, t2
);
2040 tcg_temp_free_i32(t2
);
2041 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2042 tcg_temp_free_i64(t1
);
2046 #ifndef CONFIG_USER_ONLY
2047 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2051 check_privileged(s
);
2052 potential_page_fault(s
);
2054 /* We pretend the format is RX_a so that D2 is the field we want. */
2055 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2056 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2057 tcg_temp_free_i32(tmp
);
2062 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2064 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2065 return_low128(o
->out
);
2069 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2071 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2072 return_low128(o
->out
);
2076 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2078 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2079 return_low128(o
->out
);
2083 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2085 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2086 return_low128(o
->out
);
2090 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2092 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2096 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2098 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2102 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2104 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2105 return_low128(o
->out2
);
2109 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2111 int r2
= get_field(s
->fields
, r2
);
2112 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2116 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2118 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2122 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2124 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2125 tb->flags, (ab)use the tb->cs_base field as the address of
2126 the template in memory, and grab 8 bits of tb->flags/cflags for
2127 the contents of the register. We would then recognize all this
2128 in gen_intermediate_code_internal, generating code for exactly
2129 one instruction. This new TB then gets executed normally.
2131 On the other hand, this seems to be mostly used for modifying
2132 MVC inside of memcpy, which needs a helper call anyway. So
2133 perhaps this doesn't bear thinking about any further. */
2140 tmp
= tcg_const_i64(s
->next_pc
);
2141 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2142 tcg_temp_free_i64(tmp
);
2148 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2150 /* We'll use the original input for cc computation, since we get to
2151 compare that against 0, which ought to be better than comparing
2152 the real output against 64. It also lets cc_dst be a convenient
2153 temporary during our computation. */
2154 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2156 /* R1 = IN ? CLZ(IN) : 64. */
2157 gen_helper_clz(o
->out
, o
->in2
);
2159 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2160 value by 64, which is undefined. But since the shift is 64 iff the
2161 input is zero, we still get the correct result after and'ing. */
2162 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2163 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2164 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2168 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2170 int m3
= get_field(s
->fields
, m3
);
2171 int pos
, len
, base
= s
->insn
->data
;
2172 TCGv_i64 tmp
= tcg_temp_new_i64();
2177 /* Effectively a 32-bit load. */
2178 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2185 /* Effectively a 16-bit load. */
2186 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2194 /* Effectively an 8-bit load. */
2195 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2200 pos
= base
+ ctz32(m3
) * 8;
2201 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2202 ccm
= ((1ull << len
) - 1) << pos
;
2206 /* This is going to be a sequence of loads and inserts. */
2207 pos
= base
+ 32 - 8;
2211 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2212 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2213 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2216 m3
= (m3
<< 1) & 0xf;
2222 tcg_gen_movi_i64(tmp
, ccm
);
2223 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2224 tcg_temp_free_i64(tmp
);
2228 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2230 int shift
= s
->insn
->data
& 0xff;
2231 int size
= s
->insn
->data
>> 8;
2232 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2236 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2241 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2243 t1
= tcg_temp_new_i64();
2244 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2245 tcg_gen_shri_i64(t1
, t1
, 36);
2246 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2248 tcg_gen_extu_i32_i64(t1
, cc_op
);
2249 tcg_gen_shli_i64(t1
, t1
, 28);
2250 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2251 tcg_temp_free_i64(t1
);
2255 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2257 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2261 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2263 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2267 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2269 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2273 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2275 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2279 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2281 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2282 return_low128(o
->out2
);
2286 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2288 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2289 return_low128(o
->out2
);
2293 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2295 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2299 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2301 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2305 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2307 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2311 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2313 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2317 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2319 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2323 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2325 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2329 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2331 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2335 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2337 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2341 #ifndef CONFIG_USER_ONLY
2342 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2344 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2345 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2346 check_privileged(s
);
2347 potential_page_fault(s
);
2348 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2349 tcg_temp_free_i32(r1
);
2350 tcg_temp_free_i32(r3
);
2354 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2356 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2357 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2358 check_privileged(s
);
2359 potential_page_fault(s
);
2360 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2361 tcg_temp_free_i32(r1
);
2362 tcg_temp_free_i32(r3
);
2365 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2367 check_privileged(s
);
2368 potential_page_fault(s
);
2369 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2374 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2378 check_privileged(s
);
2380 t1
= tcg_temp_new_i64();
2381 t2
= tcg_temp_new_i64();
2382 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2383 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2384 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2385 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2386 tcg_gen_shli_i64(t1
, t1
, 32);
2387 gen_helper_load_psw(cpu_env
, t1
, t2
);
2388 tcg_temp_free_i64(t1
);
2389 tcg_temp_free_i64(t2
);
2390 return EXIT_NORETURN
;
2394 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2396 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2397 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2398 potential_page_fault(s
);
2399 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2400 tcg_temp_free_i32(r1
);
2401 tcg_temp_free_i32(r3
);
2405 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2407 int r1
= get_field(s
->fields
, r1
);
2408 int r3
= get_field(s
->fields
, r3
);
2409 TCGv_i64 t
= tcg_temp_new_i64();
2410 TCGv_i64 t4
= tcg_const_i64(4);
2413 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2414 store_reg32_i64(r1
, t
);
2418 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2422 tcg_temp_free_i64(t
);
2423 tcg_temp_free_i64(t4
);
2427 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2429 int r1
= get_field(s
->fields
, r1
);
2430 int r3
= get_field(s
->fields
, r3
);
2431 TCGv_i64 t
= tcg_temp_new_i64();
2432 TCGv_i64 t4
= tcg_const_i64(4);
2435 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2436 store_reg32h_i64(r1
, t
);
2440 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2444 tcg_temp_free_i64(t
);
2445 tcg_temp_free_i64(t4
);
2449 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2451 int r1
= get_field(s
->fields
, r1
);
2452 int r3
= get_field(s
->fields
, r3
);
2453 TCGv_i64 t8
= tcg_const_i64(8);
2456 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2460 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2464 tcg_temp_free_i64(t8
);
2468 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2471 o
->g_out
= o
->g_in2
;
2472 TCGV_UNUSED_I64(o
->in2
);
2477 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2481 o
->g_out
= o
->g_in1
;
2482 o
->g_out2
= o
->g_in2
;
2483 TCGV_UNUSED_I64(o
->in1
);
2484 TCGV_UNUSED_I64(o
->in2
);
2485 o
->g_in1
= o
->g_in2
= false;
2489 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2491 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2492 potential_page_fault(s
);
2493 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2494 tcg_temp_free_i32(l
);
2498 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2500 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2501 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2502 potential_page_fault(s
);
2503 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2504 tcg_temp_free_i32(r1
);
2505 tcg_temp_free_i32(r2
);
2510 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2512 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2513 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2514 potential_page_fault(s
);
2515 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2516 tcg_temp_free_i32(r1
);
2517 tcg_temp_free_i32(r3
);
2522 #ifndef CONFIG_USER_ONLY
2523 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2525 int r1
= get_field(s
->fields
, l1
);
2526 check_privileged(s
);
2527 potential_page_fault(s
);
2528 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2533 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2535 int r1
= get_field(s
->fields
, l1
);
2536 check_privileged(s
);
2537 potential_page_fault(s
);
2538 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2544 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2546 potential_page_fault(s
);
2547 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2552 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2554 potential_page_fault(s
);
2555 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2557 return_low128(o
->in2
);
2561 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2563 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2567 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2569 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2570 return_low128(o
->out2
);
2574 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2576 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2580 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2582 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2586 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2588 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2592 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2594 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2595 return_low128(o
->out2
);
2599 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2601 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2602 return_low128(o
->out2
);
2606 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2608 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2609 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2610 tcg_temp_free_i64(r3
);
2614 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2616 int r3
= get_field(s
->fields
, r3
);
2617 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2621 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2623 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2624 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2625 tcg_temp_free_i64(r3
);
2629 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2631 int r3
= get_field(s
->fields
, r3
);
2632 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2636 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2638 gen_helper_nabs_i64(o
->out
, o
->in2
);
2642 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2644 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2648 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2650 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2654 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2656 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2657 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2661 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2663 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2664 potential_page_fault(s
);
2665 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2666 tcg_temp_free_i32(l
);
2671 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2673 tcg_gen_neg_i64(o
->out
, o
->in2
);
2677 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2679 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2683 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2685 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2689 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2691 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2692 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2696 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2698 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2699 potential_page_fault(s
);
2700 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2701 tcg_temp_free_i32(l
);
2706 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2708 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2712 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2714 int shift
= s
->insn
->data
& 0xff;
2715 int size
= s
->insn
->data
>> 8;
2716 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2719 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2720 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2722 /* Produce the CC from only the bits manipulated. */
2723 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2724 set_cc_nz_u64(s
, cc_dst
);
2728 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2730 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2734 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2736 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2740 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2742 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2746 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2748 TCGv_i32 t1
= tcg_temp_new_i32();
2749 TCGv_i32 t2
= tcg_temp_new_i32();
2750 TCGv_i32 to
= tcg_temp_new_i32();
2751 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2752 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2753 tcg_gen_rotl_i32(to
, t1
, t2
);
2754 tcg_gen_extu_i32_i64(o
->out
, to
);
2755 tcg_temp_free_i32(t1
);
2756 tcg_temp_free_i32(t2
);
2757 tcg_temp_free_i32(to
);
2761 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2763 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2767 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2769 int r1
= get_field(s
->fields
, r1
);
2770 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2774 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2776 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2780 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2782 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2786 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2788 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2789 return_low128(o
->out2
);
2793 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2795 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2799 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2801 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2805 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2807 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2808 return_low128(o
->out2
);
2812 #ifndef CONFIG_USER_ONLY
2813 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2815 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2816 check_privileged(s
);
2817 potential_page_fault(s
);
2818 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2819 tcg_temp_free_i32(r1
);
2824 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2826 uint64_t sign
= 1ull << s
->insn
->data
;
2827 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2828 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2829 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2830 /* The arithmetic left shift is curious in that it does not affect
2831 the sign bit. Copy that over from the source unchanged. */
2832 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2833 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2834 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2838 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2840 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2844 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2846 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2850 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2852 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2856 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2858 gen_helper_sfpc(cpu_env
, o
->in2
);
2862 #ifndef CONFIG_USER_ONLY
2863 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2865 check_privileged(s
);
2866 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2867 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2871 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2873 check_privileged(s
);
2874 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2878 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2880 gen_helper_stck(o
->out
, cpu_env
);
2881 /* ??? We don't implement clock states. */
2882 gen_op_movi_cc(s
, 0);
2886 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2888 check_privileged(s
);
2889 gen_helper_sckc(cpu_env
, o
->in2
);
2893 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2895 check_privileged(s
);
2896 gen_helper_stckc(o
->out
, cpu_env
);
2900 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2902 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2903 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2904 check_privileged(s
);
2905 potential_page_fault(s
);
2906 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2907 tcg_temp_free_i32(r1
);
2908 tcg_temp_free_i32(r3
);
2912 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2914 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2915 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2916 check_privileged(s
);
2917 potential_page_fault(s
);
2918 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2919 tcg_temp_free_i32(r1
);
2920 tcg_temp_free_i32(r3
);
2924 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2926 check_privileged(s
);
2927 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2931 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2933 check_privileged(s
);
2934 gen_helper_spt(cpu_env
, o
->in2
);
2938 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2940 check_privileged(s
);
2941 gen_helper_stpt(o
->out
, cpu_env
);
2945 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2947 uint64_t i2
= get_field(s
->fields
, i2
);
2950 check_privileged(s
);
2952 /* It is important to do what the instruction name says: STORE THEN.
2953 If we let the output hook perform the store then if we fault and
2954 restart, we'll have the wrong SYSTEM MASK in place. */
2955 t
= tcg_temp_new_i64();
2956 tcg_gen_shri_i64(t
, psw_mask
, 56);
2957 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2958 tcg_temp_free_i64(t
);
2960 if (s
->fields
->op
== 0xac) {
2961 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2962 (i2
<< 56) | 0x00ffffffffffffffull
);
2964 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2970 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2972 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2976 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2978 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2982 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2984 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2988 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2990 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2994 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2996 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2997 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2998 potential_page_fault(s
);
2999 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3000 tcg_temp_free_i32(r1
);
3001 tcg_temp_free_i32(r3
);
3005 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3007 int m3
= get_field(s
->fields
, m3
);
3008 int pos
, base
= s
->insn
->data
;
3009 TCGv_i64 tmp
= tcg_temp_new_i64();
3011 pos
= base
+ ctz32(m3
) * 8;
3014 /* Effectively a 32-bit store. */
3015 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3016 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3022 /* Effectively a 16-bit store. */
3023 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3024 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3031 /* Effectively an 8-bit store. */
3032 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3033 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3037 /* This is going to be a sequence of shifts and stores. */
3038 pos
= base
+ 32 - 8;
3041 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3042 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3043 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3045 m3
= (m3
<< 1) & 0xf;
3050 tcg_temp_free_i64(tmp
);
3054 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3056 int r1
= get_field(s
->fields
, r1
);
3057 int r3
= get_field(s
->fields
, r3
);
3058 int size
= s
->insn
->data
;
3059 TCGv_i64 tsize
= tcg_const_i64(size
);
3063 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3065 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3070 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3074 tcg_temp_free_i64(tsize
);
3078 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3080 int r1
= get_field(s
->fields
, r1
);
3081 int r3
= get_field(s
->fields
, r3
);
3082 TCGv_i64 t
= tcg_temp_new_i64();
3083 TCGv_i64 t4
= tcg_const_i64(4);
3084 TCGv_i64 t32
= tcg_const_i64(32);
3087 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3088 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3092 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3096 tcg_temp_free_i64(t
);
3097 tcg_temp_free_i64(t4
);
3098 tcg_temp_free_i64(t32
);
3102 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3104 potential_page_fault(s
);
3105 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3107 return_low128(o
->in2
);
3111 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3113 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3117 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3122 tcg_gen_not_i64(o
->in2
, o
->in2
);
3123 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3125 /* XXX possible optimization point */
3127 cc
= tcg_temp_new_i64();
3128 tcg_gen_extu_i32_i64(cc
, cc_op
);
3129 tcg_gen_shri_i64(cc
, cc
, 1);
3130 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3131 tcg_temp_free_i64(cc
);
3135 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3142 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3143 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3144 tcg_temp_free_i32(t
);
3146 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3147 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3148 tcg_temp_free_i32(t
);
3150 gen_exception(EXCP_SVC
);
3151 return EXIT_NORETURN
;
3154 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3156 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3161 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3163 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3168 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3170 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3175 #ifndef CONFIG_USER_ONLY
3176 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3178 potential_page_fault(s
);
3179 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3185 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3187 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3188 potential_page_fault(s
);
3189 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3190 tcg_temp_free_i32(l
);
3195 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3197 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3198 potential_page_fault(s
);
3199 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3200 tcg_temp_free_i32(l
);
3204 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3206 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3207 potential_page_fault(s
);
3208 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3209 tcg_temp_free_i32(l
);
3214 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3216 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3220 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3222 int shift
= s
->insn
->data
& 0xff;
3223 int size
= s
->insn
->data
>> 8;
3224 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3227 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3228 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3230 /* Produce the CC from only the bits manipulated. */
3231 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3232 set_cc_nz_u64(s
, cc_dst
);
3236 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3238 o
->out
= tcg_const_i64(0);
3242 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3244 o
->out
= tcg_const_i64(0);
3250 /* ====================================================================== */
3251 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3252 the original inputs), update the various cc data structures in order to
3253 be able to compute the new condition code. */
3255 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3257 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3260 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3262 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3265 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3267 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3270 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3272 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3275 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3277 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3280 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3282 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3285 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3287 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3290 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3292 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3295 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3297 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3300 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3302 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3305 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3307 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3310 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3312 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3315 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3317 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3320 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3322 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3325 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3327 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3330 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3332 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3335 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3337 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3340 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3342 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3345 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3347 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3350 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3352 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3353 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3356 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3358 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3361 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3363 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3366 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3368 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3371 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3373 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3376 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3378 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3381 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3383 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3386 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3388 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3391 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3393 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3396 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3398 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3401 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3403 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3406 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3408 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3411 /* ====================================================================== */
3412 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3413 with the TCG register to which we will write. Used in combination with
3414 the "wout" generators, in some cases we need a new temporary, and in
3415 some cases we can write to a TCG global. */
3417 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3419 o
->out
= tcg_temp_new_i64();
3422 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3424 o
->out
= tcg_temp_new_i64();
3425 o
->out2
= tcg_temp_new_i64();
3428 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3430 o
->out
= regs
[get_field(f
, r1
)];
3434 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3436 /* ??? Specification exception: r1 must be even. */
3437 int r1
= get_field(f
, r1
);
3439 o
->out2
= regs
[(r1
+ 1) & 15];
3440 o
->g_out
= o
->g_out2
= true;
3443 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3445 o
->out
= fregs
[get_field(f
, r1
)];
3449 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3451 /* ??? Specification exception: r1 must be < 14. */
3452 int r1
= get_field(f
, r1
);
3454 o
->out2
= fregs
[(r1
+ 2) & 15];
3455 o
->g_out
= o
->g_out2
= true;
3458 /* ====================================================================== */
3459 /* The "Write OUTput" generators. These generally perform some non-trivial
3460 copy of data to TCG globals, or to main memory. The trivial cases are
3461 generally handled by having a "prep" generator install the TCG global
3462 as the destination of the operation. */
3464 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3466 store_reg(get_field(f
, r1
), o
->out
);
3469 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3471 int r1
= get_field(f
, r1
);
3472 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3475 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3477 int r1
= get_field(f
, r1
);
3478 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3481 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3483 store_reg32_i64(get_field(f
, r1
), o
->out
);
3486 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3488 /* ??? Specification exception: r1 must be even. */
3489 int r1
= get_field(f
, r1
);
3490 store_reg32_i64(r1
, o
->out
);
3491 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3494 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3496 /* ??? Specification exception: r1 must be even. */
3497 int r1
= get_field(f
, r1
);
3498 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3499 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3500 store_reg32_i64(r1
, o
->out
);
3503 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3505 store_freg32_i64(get_field(f
, r1
), o
->out
);
3508 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3510 store_freg(get_field(f
, r1
), o
->out
);
3513 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3515 /* ??? Specification exception: r1 must be < 14. */
3516 int f1
= get_field(s
->fields
, r1
);
3517 store_freg(f1
, o
->out
);
3518 store_freg((f1
+ 2) & 15, o
->out2
);
3521 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3524 store_reg32_i64(get_field(f
, r1
), o
->out
);
3528 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3530 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3531 store_freg32_i64(get_field(f
, r1
), o
->out
);
3535 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3537 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3540 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3542 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3545 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3547 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3550 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3552 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3555 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3557 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3560 /* ====================================================================== */
3561 /* The "INput 1" generators. These load the first operand to an insn. */
3563 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3565 o
->in1
= load_reg(get_field(f
, r1
));
3568 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3570 o
->in1
= regs
[get_field(f
, r1
)];
3574 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3576 o
->in1
= tcg_temp_new_i64();
3577 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3580 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3582 o
->in1
= tcg_temp_new_i64();
3583 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3586 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3588 o
->in1
= tcg_temp_new_i64();
3589 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3592 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3594 /* ??? Specification exception: r1 must be even. */
3595 int r1
= get_field(f
, r1
);
3596 o
->in1
= load_reg((r1
+ 1) & 15);
3599 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3601 /* ??? Specification exception: r1 must be even. */
3602 int r1
= get_field(f
, r1
);
3603 o
->in1
= tcg_temp_new_i64();
3604 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3607 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3609 /* ??? Specification exception: r1 must be even. */
3610 int r1
= get_field(f
, r1
);
3611 o
->in1
= tcg_temp_new_i64();
3612 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3615 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3617 /* ??? Specification exception: r1 must be even. */
3618 int r1
= get_field(f
, r1
);
3619 o
->in1
= tcg_temp_new_i64();
3620 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3623 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3625 o
->in1
= load_reg(get_field(f
, r2
));
3628 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3630 o
->in1
= load_reg(get_field(f
, r3
));
3633 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3635 o
->in1
= regs
[get_field(f
, r3
)];
3639 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3641 o
->in1
= tcg_temp_new_i64();
3642 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3645 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3647 o
->in1
= tcg_temp_new_i64();
3648 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3651 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3653 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3656 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3658 o
->in1
= fregs
[get_field(f
, r1
)];
3662 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3664 /* ??? Specification exception: r1 must be < 14. */
3665 int r1
= get_field(f
, r1
);
3667 o
->out2
= fregs
[(r1
+ 2) & 15];
3668 o
->g_out
= o
->g_out2
= true;
3671 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3673 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3676 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3678 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3679 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3682 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3685 o
->in1
= tcg_temp_new_i64();
3686 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3689 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3692 o
->in1
= tcg_temp_new_i64();
3693 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3696 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3699 o
->in1
= tcg_temp_new_i64();
3700 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3703 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3706 o
->in1
= tcg_temp_new_i64();
3707 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3710 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3713 o
->in1
= tcg_temp_new_i64();
3714 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3717 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3720 o
->in1
= tcg_temp_new_i64();
3721 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3724 /* ====================================================================== */
3725 /* The "INput 2" generators. These load the second operand to an insn. */
3727 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3729 o
->in2
= regs
[get_field(f
, r1
)];
3733 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3735 o
->in2
= tcg_temp_new_i64();
3736 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3739 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3741 o
->in2
= tcg_temp_new_i64();
3742 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3745 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3747 o
->in2
= load_reg(get_field(f
, r2
));
3750 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3752 o
->in2
= regs
[get_field(f
, r2
)];
3756 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3758 int r2
= get_field(f
, r2
);
3760 o
->in2
= load_reg(r2
);
3764 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3766 o
->in2
= tcg_temp_new_i64();
3767 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3770 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3772 o
->in2
= tcg_temp_new_i64();
3773 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3776 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3778 o
->in2
= tcg_temp_new_i64();
3779 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3782 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3784 o
->in2
= tcg_temp_new_i64();
3785 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3788 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3790 o
->in2
= load_reg(get_field(f
, r3
));
3793 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3795 o
->in2
= tcg_temp_new_i64();
3796 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3799 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3801 o
->in2
= tcg_temp_new_i64();
3802 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3805 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3807 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3810 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3812 o
->in2
= fregs
[get_field(f
, r2
)];
3816 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3818 /* ??? Specification exception: r1 must be < 14. */
3819 int r2
= get_field(f
, r2
);
3821 o
->in2
= fregs
[(r2
+ 2) & 15];
3822 o
->g_in1
= o
->g_in2
= true;
3825 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3827 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3830 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3832 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3833 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3836 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3838 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3841 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3843 help_l2_shift(s
, f
, o
, 31);
3846 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3848 help_l2_shift(s
, f
, o
, 63);
3851 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3857 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3860 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3863 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3866 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3869 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3872 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3875 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3878 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3881 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3884 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3887 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3890 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3893 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3899 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3902 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3905 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3908 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3911 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3913 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3916 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3918 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3921 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3923 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3926 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3928 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3931 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3933 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3934 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3937 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3939 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3940 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3943 /* ====================================================================== */
3945 /* Find opc within the table of insns. This is formulated as a switch
3946 statement so that (1) we get compile-time notice of cut-paste errors
3947 for duplicated opcodes, and (2) the compiler generates the binary
3948 search tree, rather than us having to post-process the table. */
3950 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3951 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3953 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3955 enum DisasInsnEnum
{
3956 #include "insn-data.def"
3960 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3965 .help_in1 = in1_##I1, \
3966 .help_in2 = in2_##I2, \
3967 .help_prep = prep_##P, \
3968 .help_wout = wout_##W, \
3969 .help_cout = cout_##CC, \
3970 .help_op = op_##OP, \
3974 /* Allow 0 to be used for NULL in the table below. */
3982 static const DisasInsn insn_info
[] = {
3983 #include "insn-data.def"
3987 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3988 case OPC: return &insn_info[insn_ ## NM];
3990 static const DisasInsn
*lookup_opc(uint16_t opc
)
3993 #include "insn-data.def"
4002 /* Extract a field from the insn. The INSN should be left-aligned in
4003 the uint64_t so that we can more easily utilize the big-bit-endian
4004 definitions we extract from the Principals of Operation. */
4006 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4014 /* Zero extract the field from the insn. */
4015 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4017 /* Sign-extend, or un-swap the field as necessary. */
4019 case 0: /* unsigned */
4021 case 1: /* signed */
4022 assert(f
->size
<= 32);
4023 m
= 1u << (f
->size
- 1);
4026 case 2: /* dl+dh split, signed 20 bit. */
4027 r
= ((int8_t)r
<< 12) | (r
>> 8);
4033 /* Validate that the "compressed" encoding we selected above is valid.
4034 I.e. we havn't make two different original fields overlap. */
4035 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4036 o
->presentC
|= 1 << f
->indexC
;
4037 o
->presentO
|= 1 << f
->indexO
;
4039 o
->c
[f
->indexC
] = r
;
4042 /* Lookup the insn at the current PC, extracting the operands into O and
4043 returning the info struct for the insn. Returns NULL for invalid insn. */
4045 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4048 uint64_t insn
, pc
= s
->pc
;
4050 const DisasInsn
*info
;
4052 insn
= ld_code2(env
, pc
);
4053 op
= (insn
>> 8) & 0xff;
4054 ilen
= get_ilen(op
);
4055 s
->next_pc
= s
->pc
+ ilen
;
4062 insn
= ld_code4(env
, pc
) << 32;
4065 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4071 /* We can't actually determine the insn format until we've looked up
4072 the full insn opcode. Which we can't do without locating the
4073 secondary opcode. Assume by default that OP2 is at bit 40; for
4074 those smaller insns that don't actually have a secondary opcode
4075 this will correctly result in OP2 = 0. */
4081 case 0xb2: /* S, RRF, RRE */
4082 case 0xb3: /* RRE, RRD, RRF */
4083 case 0xb9: /* RRE, RRF */
4084 case 0xe5: /* SSE, SIL */
4085 op2
= (insn
<< 8) >> 56;
4089 case 0xc0: /* RIL */
4090 case 0xc2: /* RIL */
4091 case 0xc4: /* RIL */
4092 case 0xc6: /* RIL */
4093 case 0xc8: /* SSF */
4094 case 0xcc: /* RIL */
4095 op2
= (insn
<< 12) >> 60;
4097 case 0xd0 ... 0xdf: /* SS */
4103 case 0xee ... 0xf3: /* SS */
4104 case 0xf8 ... 0xfd: /* SS */
4108 op2
= (insn
<< 40) >> 56;
4112 memset(f
, 0, sizeof(*f
));
4116 /* Lookup the instruction. */
4117 info
= lookup_opc(op
<< 8 | op2
);
4119 /* If we found it, extract the operands. */
4121 DisasFormat fmt
= info
->fmt
;
4124 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4125 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single guest instruction: decode it via extract_insn(),
 * then run the table-driven helper pipeline (help_in1 / help_in2 /
 * help_prep / help_op / help_wout / help_cout) and free any TCG
 * temporaries the helpers allocated.  The returned ExitStatus tells the
 * main translation loop whether it may keep translating. */
4131 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4133 const DisasInsn
*insn
;
4134 ExitStatus ret
= NO_EXIT
;
4138 insn
= extract_insn(env
, s
, &f
);
4140 /* If not found, try the old interpreter. This includes ILLOPC. */
4142 disas_s390_insn(env
, s
);
/* Convert the legacy translator's s->is_jmp disposition into our
 * ExitStatus return value. */
4143 switch (s
->is_jmp
) {
4151 ret
= EXIT_PC_UPDATED
;
4154 ret
= EXIT_NORETURN
;
4164 /* Set up the structures we use to communicate with the helpers. */
/* Clear the g_* flags and mark every operand TCGv unused, so the
 * cleanup code at the bottom can tell which ones the helpers
 * actually allocated. */
4167 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4168 TCGV_UNUSED_I64(o
.out
);
4169 TCGV_UNUSED_I64(o
.out2
);
4170 TCGV_UNUSED_I64(o
.in1
);
4171 TCGV_UNUSED_I64(o
.in2
);
4172 TCGV_UNUSED_I64(o
.addr1
);
4174 /* Implement the instruction. */
/* Each pipeline phase is optional: a NULL hook is simply skipped.
 * Only help_op contributes to the returned ExitStatus. */
4175 if (insn
->help_in1
) {
4176 insn
->help_in1(s
, &f
, &o
);
4178 if (insn
->help_in2
) {
4179 insn
->help_in2(s
, &f
, &o
);
4181 if (insn
->help_prep
) {
4182 insn
->help_prep(s
, &f
, &o
);
4184 if (insn
->help_op
) {
4185 ret
= insn
->help_op(s
, &o
);
4187 if (insn
->help_wout
) {
4188 insn
->help_wout(s
, &f
, &o
);
4190 if (insn
->help_cout
) {
4191 insn
->help_cout(s
, &o
);
4194 /* Free any temporaries created by the helpers. */
/* NOTE(review): a set g_* flag suppresses the free -- these appear to
 * mark operands aliasing global TCG registers, which must not be
 * freed; confirm against the helpers that set the flags. */
4195 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4196 tcg_temp_free_i64(o
.out
);
4198 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4199 tcg_temp_free_i64(o
.out2
);
4201 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4202 tcg_temp_free_i64(o
.in1
);
4204 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4205 tcg_temp_free_i64(o
.in2
);
4207 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4208 tcg_temp_free_i64(o
.addr1
);
4211 /* Advance to the next instruction. */
/* Core translation loop: repeatedly call translate_one() to turn guest
 * instructions into a single TB, stopping at a page boundary, when the
 * opcode buffer or the cflags instruction budget is exhausted, when
 * single-stepping, or when a breakpoint is hit.
 * NOTE(review): the third parameter is elided in this excerpt --
 * presumably a search_pc flag (gen_intermediate_code passes 0,
 * gen_intermediate_code_pc passes 1); confirm at the call sites. */
4216 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4217 TranslationBlock
*tb
,
4221 target_ulong pc_start
;
4222 uint64_t next_page_start
;
4223 uint16_t *gen_opc_end
;
4225 int num_insns
, max_insns
;
/* Outside 64-bit mode, PSW addresses are only 31 bits wide. */
4233 if (!(tb
->flags
& FLAG_MASK_64
)) {
4234 pc_start
&= 0x7fffffff;
/* The cc state is unknown at TB entry; it is refined as we translate. */
4239 dc
.cc_op
= CC_OP_DYNAMIC
;
4240 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4241 dc
.is_jmp
= DISAS_NEXT
;
4243 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4245 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
/* A cflags instruction count of 0 means "no limit". */
4248 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4249 if (max_insns
== 0) {
4250 max_insns
= CF_COUNT_MASK
;
/* Search-pc bookkeeping: record, per generated opcode index, the guest
 * PC, cc_op and icount, so restore_state_to_opc() can map an opcode
 * position back to guest state after a fault. */
4257 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4261 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4264 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4265 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4266 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4267 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4269 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4273 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4274 tcg_gen_debug_insn_start(dc
.pc
);
/* Force translation to stop (EXIT_PC_STALE) when a breakpoint is set
 * on the instruction about to be translated. */
4278 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4279 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4280 if (bp
->pc
== dc
.pc
) {
4281 status
= EXIT_PC_STALE
;
4287 if (status
== NO_EXIT
) {
4288 status
= translate_one(env
, &dc
);
4291 /* If we reach a page boundary, are single stepping,
4292 or exhaust instruction count, stop generation. */
4293 if (status
== NO_EXIT
4294 && (dc
.pc
>= next_page_start
4295 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4296 || num_insns
>= max_insns
4298 || env
->singlestep_enabled
)) {
4299 status
= EXIT_PC_STALE
;
4301 } while (status
== NO_EXIT
);
4303 if (tb
->cflags
& CF_LAST_IO
) {
/* Exit paths: sync PSW address / cc state back to env as needed before
 * leaving the TB. */
4312 update_psw_addr(&dc
);
4314 case EXIT_PC_UPDATED
:
4315 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4316 gen_op_calc_cc(&dc
);
4318 /* Next TB starts off with CC_OP_DYNAMIC,
4319 so make sure the cc op type is in env */
4320 gen_op_set_cc_op(&dc
);
4323 gen_exception(EXCP_DEBUG
);
4325 /* Generate the return instruction */
/* Terminate the opcode stream and record the TB's final size/icount. */
4333 gen_icount_end(tb
, num_insns
);
4334 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4336 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4339 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4342 tb
->size
= dc
.pc
- pc_start
;
4343 tb
->icount
= num_insns
;
/* Optional in-asm disassembly dump, compiled in via S390X_DEBUG_DISAS. */
4346 #if defined(S390X_DEBUG_DISAS)
4347 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4348 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4349 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4355 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4357 gen_intermediate_code_internal(env
, tb
, 0);
4360 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4362 gen_intermediate_code_internal(env
, tb
, 1);
4365 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4368 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4369 cc_op
= gen_opc_cc_op
[pc_pos
];
4370 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {