4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Log a raw 64-bit instruction word when verbose disas tracing is on. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
490 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
493 static inline void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
495 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
498 static inline void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
500 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
503 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
506 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
509 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
512 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
515 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
517 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
520 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
522 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
525 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp
= tcg_const_i32(v2
);
529 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
530 tcg_temp_free_i32(tmp
);
533 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
535 TCGv_i32 tmp
= tcg_const_i32(v2
);
536 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
537 tcg_temp_free_i32(tmp
);
540 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
542 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
545 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
547 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
550 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
552 TCGv_i64 tmp
= tcg_const_i64(v2
);
554 tcg_temp_free_i64(tmp
);
557 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
559 TCGv_i64 tmp
= tcg_const_i64(v2
);
561 tcg_temp_free_i64(tmp
);
564 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
566 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
569 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
571 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext
*s
)
577 tcg_gen_discard_i64(cc_src
);
578 tcg_gen_discard_i64(cc_dst
);
579 tcg_gen_discard_i64(cc_vr
);
580 s
->cc_op
= CC_OP_STATIC
;
583 static inline void gen_op_set_cc_op(DisasContext
*s
)
585 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
586 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
590 static inline void gen_update_cc_op(DisasContext
*s
)
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext
*s
)
598 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
599 TCGv_i64 dummy
= tcg_const_i64(0);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
625 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
630 case CC_OP_LTUGTU_32
:
631 case CC_OP_LTUGTU_64
:
638 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
653 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
664 tcg_temp_free_i64(dummy
);
666 /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
674 *r1
= (insn
>> 4) & 0xf;
678 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
679 int *x2
, int *b2
, int *d2
)
683 *r1
= (insn
>> 20) & 0xf;
684 *x2
= (insn
>> 16) & 0xf;
685 *b2
= (insn
>> 12) & 0xf;
688 return get_address(s
, *x2
, *b2
, *d2
);
691 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
696 *r1
= (insn
>> 20) & 0xf;
698 *r3
= (insn
>> 16) & 0xf;
699 *b2
= (insn
>> 12) & 0xf;
703 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
708 *i2
= (insn
>> 16) & 0xff;
709 *b1
= (insn
>> 12) & 0xf;
712 return get_address(s
, 0, *b1
, *d1
);
715 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
719 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
720 && !s
->singlestep_enabled
721 && !(s
->tb
->cflags
& CF_LAST_IO
));
724 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
728 if (use_goto_tb(s
, pc
)) {
729 tcg_gen_goto_tb(tb_num
);
730 tcg_gen_movi_i64(psw_addr
, pc
);
731 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr
, pc
);
739 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss
[cc_op
]++;
746 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit
[cc_op
]++;
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond
[16] = {
757 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
758 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
759 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
760 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
761 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
762 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
763 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
764 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond
[16] = {
771 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
773 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
775 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
785 enum cc_op old_cc_op
= s
->cc_op
;
787 if (mask
== 15 || mask
== 0) {
788 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
791 c
->g1
= c
->g2
= true;
796 /* Find the TCG condition for the mask + cc op. */
802 cond
= ltgt_cond
[mask
];
803 if (cond
== TCG_COND_NEVER
) {
806 account_inline_branch(s
, old_cc_op
);
809 case CC_OP_LTUGTU_32
:
810 case CC_OP_LTUGTU_64
:
811 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
819 cond
= nz_cond
[mask
];
820 if (cond
== TCG_COND_NEVER
) {
823 account_inline_branch(s
, old_cc_op
);
838 account_inline_branch(s
, old_cc_op
);
853 account_inline_branch(s
, old_cc_op
);
857 switch (mask
& 0xa) {
858 case 8: /* src == 0 -> no one bit found */
861 case 2: /* src != 0 -> one bit found */
867 account_inline_branch(s
, old_cc_op
);
872 /* Calculate cc value. */
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s
, old_cc_op
);
880 old_cc_op
= CC_OP_STATIC
;
881 cond
= TCG_COND_NEVER
;
885 /* Load up the arguments of the comparison. */
887 c
->g1
= c
->g2
= false;
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
893 c
->u
.s32
.b
= tcg_const_i32(0);
896 case CC_OP_LTUGTU_32
:
898 c
->u
.s32
.a
= tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
900 c
->u
.s32
.b
= tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
908 c
->u
.s64
.b
= tcg_const_i64(0);
912 case CC_OP_LTUGTU_64
:
915 c
->g1
= c
->g2
= true;
921 c
->u
.s64
.a
= tcg_temp_new_i64();
922 c
->u
.s64
.b
= tcg_const_i64(0);
923 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
933 c
->u
.s32
.b
= tcg_const_i32(3);
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
941 c
->u
.s32
.b
= tcg_const_i32(1);
943 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x8 | 0x4: /* cc < 2 */
952 c
->u
.s32
.b
= tcg_const_i32(2);
954 case 0x8: /* cc == 0 */
956 c
->u
.s32
.b
= tcg_const_i32(0);
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
960 c
->u
.s32
.b
= tcg_const_i32(0);
962 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
965 c
->u
.s32
.a
= tcg_temp_new_i32();
966 c
->u
.s32
.b
= tcg_const_i32(0);
967 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
969 case 0x4: /* cc == 1 */
971 c
->u
.s32
.b
= tcg_const_i32(1);
973 case 0x2 | 0x1: /* cc > 1 */
975 c
->u
.s32
.b
= tcg_const_i32(1);
977 case 0x2: /* cc == 2 */
979 c
->u
.s32
.b
= tcg_const_i32(2);
981 case 0x1: /* cc == 3 */
983 c
->u
.s32
.b
= tcg_const_i32(3);
986 /* CC is masked by something else: (8 >> cc) & mask. */
989 c
->u
.s32
.a
= tcg_const_i32(8);
990 c
->u
.s32
.b
= tcg_const_i32(0);
991 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
992 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1003 static void free_compare(DisasCompare
*c
)
1007 tcg_temp_free_i64(c
->u
.s64
.a
);
1009 tcg_temp_free_i32(c
->u
.s32
.a
);
1014 tcg_temp_free_i64(c
->u
.s64
.b
);
1016 tcg_temp_free_i32(c
->u
.s32
.b
);
1021 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp
, tmp2
, tmp3
;
1026 TCGv_i32 tmp32_1
, tmp32_2
;
1030 r1
= (insn
>> 4) & 0xf;
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1036 case 0x08: /* SPT D2(B2) [S] */
1038 check_privileged(s
);
1039 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1040 tmp
= get_address(s
, 0, b2
, d2
);
1041 potential_page_fault(s
);
1042 gen_helper_spt(cpu_env
, tmp
);
1043 tcg_temp_free_i64(tmp
);
1045 case 0x09: /* STPT D2(B2) [S] */
1046 /* Store CPU Timer */
1047 check_privileged(s
);
1048 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1049 tmp
= get_address(s
, 0, b2
, d2
);
1050 potential_page_fault(s
);
1051 gen_helper_stpt(cpu_env
, tmp
);
1052 tcg_temp_free_i64(tmp
);
1054 case 0x0a: /* SPKA D2(B2) [S] */
1055 /* Set PSW Key from Address */
1056 check_privileged(s
);
1057 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1058 tmp
= get_address(s
, 0, b2
, d2
);
1059 tmp2
= tcg_temp_new_i64();
1060 tcg_gen_andi_i64(tmp2
, psw_mask
, ~PSW_MASK_KEY
);
1061 tcg_gen_shli_i64(tmp
, tmp
, PSW_SHIFT_KEY
- 4);
1062 tcg_gen_or_i64(psw_mask
, tmp2
, tmp
);
1063 tcg_temp_free_i64(tmp2
);
1064 tcg_temp_free_i64(tmp
);
1066 case 0x0d: /* PTLB [S] */
1068 check_privileged(s
);
1069 gen_helper_ptlb(cpu_env
);
1071 case 0x10: /* SPX D2(B2) [S] */
1072 /* Set Prefix Register */
1073 check_privileged(s
);
1074 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1075 tmp
= get_address(s
, 0, b2
, d2
);
1076 potential_page_fault(s
);
1077 gen_helper_spx(cpu_env
, tmp
);
1078 tcg_temp_free_i64(tmp
);
1080 case 0x11: /* STPX D2(B2) [S] */
1082 check_privileged(s
);
1083 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1084 tmp
= get_address(s
, 0, b2
, d2
);
1085 tmp2
= tcg_temp_new_i64();
1086 tcg_gen_ld_i64(tmp2
, cpu_env
, offsetof(CPUS390XState
, psa
));
1087 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1088 tcg_temp_free_i64(tmp
);
1089 tcg_temp_free_i64(tmp2
);
1091 case 0x12: /* STAP D2(B2) [S] */
1092 /* Store CPU Address */
1093 check_privileged(s
);
1094 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1095 tmp
= get_address(s
, 0, b2
, d2
);
1096 tmp2
= tcg_temp_new_i64();
1097 tmp32_1
= tcg_temp_new_i32();
1098 tcg_gen_ld_i32(tmp32_1
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
1099 tcg_gen_extu_i32_i64(tmp2
, tmp32_1
);
1100 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1101 tcg_temp_free_i64(tmp
);
1102 tcg_temp_free_i64(tmp2
);
1103 tcg_temp_free_i32(tmp32_1
);
1105 case 0x21: /* IPTE R1,R2 [RRE] */
1106 /* Invalidate PTE */
1107 check_privileged(s
);
1108 r1
= (insn
>> 4) & 0xf;
1111 tmp2
= load_reg(r2
);
1112 gen_helper_ipte(cpu_env
, tmp
, tmp2
);
1113 tcg_temp_free_i64(tmp
);
1114 tcg_temp_free_i64(tmp2
);
1116 case 0x29: /* ISKE R1,R2 [RRE] */
1117 /* Insert Storage Key Extended */
1118 check_privileged(s
);
1119 r1
= (insn
>> 4) & 0xf;
1122 tmp2
= tcg_temp_new_i64();
1123 gen_helper_iske(tmp2
, cpu_env
, tmp
);
1124 store_reg(r1
, tmp2
);
1125 tcg_temp_free_i64(tmp
);
1126 tcg_temp_free_i64(tmp2
);
1128 case 0x2a: /* RRBE R1,R2 [RRE] */
1129 /* Set Storage Key Extended */
1130 check_privileged(s
);
1131 r1
= (insn
>> 4) & 0xf;
1133 tmp32_1
= load_reg32(r1
);
1135 gen_helper_rrbe(cc_op
, cpu_env
, tmp32_1
, tmp
);
1137 tcg_temp_free_i32(tmp32_1
);
1138 tcg_temp_free_i64(tmp
);
1140 case 0x2b: /* SSKE R1,R2 [RRE] */
1141 /* Set Storage Key Extended */
1142 check_privileged(s
);
1143 r1
= (insn
>> 4) & 0xf;
1145 tmp32_1
= load_reg32(r1
);
1147 gen_helper_sske(cpu_env
, tmp32_1
, tmp
);
1148 tcg_temp_free_i32(tmp32_1
);
1149 tcg_temp_free_i64(tmp
);
1151 case 0x34: /* STCH ? */
1152 /* Store Subchannel */
1153 check_privileged(s
);
1154 gen_op_movi_cc(s
, 3);
1156 case 0x46: /* STURA R1,R2 [RRE] */
1157 /* Store Using Real Address */
1158 check_privileged(s
);
1159 r1
= (insn
>> 4) & 0xf;
1161 tmp32_1
= load_reg32(r1
);
1163 potential_page_fault(s
);
1164 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1165 tcg_temp_free_i32(tmp32_1
);
1166 tcg_temp_free_i64(tmp
);
1168 case 0x50: /* CSP R1,R2 [RRE] */
1169 /* Compare And Swap And Purge */
1170 check_privileged(s
);
1171 r1
= (insn
>> 4) & 0xf;
1173 tmp32_1
= tcg_const_i32(r1
);
1174 tmp32_2
= tcg_const_i32(r2
);
1175 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1177 tcg_temp_free_i32(tmp32_1
);
1178 tcg_temp_free_i32(tmp32_2
);
1180 case 0x5f: /* CHSC ? */
1181 /* Channel Subsystem Call */
1182 check_privileged(s
);
1183 gen_op_movi_cc(s
, 3);
1185 case 0x78: /* STCKE D2(B2) [S] */
1186 /* Store Clock Extended */
1187 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1188 tmp
= get_address(s
, 0, b2
, d2
);
1189 potential_page_fault(s
);
1190 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1192 tcg_temp_free_i64(tmp
);
1194 case 0x79: /* SACF D2(B2) [S] */
1195 /* Set Address Space Control Fast */
1196 check_privileged(s
);
1197 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1198 tmp
= get_address(s
, 0, b2
, d2
);
1199 potential_page_fault(s
);
1200 gen_helper_sacf(cpu_env
, tmp
);
1201 tcg_temp_free_i64(tmp
);
1202 /* addressing mode has changed, so end the block */
1205 s
->is_jmp
= DISAS_JUMP
;
1207 case 0x7d: /* STSI D2,(B2) [S] */
1208 check_privileged(s
);
1209 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1210 tmp
= get_address(s
, 0, b2
, d2
);
1211 tmp32_1
= load_reg32(0);
1212 tmp32_2
= load_reg32(1);
1213 potential_page_fault(s
);
1214 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1216 tcg_temp_free_i64(tmp
);
1217 tcg_temp_free_i32(tmp32_1
);
1218 tcg_temp_free_i32(tmp32_2
);
1220 case 0xb1: /* STFL D2(B2) [S] */
1221 /* Store Facility List (CPU features) at 200 */
1222 check_privileged(s
);
1223 tmp2
= tcg_const_i64(0xc0000000);
1224 tmp
= tcg_const_i64(200);
1225 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1226 tcg_temp_free_i64(tmp2
);
1227 tcg_temp_free_i64(tmp
);
1229 case 0xb2: /* LPSWE D2(B2) [S] */
1230 /* Load PSW Extended */
1231 check_privileged(s
);
1232 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1233 tmp
= get_address(s
, 0, b2
, d2
);
1234 tmp2
= tcg_temp_new_i64();
1235 tmp3
= tcg_temp_new_i64();
1236 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1237 tcg_gen_addi_i64(tmp
, tmp
, 8);
1238 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1239 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1240 /* we need to keep cc_op intact */
1241 s
->is_jmp
= DISAS_JUMP
;
1242 tcg_temp_free_i64(tmp
);
1243 tcg_temp_free_i64(tmp2
);
1244 tcg_temp_free_i64(tmp3
);
1246 case 0x20: /* SERVC R1,R2 [RRE] */
1247 /* SCLP Service call (PV hypercall) */
1248 check_privileged(s
);
1249 potential_page_fault(s
);
1250 tmp32_1
= load_reg32(r2
);
1252 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1254 tcg_temp_free_i32(tmp32_1
);
1255 tcg_temp_free_i64(tmp
);
1259 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1260 gen_illegal_opcode(s
);
1261 #ifndef CONFIG_USER_ONLY
1267 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1273 opc
= cpu_ldub_code(env
, s
->pc
);
1274 LOG_DISAS("opc 0x%x\n", opc
);
1278 insn
= ld_code4(env
, s
->pc
);
1279 op
= (insn
>> 16) & 0xff;
1280 disas_b2(env
, s
, op
, insn
);
1283 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1284 gen_illegal_opcode(s
);
1289 /* ====================================================================== */
1290 /* Define the insn format enumeration. */
1291 #define F0(N) FMT_##N,
1292 #define F1(N, X1) F0(N)
1293 #define F2(N, X1, X2) F0(N)
1294 #define F3(N, X1, X2, X3) F0(N)
1295 #define F4(N, X1, X2, X3, X4) F0(N)
1296 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1299 #include "insn-format.def"
1309 /* Define a structure to hold the decoded fields. We'll store each inside
1310 an array indexed by an enum. In order to conserve memory, we'll arrange
1311 for fields that do not exist at the same time to overlap, thus the "C"
1312 for compact. For checking purposes there is an "O" for original index
1313 as well that will be applied to availability bitmaps. */
1315 enum DisasFieldIndexO
{
1338 enum DisasFieldIndexC
{
1369 struct DisasFields
{
1372 unsigned presentC
:16;
1373 unsigned int presentO
;
1377 /* This is the way fields are to be accessed out of DisasFields. */
1378 #define have_field(S, F) have_field1((S), FLD_O_##F)
1379 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1381 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1383 return (f
->presentO
>> c
) & 1;
1386 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1387 enum DisasFieldIndexC c
)
1389 assert(have_field1(f
, o
));
1393 /* Describe the layout of each field in each format. */
1394 typedef struct DisasField
{
1396 unsigned int size
:8;
1397 unsigned int type
:2;
1398 unsigned int indexC
:6;
1399 enum DisasFieldIndexO indexO
:8;
1402 typedef struct DisasFormatInfo
{
1403 DisasField op
[NUM_C_FIELD
];
1406 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1407 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1408 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1409 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1410 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1411 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1412 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1413 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1414 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1415 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1416 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1417 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1418 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1419 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1421 #define F0(N) { { } },
1422 #define F1(N, X1) { { X1 } },
1423 #define F2(N, X1, X2) { { X1, X2 } },
1424 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1425 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1426 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1428 static const DisasFormatInfo format_info
[] = {
1429 #include "insn-format.def"
1447 /* Generally, we'll extract operands into this structures, operate upon
1448 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1449 of routines below for more details. */
1451 bool g_out
, g_out2
, g_in1
, g_in2
;
1452 TCGv_i64 out
, out2
, in1
, in2
;
1456 /* Return values from translate_one, indicating the state of the TB. */
1458 /* Continue the TB. */
1460 /* We have emitted one or more goto_tb. No fixup required. */
1462 /* We are not using a goto_tb (for whatever reason), but have updated
1463 the PC (for whatever reason), so there's no need to do it again on
1466 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1467 updated the PC for the next instruction to be executed. */
1469 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1470 No following code will be executed. */
1474 typedef enum DisasFacility
{
1475 FAC_Z
, /* zarch (default) */
1476 FAC_CASS
, /* compare and swap and store */
1477 FAC_CASS2
, /* compare and swap and store 2*/
1478 FAC_DFP
, /* decimal floating point */
1479 FAC_DFPR
, /* decimal floating point rounding */
1480 FAC_DO
, /* distinct operands */
1481 FAC_EE
, /* execute extensions */
1482 FAC_EI
, /* extended immediate */
1483 FAC_FPE
, /* floating point extension */
1484 FAC_FPSSH
, /* floating point support sign handling */
1485 FAC_FPRGR
, /* FPR-GR transfer */
1486 FAC_GIE
, /* general instructions extension */
1487 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1488 FAC_HW
, /* high-word */
1489 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1490 FAC_LOC
, /* load/store on condition */
1491 FAC_LD
, /* long displacement */
1492 FAC_PC
, /* population count */
1493 FAC_SCF
, /* store clock fast */
1494 FAC_SFLE
, /* store facility list extended */
1500 DisasFacility fac
:6;
1504 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1505 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1506 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1507 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1508 void (*help_cout
)(DisasContext
*, DisasOps
*);
1509 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1514 /* ====================================================================== */
1515 /* Miscelaneous helpers, used by several operations. */
1517 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1518 DisasOps
*o
, int mask
)
1520 int b2
= get_field(f
, b2
);
1521 int d2
= get_field(f
, d2
);
1524 o
->in2
= tcg_const_i64(d2
& mask
);
1526 o
->in2
= get_address(s
, 0, b2
, d2
);
1527 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1531 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1533 if (dest
== s
->next_pc
) {
1536 if (use_goto_tb(s
, dest
)) {
1537 gen_update_cc_op(s
);
1539 tcg_gen_movi_i64(psw_addr
, dest
);
1540 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1541 return EXIT_GOTO_TB
;
1543 tcg_gen_movi_i64(psw_addr
, dest
);
1544 return EXIT_PC_UPDATED
;
1548 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1549 bool is_imm
, int imm
, TCGv_i64 cdest
)
1552 uint64_t dest
= s
->pc
+ 2 * imm
;
1555 /* Take care of the special cases first. */
1556 if (c
->cond
== TCG_COND_NEVER
) {
1561 if (dest
== s
->next_pc
) {
1562 /* Branch to next. */
1566 if (c
->cond
== TCG_COND_ALWAYS
) {
1567 ret
= help_goto_direct(s
, dest
);
1571 if (TCGV_IS_UNUSED_I64(cdest
)) {
1572 /* E.g. bcr %r0 -> no branch. */
1576 if (c
->cond
== TCG_COND_ALWAYS
) {
1577 tcg_gen_mov_i64(psw_addr
, cdest
);
1578 ret
= EXIT_PC_UPDATED
;
1583 if (use_goto_tb(s
, s
->next_pc
)) {
1584 if (is_imm
&& use_goto_tb(s
, dest
)) {
1585 /* Both exits can use goto_tb. */
1586 gen_update_cc_op(s
);
1588 lab
= gen_new_label();
1590 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1592 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1595 /* Branch not taken. */
1597 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1598 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1603 tcg_gen_movi_i64(psw_addr
, dest
);
1604 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1608 /* Fallthru can use goto_tb, but taken branch cannot. */
1609 /* Store taken branch destination before the brcond. This
1610 avoids having to allocate a new local temp to hold it.
1611 We'll overwrite this in the not taken case anyway. */
1613 tcg_gen_mov_i64(psw_addr
, cdest
);
1616 lab
= gen_new_label();
1618 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1620 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1623 /* Branch not taken. */
1624 gen_update_cc_op(s
);
1626 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1627 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1631 tcg_gen_movi_i64(psw_addr
, dest
);
1633 ret
= EXIT_PC_UPDATED
;
1636 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1637 Most commonly we're single-stepping or some other condition that
1638 disables all use of goto_tb. Just update the PC and exit. */
1640 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1642 cdest
= tcg_const_i64(dest
);
1646 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1649 TCGv_i32 t0
= tcg_temp_new_i32();
1650 TCGv_i64 t1
= tcg_temp_new_i64();
1651 TCGv_i64 z
= tcg_const_i64(0);
1652 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1653 tcg_gen_extu_i32_i64(t1
, t0
);
1654 tcg_temp_free_i32(t0
);
1655 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1656 tcg_temp_free_i64(t1
);
1657 tcg_temp_free_i64(z
);
1661 tcg_temp_free_i64(cdest
);
1663 tcg_temp_free_i64(next
);
1665 ret
= EXIT_PC_UPDATED
;
1673 /* ====================================================================== */
1674 /* The operations. These perform the bulk of the work for any insn,
1675 usually after the operands have been loaded and output initialized. */
1677 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1679 gen_helper_abs_i64(o
->out
, o
->in2
);
1683 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1685 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1689 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1691 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1695 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1697 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1698 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1702 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1704 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1708 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1712 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1714 /* XXX possible optimization point */
1716 cc
= tcg_temp_new_i64();
1717 tcg_gen_extu_i32_i64(cc
, cc_op
);
1718 tcg_gen_shri_i64(cc
, cc
, 1);
1720 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1721 tcg_temp_free_i64(cc
);
1725 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1727 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1731 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1733 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1737 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1739 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1740 return_low128(o
->out2
);
1744 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1746 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1750 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1752 int shift
= s
->insn
->data
& 0xff;
1753 int size
= s
->insn
->data
>> 8;
1754 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1757 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1758 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1759 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1761 /* Produce the CC from only the bits manipulated. */
1762 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1763 set_cc_nz_u64(s
, cc_dst
);
1767 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1769 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1770 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1771 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1772 return EXIT_PC_UPDATED
;
1778 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1780 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1781 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1784 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1786 int m1
= get_field(s
->fields
, m1
);
1787 bool is_imm
= have_field(s
->fields
, i2
);
1788 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1791 disas_jcc(s
, &c
, m1
);
1792 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1795 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1797 int r1
= get_field(s
->fields
, r1
);
1798 bool is_imm
= have_field(s
->fields
, i2
);
1799 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1803 c
.cond
= TCG_COND_NE
;
1808 t
= tcg_temp_new_i64();
1809 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1810 store_reg32_i64(r1
, t
);
1811 c
.u
.s32
.a
= tcg_temp_new_i32();
1812 c
.u
.s32
.b
= tcg_const_i32(0);
1813 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1814 tcg_temp_free_i64(t
);
1816 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1819 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1821 int r1
= get_field(s
->fields
, r1
);
1822 bool is_imm
= have_field(s
->fields
, i2
);
1823 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1826 c
.cond
= TCG_COND_NE
;
1831 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1832 c
.u
.s64
.a
= regs
[r1
];
1833 c
.u
.s64
.b
= tcg_const_i64(0);
1835 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1838 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1840 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1845 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1847 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1852 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1854 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1859 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1861 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1862 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1863 tcg_temp_free_i32(m3
);
1864 gen_set_cc_nz_f32(s
, o
->in2
);
1868 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1870 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1871 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1872 tcg_temp_free_i32(m3
);
1873 gen_set_cc_nz_f64(s
, o
->in2
);
1877 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1879 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1880 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1881 tcg_temp_free_i32(m3
);
1882 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1886 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1888 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1889 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1890 tcg_temp_free_i32(m3
);
1891 gen_set_cc_nz_f32(s
, o
->in2
);
1895 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1897 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1898 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1899 tcg_temp_free_i32(m3
);
1900 gen_set_cc_nz_f64(s
, o
->in2
);
1904 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1906 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1907 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1908 tcg_temp_free_i32(m3
);
1909 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1913 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1915 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1916 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1917 tcg_temp_free_i32(m3
);
1921 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1923 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1924 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1925 tcg_temp_free_i32(m3
);
1929 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1931 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1932 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1933 tcg_temp_free_i32(m3
);
1934 return_low128(o
->out2
);
1938 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1940 int r2
= get_field(s
->fields
, r2
);
1941 TCGv_i64 len
= tcg_temp_new_i64();
1943 potential_page_fault(s
);
1944 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1946 return_low128(o
->out
);
1948 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1949 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1950 tcg_temp_free_i64(len
);
1955 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1957 int l
= get_field(s
->fields
, l1
);
1962 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1963 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1966 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1967 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1970 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1971 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1974 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1975 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1978 potential_page_fault(s
);
1979 vl
= tcg_const_i32(l
);
1980 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1981 tcg_temp_free_i32(vl
);
1985 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1989 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1991 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1992 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1993 potential_page_fault(s
);
1994 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1995 tcg_temp_free_i32(r1
);
1996 tcg_temp_free_i32(r3
);
2001 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
2003 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2004 TCGv_i32 t1
= tcg_temp_new_i32();
2005 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2006 potential_page_fault(s
);
2007 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
2009 tcg_temp_free_i32(t1
);
2010 tcg_temp_free_i32(m3
);
2014 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
2016 potential_page_fault(s
);
2017 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2019 return_low128(o
->in2
);
2023 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
2025 int r3
= get_field(s
->fields
, r3
);
2026 potential_page_fault(s
);
2027 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2032 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
2034 int r3
= get_field(s
->fields
, r3
);
2035 potential_page_fault(s
);
2036 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
2041 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
2043 int r3
= get_field(s
->fields
, r3
);
2044 TCGv_i64 in3
= tcg_temp_new_i64();
2045 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
2046 potential_page_fault(s
);
2047 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
2048 tcg_temp_free_i64(in3
);
2053 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
2055 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2056 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2057 potential_page_fault(s
);
2058 /* XXX rewrite in tcg */
2059 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2064 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2066 TCGv_i64 t1
= tcg_temp_new_i64();
2067 TCGv_i32 t2
= tcg_temp_new_i32();
2068 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
2069 gen_helper_cvd(t1
, t2
);
2070 tcg_temp_free_i32(t2
);
2071 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2072 tcg_temp_free_i64(t1
);
2076 #ifndef CONFIG_USER_ONLY
2077 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2081 check_privileged(s
);
2082 potential_page_fault(s
);
2084 /* We pretend the format is RX_a so that D2 is the field we want. */
2085 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2086 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2087 tcg_temp_free_i32(tmp
);
2092 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2094 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2095 return_low128(o
->out
);
2099 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2101 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2102 return_low128(o
->out
);
2106 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2108 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2109 return_low128(o
->out
);
2113 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2115 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2116 return_low128(o
->out
);
2120 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2122 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2126 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2128 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2132 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2134 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2135 return_low128(o
->out2
);
2139 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2141 int r2
= get_field(s
->fields
, r2
);
2142 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2146 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2148 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2152 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2154 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2155 tb->flags, (ab)use the tb->cs_base field as the address of
2156 the template in memory, and grab 8 bits of tb->flags/cflags for
2157 the contents of the register. We would then recognize all this
2158 in gen_intermediate_code_internal, generating code for exactly
2159 one instruction. This new TB then gets executed normally.
2161 On the other hand, this seems to be mostly used for modifying
2162 MVC inside of memcpy, which needs a helper call anyway. So
2163 perhaps this doesn't bear thinking about any further. */
2170 tmp
= tcg_const_i64(s
->next_pc
);
2171 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2172 tcg_temp_free_i64(tmp
);
2178 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2180 /* We'll use the original input for cc computation, since we get to
2181 compare that against 0, which ought to be better than comparing
2182 the real output against 64. It also lets cc_dst be a convenient
2183 temporary during our computation. */
2184 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2186 /* R1 = IN ? CLZ(IN) : 64. */
2187 gen_helper_clz(o
->out
, o
->in2
);
2189 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2190 value by 64, which is undefined. But since the shift is 64 iff the
2191 input is zero, we still get the correct result after and'ing. */
2192 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2193 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2194 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2198 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2200 int m3
= get_field(s
->fields
, m3
);
2201 int pos
, len
, base
= s
->insn
->data
;
2202 TCGv_i64 tmp
= tcg_temp_new_i64();
2207 /* Effectively a 32-bit load. */
2208 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2215 /* Effectively a 16-bit load. */
2216 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2224 /* Effectively an 8-bit load. */
2225 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2230 pos
= base
+ ctz32(m3
) * 8;
2231 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2232 ccm
= ((1ull << len
) - 1) << pos
;
2236 /* This is going to be a sequence of loads and inserts. */
2237 pos
= base
+ 32 - 8;
2241 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2242 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2243 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2246 m3
= (m3
<< 1) & 0xf;
2252 tcg_gen_movi_i64(tmp
, ccm
);
2253 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2254 tcg_temp_free_i64(tmp
);
2258 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2260 int shift
= s
->insn
->data
& 0xff;
2261 int size
= s
->insn
->data
>> 8;
2262 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2266 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2271 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2273 t1
= tcg_temp_new_i64();
2274 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2275 tcg_gen_shri_i64(t1
, t1
, 36);
2276 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2278 tcg_gen_extu_i32_i64(t1
, cc_op
);
2279 tcg_gen_shli_i64(t1
, t1
, 28);
2280 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2281 tcg_temp_free_i64(t1
);
2285 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2287 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2291 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2293 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2297 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2299 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2303 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2305 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2309 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2311 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2312 return_low128(o
->out2
);
2316 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2318 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2319 return_low128(o
->out2
);
2323 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2325 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2329 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2331 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2335 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2337 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2341 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2343 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2347 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2349 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2353 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2355 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2359 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2361 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2365 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2367 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2371 #ifndef CONFIG_USER_ONLY
2372 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2374 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2375 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2376 check_privileged(s
);
2377 potential_page_fault(s
);
2378 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2379 tcg_temp_free_i32(r1
);
2380 tcg_temp_free_i32(r3
);
2384 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2386 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2387 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2388 check_privileged(s
);
2389 potential_page_fault(s
);
2390 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2391 tcg_temp_free_i32(r1
);
2392 tcg_temp_free_i32(r3
);
2395 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2397 check_privileged(s
);
2398 potential_page_fault(s
);
2399 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2404 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2408 check_privileged(s
);
2410 t1
= tcg_temp_new_i64();
2411 t2
= tcg_temp_new_i64();
2412 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2413 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2414 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2415 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2416 tcg_gen_shli_i64(t1
, t1
, 32);
2417 gen_helper_load_psw(cpu_env
, t1
, t2
);
2418 tcg_temp_free_i64(t1
);
2419 tcg_temp_free_i64(t2
);
2420 return EXIT_NORETURN
;
2424 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2426 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2427 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2428 potential_page_fault(s
);
2429 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2430 tcg_temp_free_i32(r1
);
2431 tcg_temp_free_i32(r3
);
2435 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2437 int r1
= get_field(s
->fields
, r1
);
2438 int r3
= get_field(s
->fields
, r3
);
2439 TCGv_i64 t
= tcg_temp_new_i64();
2440 TCGv_i64 t4
= tcg_const_i64(4);
2443 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2444 store_reg32_i64(r1
, t
);
2448 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2452 tcg_temp_free_i64(t
);
2453 tcg_temp_free_i64(t4
);
2457 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2459 int r1
= get_field(s
->fields
, r1
);
2460 int r3
= get_field(s
->fields
, r3
);
2461 TCGv_i64 t
= tcg_temp_new_i64();
2462 TCGv_i64 t4
= tcg_const_i64(4);
2465 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2466 store_reg32h_i64(r1
, t
);
2470 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2474 tcg_temp_free_i64(t
);
2475 tcg_temp_free_i64(t4
);
2479 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2481 int r1
= get_field(s
->fields
, r1
);
2482 int r3
= get_field(s
->fields
, r3
);
2483 TCGv_i64 t8
= tcg_const_i64(8);
2486 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2490 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2494 tcg_temp_free_i64(t8
);
2498 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2501 o
->g_out
= o
->g_in2
;
2502 TCGV_UNUSED_I64(o
->in2
);
2507 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2511 o
->g_out
= o
->g_in1
;
2512 o
->g_out2
= o
->g_in2
;
2513 TCGV_UNUSED_I64(o
->in1
);
2514 TCGV_UNUSED_I64(o
->in2
);
2515 o
->g_in1
= o
->g_in2
= false;
2519 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2521 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2522 potential_page_fault(s
);
2523 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2524 tcg_temp_free_i32(l
);
2528 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2530 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2531 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2532 potential_page_fault(s
);
2533 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2534 tcg_temp_free_i32(r1
);
2535 tcg_temp_free_i32(r2
);
2540 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2542 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2543 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2544 potential_page_fault(s
);
2545 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2546 tcg_temp_free_i32(r1
);
2547 tcg_temp_free_i32(r3
);
2552 #ifndef CONFIG_USER_ONLY
2553 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2555 int r1
= get_field(s
->fields
, l1
);
2556 check_privileged(s
);
2557 potential_page_fault(s
);
2558 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2563 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2565 int r1
= get_field(s
->fields
, l1
);
2566 check_privileged(s
);
2567 potential_page_fault(s
);
2568 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2574 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2576 potential_page_fault(s
);
2577 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2582 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2584 potential_page_fault(s
);
2585 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2587 return_low128(o
->in2
);
2591 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2593 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2597 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2599 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2600 return_low128(o
->out2
);
2604 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2606 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2610 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2612 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2616 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2618 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2622 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2624 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2625 return_low128(o
->out2
);
2629 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2631 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2632 return_low128(o
->out2
);
2636 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2638 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2639 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2640 tcg_temp_free_i64(r3
);
2644 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2646 int r3
= get_field(s
->fields
, r3
);
2647 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2651 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2653 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2654 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2655 tcg_temp_free_i64(r3
);
2659 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2661 int r3
= get_field(s
->fields
, r3
);
2662 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2666 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2668 gen_helper_nabs_i64(o
->out
, o
->in2
);
2672 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2674 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2678 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2680 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2684 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2686 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2687 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2691 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2693 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2694 potential_page_fault(s
);
2695 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2696 tcg_temp_free_i32(l
);
2701 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2703 tcg_gen_neg_i64(o
->out
, o
->in2
);
2707 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2709 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2713 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2715 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2719 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2721 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2722 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2726 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2728 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2729 potential_page_fault(s
);
2730 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2731 tcg_temp_free_i32(l
);
2736 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2738 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2742 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2744 int shift
= s
->insn
->data
& 0xff;
2745 int size
= s
->insn
->data
>> 8;
2746 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2749 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2750 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2752 /* Produce the CC from only the bits manipulated. */
2753 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2754 set_cc_nz_u64(s
, cc_dst
);
2758 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2760 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2764 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2766 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2770 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2772 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2776 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2778 TCGv_i32 t1
= tcg_temp_new_i32();
2779 TCGv_i32 t2
= tcg_temp_new_i32();
2780 TCGv_i32 to
= tcg_temp_new_i32();
2781 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2782 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2783 tcg_gen_rotl_i32(to
, t1
, t2
);
2784 tcg_gen_extu_i32_i64(o
->out
, to
);
2785 tcg_temp_free_i32(t1
);
2786 tcg_temp_free_i32(t2
);
2787 tcg_temp_free_i32(to
);
2791 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2793 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2797 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2799 int r1
= get_field(s
->fields
, r1
);
2800 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2804 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2806 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2810 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2812 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2816 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2818 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2819 return_low128(o
->out2
);
2823 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2825 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2829 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2831 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2835 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2837 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2838 return_low128(o
->out2
);
2842 #ifndef CONFIG_USER_ONLY
2843 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2845 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2846 check_privileged(s
);
2847 potential_page_fault(s
);
2848 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2849 tcg_temp_free_i32(r1
);
2854 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2856 uint64_t sign
= 1ull << s
->insn
->data
;
2857 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2858 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2859 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2860 /* The arithmetic left shift is curious in that it does not affect
2861 the sign bit. Copy that over from the source unchanged. */
2862 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2863 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2864 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2868 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2870 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2874 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2876 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2880 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2882 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2886 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2888 gen_helper_sfpc(cpu_env
, o
->in2
);
2892 #ifndef CONFIG_USER_ONLY
2893 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2895 check_privileged(s
);
2896 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2900 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2902 gen_helper_stck(o
->out
, cpu_env
);
2903 /* ??? We don't implement clock states. */
2904 gen_op_movi_cc(s
, 0);
2908 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2910 check_privileged(s
);
2911 gen_helper_sckc(cpu_env
, o
->in2
);
2915 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2917 check_privileged(s
);
2918 gen_helper_stckc(o
->out
, cpu_env
);
2922 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2924 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2925 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2926 check_privileged(s
);
2927 potential_page_fault(s
);
2928 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2929 tcg_temp_free_i32(r1
);
2930 tcg_temp_free_i32(r3
);
2934 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2936 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2937 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2938 check_privileged(s
);
2939 potential_page_fault(s
);
2940 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2941 tcg_temp_free_i32(r1
);
2942 tcg_temp_free_i32(r3
);
2946 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2948 check_privileged(s
);
2949 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2953 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2955 uint64_t i2
= get_field(s
->fields
, i2
);
2958 check_privileged(s
);
2960 /* It is important to do what the instruction name says: STORE THEN.
2961 If we let the output hook perform the store then if we fault and
2962 restart, we'll have the wrong SYSTEM MASK in place. */
2963 t
= tcg_temp_new_i64();
2964 tcg_gen_shri_i64(t
, psw_mask
, 56);
2965 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2966 tcg_temp_free_i64(t
);
2968 if (s
->fields
->op
== 0xac) {
2969 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2970 (i2
<< 56) | 0x00ffffffffffffffull
);
2972 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2978 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2980 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2984 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2986 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2990 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2992 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2996 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2998 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3002 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3004 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3005 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3006 potential_page_fault(s
);
3007 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3008 tcg_temp_free_i32(r1
);
3009 tcg_temp_free_i32(r3
);
3013 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3015 int m3
= get_field(s
->fields
, m3
);
3016 int pos
, base
= s
->insn
->data
;
3017 TCGv_i64 tmp
= tcg_temp_new_i64();
3019 pos
= base
+ ctz32(m3
) * 8;
3022 /* Effectively a 32-bit store. */
3023 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3024 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3030 /* Effectively a 16-bit store. */
3031 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3032 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3039 /* Effectively an 8-bit store. */
3040 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3041 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3045 /* This is going to be a sequence of shifts and stores. */
3046 pos
= base
+ 32 - 8;
3049 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3050 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3051 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3053 m3
= (m3
<< 1) & 0xf;
3058 tcg_temp_free_i64(tmp
);
3062 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3064 int r1
= get_field(s
->fields
, r1
);
3065 int r3
= get_field(s
->fields
, r3
);
3066 int size
= s
->insn
->data
;
3067 TCGv_i64 tsize
= tcg_const_i64(size
);
3071 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3073 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3078 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3082 tcg_temp_free_i64(tsize
);
3086 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3088 int r1
= get_field(s
->fields
, r1
);
3089 int r3
= get_field(s
->fields
, r3
);
3090 TCGv_i64 t
= tcg_temp_new_i64();
3091 TCGv_i64 t4
= tcg_const_i64(4);
3092 TCGv_i64 t32
= tcg_const_i64(32);
3095 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3096 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3100 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3104 tcg_temp_free_i64(t
);
3105 tcg_temp_free_i64(t4
);
3106 tcg_temp_free_i64(t32
);
3110 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3112 potential_page_fault(s
);
3113 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3115 return_low128(o
->in2
);
3119 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3121 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3125 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3130 tcg_gen_not_i64(o
->in2
, o
->in2
);
3131 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3133 /* XXX possible optimization point */
3135 cc
= tcg_temp_new_i64();
3136 tcg_gen_extu_i32_i64(cc
, cc_op
);
3137 tcg_gen_shri_i64(cc
, cc
, 1);
3138 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3139 tcg_temp_free_i64(cc
);
3143 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3150 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3151 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3152 tcg_temp_free_i32(t
);
3154 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3155 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3156 tcg_temp_free_i32(t
);
3158 gen_exception(EXCP_SVC
);
3159 return EXIT_NORETURN
;
3162 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3164 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3169 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3171 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3176 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3178 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3183 #ifndef CONFIG_USER_ONLY
3184 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3186 potential_page_fault(s
);
3187 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3193 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3195 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3196 potential_page_fault(s
);
3197 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3198 tcg_temp_free_i32(l
);
3203 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3205 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3206 potential_page_fault(s
);
3207 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3208 tcg_temp_free_i32(l
);
3212 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3214 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3215 potential_page_fault(s
);
3216 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3217 tcg_temp_free_i32(l
);
3222 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3224 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3228 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3230 int shift
= s
->insn
->data
& 0xff;
3231 int size
= s
->insn
->data
>> 8;
3232 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3235 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3236 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3238 /* Produce the CC from only the bits manipulated. */
3239 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3240 set_cc_nz_u64(s
, cc_dst
);
3244 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3246 o
->out
= tcg_const_i64(0);
3250 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3252 o
->out
= tcg_const_i64(0);
3258 /* ====================================================================== */
3259 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3260 the original inputs), update the various cc data structures in order to
3261 be able to compute the new condition code. */
3263 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3265 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3268 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3270 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3273 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3275 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3278 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3280 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3283 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3285 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3288 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3290 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3293 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3295 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3298 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3300 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3303 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3305 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3308 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3310 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3313 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3315 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3318 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3320 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3323 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3325 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3328 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3330 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3333 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3335 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3338 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3340 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3343 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3345 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3348 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3350 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3353 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3355 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3358 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3360 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3361 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3364 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3366 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3369 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3371 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3374 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3376 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3379 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3381 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3384 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3386 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3389 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3391 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3394 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3396 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3399 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3401 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3404 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3406 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3409 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3411 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3414 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3416 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3419 /* ====================================================================== */
3420 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3421 with the TCG register to which we will write. Used in combination with
3422 the "wout" generators, in some cases we need a new temporary, and in
3423 some cases we can write to a TCG global. */
3425 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3427 o
->out
= tcg_temp_new_i64();
3430 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3432 o
->out
= tcg_temp_new_i64();
3433 o
->out2
= tcg_temp_new_i64();
3436 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3438 o
->out
= regs
[get_field(f
, r1
)];
3442 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3444 /* ??? Specification exception: r1 must be even. */
3445 int r1
= get_field(f
, r1
);
3447 o
->out2
= regs
[(r1
+ 1) & 15];
3448 o
->g_out
= o
->g_out2
= true;
3451 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3453 o
->out
= fregs
[get_field(f
, r1
)];
3457 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3459 /* ??? Specification exception: r1 must be < 14. */
3460 int r1
= get_field(f
, r1
);
3462 o
->out2
= fregs
[(r1
+ 2) & 15];
3463 o
->g_out
= o
->g_out2
= true;
3466 /* ====================================================================== */
3467 /* The "Write OUTput" generators. These generally perform some non-trivial
3468 copy of data to TCG globals, or to main memory. The trivial cases are
3469 generally handled by having a "prep" generator install the TCG global
3470 as the destination of the operation. */
3472 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3474 store_reg(get_field(f
, r1
), o
->out
);
3477 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3479 int r1
= get_field(f
, r1
);
3480 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3483 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3485 int r1
= get_field(f
, r1
);
3486 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3489 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3491 store_reg32_i64(get_field(f
, r1
), o
->out
);
3494 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3496 /* ??? Specification exception: r1 must be even. */
3497 int r1
= get_field(f
, r1
);
3498 store_reg32_i64(r1
, o
->out
);
3499 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3502 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3504 /* ??? Specification exception: r1 must be even. */
3505 int r1
= get_field(f
, r1
);
3506 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3507 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3508 store_reg32_i64(r1
, o
->out
);
3511 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3513 store_freg32_i64(get_field(f
, r1
), o
->out
);
3516 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3518 store_freg(get_field(f
, r1
), o
->out
);
3521 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 /* ??? Specification exception: r1 must be < 14. */
3524 int f1
= get_field(s
->fields
, r1
);
3525 store_freg(f1
, o
->out
);
3526 store_freg((f1
+ 2) & 15, o
->out2
);
3529 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3531 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3532 store_reg32_i64(get_field(f
, r1
), o
->out
);
3536 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3538 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3539 store_freg32_i64(get_field(f
, r1
), o
->out
);
3543 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3545 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3548 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3550 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3553 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3555 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3558 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3560 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3563 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3565 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3568 /* ====================================================================== */
3569 /* The "INput 1" generators. These load the first operand to an insn. */
3571 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3573 o
->in1
= load_reg(get_field(f
, r1
));
3576 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3578 o
->in1
= regs
[get_field(f
, r1
)];
3582 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3584 o
->in1
= tcg_temp_new_i64();
3585 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3588 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3590 o
->in1
= tcg_temp_new_i64();
3591 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3594 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3596 o
->in1
= tcg_temp_new_i64();
3597 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3600 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3602 /* ??? Specification exception: r1 must be even. */
3603 int r1
= get_field(f
, r1
);
3604 o
->in1
= load_reg((r1
+ 1) & 15);
3607 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3609 /* ??? Specification exception: r1 must be even. */
3610 int r1
= get_field(f
, r1
);
3611 o
->in1
= tcg_temp_new_i64();
3612 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3615 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3617 /* ??? Specification exception: r1 must be even. */
3618 int r1
= get_field(f
, r1
);
3619 o
->in1
= tcg_temp_new_i64();
3620 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3623 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3625 /* ??? Specification exception: r1 must be even. */
3626 int r1
= get_field(f
, r1
);
3627 o
->in1
= tcg_temp_new_i64();
3628 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3631 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3633 o
->in1
= load_reg(get_field(f
, r2
));
3636 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3638 o
->in1
= load_reg(get_field(f
, r3
));
3641 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3643 o
->in1
= regs
[get_field(f
, r3
)];
3647 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3649 o
->in1
= tcg_temp_new_i64();
3650 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3653 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3655 o
->in1
= tcg_temp_new_i64();
3656 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3659 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3661 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3664 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3666 o
->in1
= fregs
[get_field(f
, r1
)];
3670 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3672 /* ??? Specification exception: r1 must be < 14. */
3673 int r1
= get_field(f
, r1
);
3675 o
->out2
= fregs
[(r1
+ 2) & 15];
3676 o
->g_out
= o
->g_out2
= true;
3679 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3681 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3684 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3686 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3687 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3690 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3693 o
->in1
= tcg_temp_new_i64();
3694 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3697 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3700 o
->in1
= tcg_temp_new_i64();
3701 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3704 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3707 o
->in1
= tcg_temp_new_i64();
3708 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3711 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3714 o
->in1
= tcg_temp_new_i64();
3715 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3718 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3721 o
->in1
= tcg_temp_new_i64();
3722 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3725 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3728 o
->in1
= tcg_temp_new_i64();
3729 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3732 /* ====================================================================== */
3733 /* The "INput 2" generators. These load the second operand to an insn. */
3735 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3737 o
->in2
= regs
[get_field(f
, r1
)];
3741 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3743 o
->in2
= tcg_temp_new_i64();
3744 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3747 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3749 o
->in2
= tcg_temp_new_i64();
3750 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3753 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3755 o
->in2
= load_reg(get_field(f
, r2
));
3758 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3760 o
->in2
= regs
[get_field(f
, r2
)];
3764 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3766 int r2
= get_field(f
, r2
);
3768 o
->in2
= load_reg(r2
);
3772 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3774 o
->in2
= tcg_temp_new_i64();
3775 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3778 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3780 o
->in2
= tcg_temp_new_i64();
3781 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3784 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3786 o
->in2
= tcg_temp_new_i64();
3787 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3790 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3792 o
->in2
= tcg_temp_new_i64();
3793 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3796 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3798 o
->in2
= load_reg(get_field(f
, r3
));
3801 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3803 o
->in2
= tcg_temp_new_i64();
3804 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3807 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3809 o
->in2
= tcg_temp_new_i64();
3810 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3813 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3815 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3818 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 o
->in2
= fregs
[get_field(f
, r2
)];
3824 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3826 /* ??? Specification exception: r1 must be < 14. */
3827 int r2
= get_field(f
, r2
);
3829 o
->in2
= fregs
[(r2
+ 2) & 15];
3830 o
->g_in1
= o
->g_in2
= true;
3833 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3835 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3838 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3840 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3841 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3844 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3846 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3849 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3851 help_l2_shift(s
, f
, o
, 31);
3854 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3856 help_l2_shift(s
, f
, o
, 63);
3859 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3862 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3865 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3871 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3874 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3877 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3880 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3883 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3886 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3889 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3892 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3895 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3898 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3901 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3904 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3907 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3910 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3913 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3916 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3919 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3921 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3924 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3926 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3929 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3931 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3934 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3936 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3939 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3941 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3942 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3945 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3947 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3948 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3951 /* ====================================================================== */
3953 /* Find opc within the table of insns. This is formulated as a switch
3954 statement so that (1) we get compile-time notice of cut-paste errors
3955 for duplicated opcodes, and (2) the compiler generates the binary
3956 search tree, rather than us having to post-process the table. */
3958 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3959 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3961 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3963 enum DisasInsnEnum
{
3964 #include "insn-data.def"
3968 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3973 .help_in1 = in1_##I1, \
3974 .help_in2 = in2_##I2, \
3975 .help_prep = prep_##P, \
3976 .help_wout = wout_##W, \
3977 .help_cout = cout_##CC, \
3978 .help_op = op_##OP, \
3982 /* Allow 0 to be used for NULL in the table below. */
3990 static const DisasInsn insn_info
[] = {
3991 #include "insn-data.def"
3995 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3996 case OPC: return &insn_info[insn_ ## NM];
3998 static const DisasInsn
*lookup_opc(uint16_t opc
)
4001 #include "insn-data.def"
4010 /* Extract a field from the insn. The INSN should be left-aligned in
4011 the uint64_t so that we can more easily utilize the big-bit-endian
4012 definitions we extract from the Principals of Operation. */
4014 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4022 /* Zero extract the field from the insn. */
4023 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4025 /* Sign-extend, or un-swap the field as necessary. */
4027 case 0: /* unsigned */
4029 case 1: /* signed */
4030 assert(f
->size
<= 32);
4031 m
= 1u << (f
->size
- 1);
4034 case 2: /* dl+dh split, signed 20 bit. */
4035 r
= ((int8_t)r
<< 12) | (r
>> 8);
4041 /* Validate that the "compressed" encoding we selected above is valid.
4042 I.e. we havn't make two different original fields overlap. */
4043 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4044 o
->presentC
|= 1 << f
->indexC
;
4045 o
->presentO
|= 1 << f
->indexO
;
4047 o
->c
[f
->indexC
] = r
;
4050 /* Lookup the insn at the current PC, extracting the operands into O and
4051 returning the info struct for the insn. Returns NULL for invalid insn. */
4053 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4056 uint64_t insn
, pc
= s
->pc
;
4058 const DisasInsn
*info
;
4060 insn
= ld_code2(env
, pc
);
4061 op
= (insn
>> 8) & 0xff;
4062 ilen
= get_ilen(op
);
4063 s
->next_pc
= s
->pc
+ ilen
;
4070 insn
= ld_code4(env
, pc
) << 32;
4073 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4079 /* We can't actually determine the insn format until we've looked up
4080 the full insn opcode. Which we can't do without locating the
4081 secondary opcode. Assume by default that OP2 is at bit 40; for
4082 those smaller insns that don't actually have a secondary opcode
4083 this will correctly result in OP2 = 0. */
4089 case 0xb2: /* S, RRF, RRE */
4090 case 0xb3: /* RRE, RRD, RRF */
4091 case 0xb9: /* RRE, RRF */
4092 case 0xe5: /* SSE, SIL */
4093 op2
= (insn
<< 8) >> 56;
4097 case 0xc0: /* RIL */
4098 case 0xc2: /* RIL */
4099 case 0xc4: /* RIL */
4100 case 0xc6: /* RIL */
4101 case 0xc8: /* SSF */
4102 case 0xcc: /* RIL */
4103 op2
= (insn
<< 12) >> 60;
4105 case 0xd0 ... 0xdf: /* SS */
4111 case 0xee ... 0xf3: /* SS */
4112 case 0xf8 ... 0xfd: /* SS */
4116 op2
= (insn
<< 40) >> 56;
4120 memset(f
, 0, sizeof(*f
));
4124 /* Lookup the instruction. */
4125 info
= lookup_opc(op
<< 8 | op2
);
4127 /* If we found it, extract the operands. */
4129 DisasFormat fmt
= info
->fmt
;
4132 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4133 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
/* Translate a single instruction at s->pc.  Looks the insn up via
   extract_insn; if found, drives it through the table-declared helper
   pipeline (in1/in2/prep/op/wout/cout), otherwise falls back to the old
   interpreter-style disas_s390_insn.  Returns an ExitStatus telling the
   caller whether translation may continue.
   NOTE(review): garbled extraction -- braces, the "if (insn == NULL)"
   test, locals "DisasFields f; DisasOps o;" and the function tail
   (cc-op bookkeeping, "return ret") are missing from this view.  */
4139 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4141 const DisasInsn
*insn
;
4142 ExitStatus ret
= NO_EXIT
;
4146 insn
= extract_insn(env
, s
, &f
);
4148 /* If not found, try the old interpreter. This includes ILLOPC. */
4150 disas_s390_insn(env
, s
);
/* Map the legacy translator's is_jmp result onto an ExitStatus. */
4151 switch (s
->is_jmp
) {
4159 ret
= EXIT_PC_UPDATED
;
4162 ret
= EXIT_NORETURN
;
4172 /* Set up the structures we use to communicate with the helpers. */
/* g_* flags mark operands that alias global TCG values and therefore
   must not be freed below. */
4175 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4176 TCGV_UNUSED_I64(o
.out
);
4177 TCGV_UNUSED_I64(o
.out2
);
4178 TCGV_UNUSED_I64(o
.in1
);
4179 TCGV_UNUSED_I64(o
.in2
);
4180 TCGV_UNUSED_I64(o
.addr1
);
4182 /* Implement the instruction. */
/* Each stage is optional; the insn table supplies NULL for unused ones.
   Order: load inputs (in1, in2), prepare output (prep), perform the
   operation (op -- the only stage that can change ret), write the
   result back (wout), then compute the condition code (cout). */
4183 if (insn
->help_in1
) {
4184 insn
->help_in1(s
, &f
, &o
);
4186 if (insn
->help_in2
) {
4187 insn
->help_in2(s
, &f
, &o
);
4189 if (insn
->help_prep
) {
4190 insn
->help_prep(s
, &f
, &o
);
4192 if (insn
->help_op
) {
4193 ret
= insn
->help_op(s
, &o
);
4195 if (insn
->help_wout
) {
4196 insn
->help_wout(s
, &f
, &o
);
4198 if (insn
->help_cout
) {
4199 insn
->help_cout(s
, &o
);
4202 /* Free any temporaries created by the helpers. */
/* Skip operands flagged g_* (globals) or never assigned (UNUSED). */
4203 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4204 tcg_temp_free_i64(o
.out
);
4206 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4207 tcg_temp_free_i64(o
.out2
);
4209 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4210 tcg_temp_free_i64(o
.in1
);
4212 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4213 tcg_temp_free_i64(o
.in2
);
4215 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4216 tcg_temp_free_i64(o
.addr1
);
4219 /* Advance to the next instruction. */
/* Main translation loop: translate instructions starting at tb->pc into
   TCG ops until an exit condition is hit, then emit the TB epilogue.
   The third parameter (lost in extraction; the callers below pass 0/1,
   presumably a "search_pc" flag) selects whether per-insn PC/cc-op data
   is recorded for later state reconstruction.
   NOTE(review): garbled extraction -- many original lines (declarations of
   dc/status/bp, the do { } loop head, the closing switch on status, icount
   start, brace lines) are missing from this view.  */
4224 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4225 TranslationBlock
*tb
,
4229 target_ulong pc_start
;
4230 uint64_t next_page_start
;
4231 uint16_t *gen_opc_end
;
4233 int num_insns
, max_insns
;
/* In 31-bit mode only the low 31 bits of the PSW address are valid. */
4241 if (!(tb
->flags
& FLAG_MASK_64
)) {
4242 pc_start
&= 0x7fffffff;
/* Initialize the per-TB disassembly context. */
4247 dc
.cc_op
= CC_OP_DYNAMIC
;
4248 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4249 dc
.is_jmp
= DISAS_NEXT
;
4251 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Stop at the end of the current guest page; TBs never span pages. */
4253 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4256 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4257 if (max_insns
== 0) {
4258 max_insns
= CF_COUNT_MASK
;
/* search_pc bookkeeping: pad skipped slots, then record the PC, cc-op
   and icount for the insn about to be translated. */
4265 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4269 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4272 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4273 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4274 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4275 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
/* For the last insn of an icount TB that may do I/O (body lost). */
4277 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4281 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4282 tcg_gen_debug_insn_start(dc
.pc
);
/* Stop translation on a guest breakpoint at the current PC. */
4286 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4287 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4288 if (bp
->pc
== dc
.pc
) {
4289 status
= EXIT_PC_STALE
;
4295 if (status
== NO_EXIT
) {
4296 status
= translate_one(env
, &dc
);
4299 /* If we reach a page boundary, are single stepping,
4300 or exhaust instruction count, stop generation. */
4301 if (status
== NO_EXIT
4302 && (dc
.pc
>= next_page_start
4303 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4304 || num_insns
>= max_insns
4306 || env
->singlestep_enabled
)) {
4307 status
= EXIT_PC_STALE
;
4309 } while (status
== NO_EXIT
);
4311 if (tb
->cflags
& CF_LAST_IO
) {
/* Epilogue: sync guest state depending on how translation ended
   (the enclosing switch on status is partially lost here). */
4320 update_psw_addr(&dc
);
4322 case EXIT_PC_UPDATED
:
4323 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4324 gen_op_calc_cc(&dc
);
4326 /* Next TB starts off with CC_OP_DYNAMIC,
4327 so make sure the cc op type is in env */
4328 gen_op_set_cc_op(&dc
);
/* Raise a debug exception when single-stepping requested it. */
4331 gen_exception(EXCP_DEBUG
);
4333 /* Generate the return instruction */
4341 gen_icount_end(tb
, num_insns
);
4342 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* search_pc: pad the trailing opc slots. */
4344 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4347 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
/* Otherwise record the TB's final size and insn count. */
4350 tb
->size
= dc
.pc
- pc_start
;
4351 tb
->icount
= num_insns
;
4354 #if defined(S390X_DEBUG_DISAS)
4355 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4356 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4357 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4363 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4365 gen_intermediate_code_internal(env
, tb
, 0);
4368 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4370 gen_intermediate_code_internal(env
, tb
, 1);
/* Restore guest CPU state from the data recorded at translation time:
   reload the PSW address from the gen_opc_pc slot for opc index pc_pos,
   and re-establish the cc-op recorded alongside it.
   NOTE(review): the body is truncated by the extraction -- the branch
   taken when cc_op is neither DYNAMIC nor STATIC (presumably writing it
   back into env) lies past the end of this chunk.  */
4373 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4376 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4377 cc_op
= gen_opc_cc_op
[pc_pos
];
/* DYNAMIC/STATIC cc-ops need no restore; anything else does. */
4378 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {