/*
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
/* Verbose per-insn disassembly logging; compiled out unless
   S390X_DEBUG_DISAS_VERBOSE is defined.  (Restores the #else/#endif
   dropped by truncation.)  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static inline void debug_insn(uint64_t insn
)
84 LOG_DISAS("insn: 0x%" PRIx64
"\n", insn
);
87 static inline uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
102 if (env
->cc_op
> 3) {
103 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
104 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
106 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
107 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
110 for (i
= 0; i
< 16; i
++) {
111 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
113 cpu_fprintf(f
, "\n");
119 for (i
= 0; i
< 16; i
++) {
120 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
122 cpu_fprintf(f
, "\n");
128 #ifndef CONFIG_USER_ONLY
129 for (i
= 0; i
< 16; i
++) {
130 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
132 cpu_fprintf(f
, "\n");
139 #ifdef DEBUG_INLINE_BRANCHES
140 for (i
= 0; i
< CC_OP_MAX
; i
++) {
141 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
142 inline_branch_miss
[i
], inline_branch_hit
[i
]);
146 cpu_fprintf(f
, "\n");
149 static TCGv_i64 psw_addr
;
150 static TCGv_i64 psw_mask
;
152 static TCGv_i32 cc_op
;
153 static TCGv_i64 cc_src
;
154 static TCGv_i64 cc_dst
;
155 static TCGv_i64 cc_vr
;
157 static char cpu_reg_names
[32][4];
158 static TCGv_i64 regs
[16];
159 static TCGv_i64 fregs
[16];
161 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
163 void s390x_translate_init(void)
167 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
168 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUS390XState
, psw
.addr
),
171 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
172 offsetof(CPUS390XState
, psw
.mask
),
175 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
177 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
179 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
181 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
184 for (i
= 0; i
< 16; i
++) {
185 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
186 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
187 offsetof(CPUS390XState
, regs
[i
]),
191 for (i
= 0; i
< 16; i
++) {
192 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
193 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
194 offsetof(CPUS390XState
, fregs
[i
].d
),
195 cpu_reg_names
[i
+ 16]);
198 /* register helpers */
203 static inline TCGv_i64
load_reg(int reg
)
205 TCGv_i64 r
= tcg_temp_new_i64();
206 tcg_gen_mov_i64(r
, regs
[reg
]);
210 static inline TCGv_i64
load_freg(int reg
)
212 TCGv_i64 r
= tcg_temp_new_i64();
213 tcg_gen_mov_i64(r
, fregs
[reg
]);
217 static inline TCGv_i32
load_freg32(int reg
)
219 TCGv_i32 r
= tcg_temp_new_i32();
220 #if HOST_LONG_BITS == 32
221 tcg_gen_mov_i32(r
, TCGV_HIGH(fregs
[reg
]));
223 tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r
)), fregs
[reg
], 32);
228 static inline TCGv_i64
load_freg32_i64(int reg
)
230 TCGv_i64 r
= tcg_temp_new_i64();
231 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
235 static inline TCGv_i32
load_reg32(int reg
)
237 TCGv_i32 r
= tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r
, regs
[reg
]);
242 static inline TCGv_i64
load_reg32_i64(int reg
)
244 TCGv_i64 r
= tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r
, regs
[reg
]);
249 static inline void store_reg(int reg
, TCGv_i64 v
)
251 tcg_gen_mov_i64(regs
[reg
], v
);
254 static inline void store_freg(int reg
, TCGv_i64 v
)
256 tcg_gen_mov_i64(fregs
[reg
], v
);
259 static inline void store_reg32(int reg
, TCGv_i32 v
)
261 /* 32 bit register writes keep the upper half */
262 #if HOST_LONG_BITS == 32
263 tcg_gen_mov_i32(TCGV_LOW(regs
[reg
]), v
);
265 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
],
266 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 0, 32);
270 static inline void store_reg32_i64(int reg
, TCGv_i64 v
)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
276 static inline void store_reg32h_i64(int reg
, TCGv_i64 v
)
278 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
281 static inline void store_freg32(int reg
, TCGv_i32 v
)
283 /* 32 bit register writes keep the lower half */
284 #if HOST_LONG_BITS == 32
285 tcg_gen_mov_i32(TCGV_HIGH(fregs
[reg
]), v
);
287 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
],
288 MAKE_TCGV_I64(GET_TCGV_I32(v
)), 32, 32);
292 static inline void store_freg32_i64(int reg
, TCGv_i64 v
)
294 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
297 static inline void return_low128(TCGv_i64 dest
)
299 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
302 static inline void update_psw_addr(DisasContext
*s
)
305 tcg_gen_movi_i64(psw_addr
, s
->pc
);
308 static inline void potential_page_fault(DisasContext
*s
)
310 #ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
318 return (uint64_t)cpu_lduw_code(env
, pc
);
321 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
326 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
328 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
331 static inline int get_mem_index(DisasContext
*s
)
333 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
334 case PSW_ASC_PRIMARY
>> 32:
336 case PSW_ASC_SECONDARY
>> 32:
338 case PSW_ASC_HOME
>> 32:
346 static void gen_exception(int excp
)
348 TCGv_i32 tmp
= tcg_const_i32(excp
);
349 gen_helper_exception(cpu_env
, tmp
);
350 tcg_temp_free_i32(tmp
);
353 static void gen_program_exception(DisasContext
*s
, int code
)
357 /* Remember what pgm exeption this was. */
358 tmp
= tcg_const_i32(code
);
359 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
360 tcg_temp_free_i32(tmp
);
362 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
363 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
364 tcg_temp_free_i32(tmp
);
366 /* Advance past instruction. */
373 /* Trigger exception. */
374 gen_exception(EXCP_PGM
);
377 s
->is_jmp
= DISAS_EXCP
;
380 static inline void gen_illegal_opcode(DisasContext
*s
)
382 gen_program_exception(s
, PGM_SPECIFICATION
);
385 static inline void check_privileged(DisasContext
*s
)
387 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
388 gen_program_exception(s
, PGM_PRIVILEGED
);
392 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
396 /* 31-bitify the immediate part; register contents are dealt with below */
397 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
403 tmp
= tcg_const_i64(d2
);
404 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
409 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
413 tmp
= tcg_const_i64(d2
);
414 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
419 tmp
= tcg_const_i64(d2
);
422 /* 31-bit mode mask if there are values loaded from registers */
423 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
424 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
430 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
432 s
->cc_op
= CC_OP_CONST0
+ val
;
435 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
437 tcg_gen_discard_i64(cc_src
);
438 tcg_gen_mov_i64(cc_dst
, dst
);
439 tcg_gen_discard_i64(cc_vr
);
443 static void gen_op_update1_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 dst
)
445 tcg_gen_discard_i64(cc_src
);
446 tcg_gen_extu_i32_i64(cc_dst
, dst
);
447 tcg_gen_discard_i64(cc_vr
);
451 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
454 tcg_gen_mov_i64(cc_src
, src
);
455 tcg_gen_mov_i64(cc_dst
, dst
);
456 tcg_gen_discard_i64(cc_vr
);
460 static void gen_op_update2_cc_i32(DisasContext
*s
, enum cc_op op
, TCGv_i32 src
,
463 tcg_gen_extu_i32_i64(cc_src
, src
);
464 tcg_gen_extu_i32_i64(cc_dst
, dst
);
465 tcg_gen_discard_i64(cc_vr
);
469 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
470 TCGv_i64 dst
, TCGv_i64 vr
)
472 tcg_gen_mov_i64(cc_src
, src
);
473 tcg_gen_mov_i64(cc_dst
, dst
);
474 tcg_gen_mov_i64(cc_vr
, vr
);
478 static inline void set_cc_nz_u32(DisasContext
*s
, TCGv_i32 val
)
480 gen_op_update1_cc_i32(s
, CC_OP_NZ
, val
);
483 static inline void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
485 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
488 static inline void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
490 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
493 static inline void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
495 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
498 static inline void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
500 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
503 static inline void cmp_32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
,
506 gen_op_update2_cc_i32(s
, cond
, v1
, v2
);
509 static inline void cmp_64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
,
512 gen_op_update2_cc_i64(s
, cond
, v1
, v2
);
515 static inline void cmp_s32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
517 cmp_32(s
, v1
, v2
, CC_OP_LTGT_32
);
520 static inline void cmp_u32(DisasContext
*s
, TCGv_i32 v1
, TCGv_i32 v2
)
522 cmp_32(s
, v1
, v2
, CC_OP_LTUGTU_32
);
525 static inline void cmp_s32c(DisasContext
*s
, TCGv_i32 v1
, int32_t v2
)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp
= tcg_const_i32(v2
);
529 cmp_32(s
, v1
, tmp
, CC_OP_LTGT_32
);
530 tcg_temp_free_i32(tmp
);
533 static inline void cmp_u32c(DisasContext
*s
, TCGv_i32 v1
, uint32_t v2
)
535 TCGv_i32 tmp
= tcg_const_i32(v2
);
536 cmp_32(s
, v1
, tmp
, CC_OP_LTUGTU_32
);
537 tcg_temp_free_i32(tmp
);
540 static inline void cmp_s64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
542 cmp_64(s
, v1
, v2
, CC_OP_LTGT_64
);
545 static inline void cmp_u64(DisasContext
*s
, TCGv_i64 v1
, TCGv_i64 v2
)
547 cmp_64(s
, v1
, v2
, CC_OP_LTUGTU_64
);
550 static inline void cmp_s64c(DisasContext
*s
, TCGv_i64 v1
, int64_t v2
)
552 TCGv_i64 tmp
= tcg_const_i64(v2
);
554 tcg_temp_free_i64(tmp
);
557 static inline void cmp_u64c(DisasContext
*s
, TCGv_i64 v1
, uint64_t v2
)
559 TCGv_i64 tmp
= tcg_const_i64(v2
);
561 tcg_temp_free_i64(tmp
);
564 static inline void set_cc_s32(DisasContext
*s
, TCGv_i32 val
)
566 gen_op_update1_cc_i32(s
, CC_OP_LTGT0_32
, val
);
569 static inline void set_cc_s64(DisasContext
*s
, TCGv_i64 val
)
571 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, val
);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext
*s
)
577 tcg_gen_discard_i64(cc_src
);
578 tcg_gen_discard_i64(cc_dst
);
579 tcg_gen_discard_i64(cc_vr
);
580 s
->cc_op
= CC_OP_STATIC
;
583 static inline void gen_op_set_cc_op(DisasContext
*s
)
585 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
586 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
590 static inline void gen_update_cc_op(DisasContext
*s
)
595 /* calculates cc into cc_op */
596 static void gen_op_calc_cc(DisasContext
*s
)
598 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
599 TCGv_i64 dummy
= tcg_const_i64(0);
606 /* s->cc_op is the cc value */
607 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
610 /* env->cc_op already is the cc value */
625 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
630 case CC_OP_LTUGTU_32
:
631 case CC_OP_LTUGTU_64
:
638 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
653 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
656 /* unknown operation - assume 3 arguments and cc_op in env */
657 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
663 tcg_temp_free_i32(local_cc_op
);
664 tcg_temp_free_i64(dummy
);
666 /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext
*s
, uint64_t insn
, int *r1
, int *r2
)
674 *r1
= (insn
>> 4) & 0xf;
678 static inline TCGv_i64
decode_rx(DisasContext
*s
, uint64_t insn
, int *r1
,
679 int *x2
, int *b2
, int *d2
)
683 *r1
= (insn
>> 20) & 0xf;
684 *x2
= (insn
>> 16) & 0xf;
685 *b2
= (insn
>> 12) & 0xf;
688 return get_address(s
, *x2
, *b2
, *d2
);
691 static inline void decode_rs(DisasContext
*s
, uint64_t insn
, int *r1
, int *r3
,
696 *r1
= (insn
>> 20) & 0xf;
698 *r3
= (insn
>> 16) & 0xf;
699 *b2
= (insn
>> 12) & 0xf;
703 static inline TCGv_i64
decode_si(DisasContext
*s
, uint64_t insn
, int *i2
,
708 *i2
= (insn
>> 16) & 0xff;
709 *b1
= (insn
>> 12) & 0xf;
712 return get_address(s
, 0, *b1
, *d1
);
715 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
719 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
720 && !s
->singlestep_enabled
721 && !(s
->tb
->cflags
& CF_LAST_IO
));
724 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong pc
)
728 if (use_goto_tb(s
, pc
)) {
729 tcg_gen_goto_tb(tb_num
);
730 tcg_gen_movi_i64(psw_addr
, pc
);
731 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ tb_num
);
733 /* jump to another page: currently not optimized */
734 tcg_gen_movi_i64(psw_addr
, pc
);
739 static inline void account_noninline_branch(DisasContext
*s
, int cc_op
)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss
[cc_op
]++;
746 static inline void account_inline_branch(DisasContext
*s
, int cc_op
)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit
[cc_op
]++;
753 /* Table of mask values to comparison codes, given a comparison as input.
754 For a true comparison CC=3 will never be set, but we treat this
755 conservatively for possible use when CC=3 indicates overflow. */
756 static const TCGCond ltgt_cond
[16] = {
757 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
758 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
759 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
760 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
761 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
762 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
763 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
764 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
767 /* Table of mask values to comparison codes, given a logic op as input.
768 For such, only CC=0 and CC=1 should be possible. */
769 static const TCGCond nz_cond
[16] = {
771 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
773 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
775 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
776 /* EQ | NE | x | x */
777 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
780 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
781 details required to generate a TCG comparison. */
782 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
785 enum cc_op old_cc_op
= s
->cc_op
;
787 if (mask
== 15 || mask
== 0) {
788 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
791 c
->g1
= c
->g2
= true;
796 /* Find the TCG condition for the mask + cc op. */
802 cond
= ltgt_cond
[mask
];
803 if (cond
== TCG_COND_NEVER
) {
806 account_inline_branch(s
, old_cc_op
);
809 case CC_OP_LTUGTU_32
:
810 case CC_OP_LTUGTU_64
:
811 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
812 if (cond
== TCG_COND_NEVER
) {
815 account_inline_branch(s
, old_cc_op
);
819 cond
= nz_cond
[mask
];
820 if (cond
== TCG_COND_NEVER
) {
823 account_inline_branch(s
, old_cc_op
);
838 account_inline_branch(s
, old_cc_op
);
853 account_inline_branch(s
, old_cc_op
);
857 switch (mask
& 0xa) {
858 case 8: /* src == 0 -> no one bit found */
861 case 2: /* src != 0 -> one bit found */
867 account_inline_branch(s
, old_cc_op
);
872 /* Calculate cc value. */
877 /* Jump based on CC. We'll load up the real cond below;
878 the assignment here merely avoids a compiler warning. */
879 account_noninline_branch(s
, old_cc_op
);
880 old_cc_op
= CC_OP_STATIC
;
881 cond
= TCG_COND_NEVER
;
885 /* Load up the arguments of the comparison. */
887 c
->g1
= c
->g2
= false;
891 c
->u
.s32
.a
= tcg_temp_new_i32();
892 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
893 c
->u
.s32
.b
= tcg_const_i32(0);
896 case CC_OP_LTUGTU_32
:
898 c
->u
.s32
.a
= tcg_temp_new_i32();
899 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
900 c
->u
.s32
.b
= tcg_temp_new_i32();
901 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
908 c
->u
.s64
.b
= tcg_const_i64(0);
912 case CC_OP_LTUGTU_64
:
915 c
->g1
= c
->g2
= true;
921 c
->u
.s64
.a
= tcg_temp_new_i64();
922 c
->u
.s64
.b
= tcg_const_i64(0);
923 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
931 case 0x8 | 0x4 | 0x2: /* cc != 3 */
933 c
->u
.s32
.b
= tcg_const_i32(3);
935 case 0x8 | 0x4 | 0x1: /* cc != 2 */
937 c
->u
.s32
.b
= tcg_const_i32(2);
939 case 0x8 | 0x2 | 0x1: /* cc != 1 */
941 c
->u
.s32
.b
= tcg_const_i32(1);
943 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
946 c
->u
.s32
.a
= tcg_temp_new_i32();
947 c
->u
.s32
.b
= tcg_const_i32(0);
948 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
950 case 0x8 | 0x4: /* cc < 2 */
952 c
->u
.s32
.b
= tcg_const_i32(2);
954 case 0x8: /* cc == 0 */
956 c
->u
.s32
.b
= tcg_const_i32(0);
958 case 0x4 | 0x2 | 0x1: /* cc != 0 */
960 c
->u
.s32
.b
= tcg_const_i32(0);
962 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
965 c
->u
.s32
.a
= tcg_temp_new_i32();
966 c
->u
.s32
.b
= tcg_const_i32(0);
967 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
969 case 0x4: /* cc == 1 */
971 c
->u
.s32
.b
= tcg_const_i32(1);
973 case 0x2 | 0x1: /* cc > 1 */
975 c
->u
.s32
.b
= tcg_const_i32(1);
977 case 0x2: /* cc == 2 */
979 c
->u
.s32
.b
= tcg_const_i32(2);
981 case 0x1: /* cc == 3 */
983 c
->u
.s32
.b
= tcg_const_i32(3);
986 /* CC is masked by something else: (8 >> cc) & mask. */
989 c
->u
.s32
.a
= tcg_const_i32(8);
990 c
->u
.s32
.b
= tcg_const_i32(0);
991 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
992 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
1003 static void free_compare(DisasCompare
*c
)
1007 tcg_temp_free_i64(c
->u
.s64
.a
);
1009 tcg_temp_free_i32(c
->u
.s32
.a
);
1014 tcg_temp_free_i64(c
->u
.s64
.b
);
1016 tcg_temp_free_i32(c
->u
.s32
.b
);
1021 static void disas_b2(CPUS390XState
*env
, DisasContext
*s
, int op
,
1024 #ifndef CONFIG_USER_ONLY
1025 TCGv_i64 tmp
, tmp2
, tmp3
;
1026 TCGv_i32 tmp32_1
, tmp32_2
;
1030 r1
= (insn
>> 4) & 0xf;
1033 LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op
, r1
, r2
);
1036 case 0x34: /* STCH ? */
1037 /* Store Subchannel */
1038 check_privileged(s
);
1039 gen_op_movi_cc(s
, 3);
1041 case 0x46: /* STURA R1,R2 [RRE] */
1042 /* Store Using Real Address */
1043 check_privileged(s
);
1044 r1
= (insn
>> 4) & 0xf;
1046 tmp32_1
= load_reg32(r1
);
1048 potential_page_fault(s
);
1049 gen_helper_stura(cpu_env
, tmp
, tmp32_1
);
1050 tcg_temp_free_i32(tmp32_1
);
1051 tcg_temp_free_i64(tmp
);
1053 case 0x50: /* CSP R1,R2 [RRE] */
1054 /* Compare And Swap And Purge */
1055 check_privileged(s
);
1056 r1
= (insn
>> 4) & 0xf;
1058 tmp32_1
= tcg_const_i32(r1
);
1059 tmp32_2
= tcg_const_i32(r2
);
1060 gen_helper_csp(cc_op
, cpu_env
, tmp32_1
, tmp32_2
);
1062 tcg_temp_free_i32(tmp32_1
);
1063 tcg_temp_free_i32(tmp32_2
);
1065 case 0x5f: /* CHSC ? */
1066 /* Channel Subsystem Call */
1067 check_privileged(s
);
1068 gen_op_movi_cc(s
, 3);
1070 case 0x78: /* STCKE D2(B2) [S] */
1071 /* Store Clock Extended */
1072 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1073 tmp
= get_address(s
, 0, b2
, d2
);
1074 potential_page_fault(s
);
1075 gen_helper_stcke(cc_op
, cpu_env
, tmp
);
1077 tcg_temp_free_i64(tmp
);
1079 case 0x79: /* SACF D2(B2) [S] */
1080 /* Set Address Space Control Fast */
1081 check_privileged(s
);
1082 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1083 tmp
= get_address(s
, 0, b2
, d2
);
1084 potential_page_fault(s
);
1085 gen_helper_sacf(cpu_env
, tmp
);
1086 tcg_temp_free_i64(tmp
);
1087 /* addressing mode has changed, so end the block */
1090 s
->is_jmp
= DISAS_JUMP
;
1092 case 0x7d: /* STSI D2,(B2) [S] */
1093 check_privileged(s
);
1094 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1095 tmp
= get_address(s
, 0, b2
, d2
);
1096 tmp32_1
= load_reg32(0);
1097 tmp32_2
= load_reg32(1);
1098 potential_page_fault(s
);
1099 gen_helper_stsi(cc_op
, cpu_env
, tmp
, tmp32_1
, tmp32_2
);
1101 tcg_temp_free_i64(tmp
);
1102 tcg_temp_free_i32(tmp32_1
);
1103 tcg_temp_free_i32(tmp32_2
);
1105 case 0xb1: /* STFL D2(B2) [S] */
1106 /* Store Facility List (CPU features) at 200 */
1107 check_privileged(s
);
1108 tmp2
= tcg_const_i64(0xc0000000);
1109 tmp
= tcg_const_i64(200);
1110 tcg_gen_qemu_st32(tmp2
, tmp
, get_mem_index(s
));
1111 tcg_temp_free_i64(tmp2
);
1112 tcg_temp_free_i64(tmp
);
1114 case 0xb2: /* LPSWE D2(B2) [S] */
1115 /* Load PSW Extended */
1116 check_privileged(s
);
1117 decode_rs(s
, insn
, &r1
, &r3
, &b2
, &d2
);
1118 tmp
= get_address(s
, 0, b2
, d2
);
1119 tmp2
= tcg_temp_new_i64();
1120 tmp3
= tcg_temp_new_i64();
1121 tcg_gen_qemu_ld64(tmp2
, tmp
, get_mem_index(s
));
1122 tcg_gen_addi_i64(tmp
, tmp
, 8);
1123 tcg_gen_qemu_ld64(tmp3
, tmp
, get_mem_index(s
));
1124 gen_helper_load_psw(cpu_env
, tmp2
, tmp3
);
1125 /* we need to keep cc_op intact */
1126 s
->is_jmp
= DISAS_JUMP
;
1127 tcg_temp_free_i64(tmp
);
1128 tcg_temp_free_i64(tmp2
);
1129 tcg_temp_free_i64(tmp3
);
1131 case 0x20: /* SERVC R1,R2 [RRE] */
1132 /* SCLP Service call (PV hypercall) */
1133 check_privileged(s
);
1134 potential_page_fault(s
);
1135 tmp32_1
= load_reg32(r2
);
1137 gen_helper_servc(cc_op
, cpu_env
, tmp32_1
, tmp
);
1139 tcg_temp_free_i32(tmp32_1
);
1140 tcg_temp_free_i64(tmp
);
1144 LOG_DISAS("illegal b2 operation 0x%x\n", op
);
1145 gen_illegal_opcode(s
);
1146 #ifndef CONFIG_USER_ONLY
1152 static void disas_s390_insn(CPUS390XState
*env
, DisasContext
*s
)
1158 opc
= cpu_ldub_code(env
, s
->pc
);
1159 LOG_DISAS("opc 0x%x\n", opc
);
1163 insn
= ld_code4(env
, s
->pc
);
1164 op
= (insn
>> 16) & 0xff;
1165 disas_b2(env
, s
, op
, insn
);
1168 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%x\n", opc
);
1169 gen_illegal_opcode(s
);
1174 /* ====================================================================== */
1175 /* Define the insn format enumeration. */
1176 #define F0(N) FMT_##N,
1177 #define F1(N, X1) F0(N)
1178 #define F2(N, X1, X2) F0(N)
1179 #define F3(N, X1, X2, X3) F0(N)
1180 #define F4(N, X1, X2, X3, X4) F0(N)
1181 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1184 #include "insn-format.def"
1194 /* Define a structure to hold the decoded fields. We'll store each inside
1195 an array indexed by an enum. In order to conserve memory, we'll arrange
1196 for fields that do not exist at the same time to overlap, thus the "C"
1197 for compact. For checking purposes there is an "O" for original index
1198 as well that will be applied to availability bitmaps. */
1200 enum DisasFieldIndexO
{
1223 enum DisasFieldIndexC
{
1254 struct DisasFields
{
1257 unsigned presentC
:16;
1258 unsigned int presentO
;
1262 /* This is the way fields are to be accessed out of DisasFields. */
1263 #define have_field(S, F) have_field1((S), FLD_O_##F)
1264 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1266 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1268 return (f
->presentO
>> c
) & 1;
1271 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1272 enum DisasFieldIndexC c
)
1274 assert(have_field1(f
, o
));
1278 /* Describe the layout of each field in each format. */
1279 typedef struct DisasField
{
1281 unsigned int size
:8;
1282 unsigned int type
:2;
1283 unsigned int indexC
:6;
1284 enum DisasFieldIndexO indexO
:8;
1287 typedef struct DisasFormatInfo
{
1288 DisasField op
[NUM_C_FIELD
];
1291 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1292 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1293 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1294 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1295 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1296 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1297 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1298 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1299 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1300 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1301 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1302 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1303 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1304 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1306 #define F0(N) { { } },
1307 #define F1(N, X1) { { X1 } },
1308 #define F2(N, X1, X2) { { X1, X2 } },
1309 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1310 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1311 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1313 static const DisasFormatInfo format_info
[] = {
1314 #include "insn-format.def"
1332 /* Generally, we'll extract operands into this structures, operate upon
1333 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1334 of routines below for more details. */
1336 bool g_out
, g_out2
, g_in1
, g_in2
;
1337 TCGv_i64 out
, out2
, in1
, in2
;
1341 /* Return values from translate_one, indicating the state of the TB. */
1343 /* Continue the TB. */
1345 /* We have emitted one or more goto_tb. No fixup required. */
1347 /* We are not using a goto_tb (for whatever reason), but have updated
1348 the PC (for whatever reason), so there's no need to do it again on
1351 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1352 updated the PC for the next instruction to be executed. */
1354 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1355 No following code will be executed. */
1359 typedef enum DisasFacility
{
1360 FAC_Z
, /* zarch (default) */
1361 FAC_CASS
, /* compare and swap and store */
1362 FAC_CASS2
, /* compare and swap and store 2*/
1363 FAC_DFP
, /* decimal floating point */
1364 FAC_DFPR
, /* decimal floating point rounding */
1365 FAC_DO
, /* distinct operands */
1366 FAC_EE
, /* execute extensions */
1367 FAC_EI
, /* extended immediate */
1368 FAC_FPE
, /* floating point extension */
1369 FAC_FPSSH
, /* floating point support sign handling */
1370 FAC_FPRGR
, /* FPR-GR transfer */
1371 FAC_GIE
, /* general instructions extension */
1372 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1373 FAC_HW
, /* high-word */
1374 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1375 FAC_LOC
, /* load/store on condition */
1376 FAC_LD
, /* long displacement */
1377 FAC_PC
, /* population count */
1378 FAC_SCF
, /* store clock fast */
1379 FAC_SFLE
, /* store facility list extended */
1385 DisasFacility fac
:6;
1389 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1390 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1391 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1392 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1393 void (*help_cout
)(DisasContext
*, DisasOps
*);
1394 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1399 /* ====================================================================== */
1400 /* Miscelaneous helpers, used by several operations. */
1402 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1403 DisasOps
*o
, int mask
)
1405 int b2
= get_field(f
, b2
);
1406 int d2
= get_field(f
, d2
);
1409 o
->in2
= tcg_const_i64(d2
& mask
);
1411 o
->in2
= get_address(s
, 0, b2
, d2
);
1412 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1416 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1418 if (dest
== s
->next_pc
) {
1421 if (use_goto_tb(s
, dest
)) {
1422 gen_update_cc_op(s
);
1424 tcg_gen_movi_i64(psw_addr
, dest
);
1425 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1426 return EXIT_GOTO_TB
;
1428 tcg_gen_movi_i64(psw_addr
, dest
);
1429 return EXIT_PC_UPDATED
;
1433 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1434 bool is_imm
, int imm
, TCGv_i64 cdest
)
1437 uint64_t dest
= s
->pc
+ 2 * imm
;
1440 /* Take care of the special cases first. */
1441 if (c
->cond
== TCG_COND_NEVER
) {
1446 if (dest
== s
->next_pc
) {
1447 /* Branch to next. */
1451 if (c
->cond
== TCG_COND_ALWAYS
) {
1452 ret
= help_goto_direct(s
, dest
);
1456 if (TCGV_IS_UNUSED_I64(cdest
)) {
1457 /* E.g. bcr %r0 -> no branch. */
1461 if (c
->cond
== TCG_COND_ALWAYS
) {
1462 tcg_gen_mov_i64(psw_addr
, cdest
);
1463 ret
= EXIT_PC_UPDATED
;
1468 if (use_goto_tb(s
, s
->next_pc
)) {
1469 if (is_imm
&& use_goto_tb(s
, dest
)) {
1470 /* Both exits can use goto_tb. */
1471 gen_update_cc_op(s
);
1473 lab
= gen_new_label();
1475 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1477 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1480 /* Branch not taken. */
1482 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1483 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1488 tcg_gen_movi_i64(psw_addr
, dest
);
1489 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1493 /* Fallthru can use goto_tb, but taken branch cannot. */
1494 /* Store taken branch destination before the brcond. This
1495 avoids having to allocate a new local temp to hold it.
1496 We'll overwrite this in the not taken case anyway. */
1498 tcg_gen_mov_i64(psw_addr
, cdest
);
1501 lab
= gen_new_label();
1503 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1505 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1508 /* Branch not taken. */
1509 gen_update_cc_op(s
);
1511 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1512 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1516 tcg_gen_movi_i64(psw_addr
, dest
);
1518 ret
= EXIT_PC_UPDATED
;
1521 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1522 Most commonly we're single-stepping or some other condition that
1523 disables all use of goto_tb. Just update the PC and exit. */
1525 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1527 cdest
= tcg_const_i64(dest
);
1531 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1534 TCGv_i32 t0
= tcg_temp_new_i32();
1535 TCGv_i64 t1
= tcg_temp_new_i64();
1536 TCGv_i64 z
= tcg_const_i64(0);
1537 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1538 tcg_gen_extu_i32_i64(t1
, t0
);
1539 tcg_temp_free_i32(t0
);
1540 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1541 tcg_temp_free_i64(t1
);
1542 tcg_temp_free_i64(z
);
1546 tcg_temp_free_i64(cdest
);
1548 tcg_temp_free_i64(next
);
1550 ret
= EXIT_PC_UPDATED
;
1558 /* ====================================================================== */
1559 /* The operations. These perform the bulk of the work for any insn,
1560 usually after the operands have been loaded and output initialized. */
1562 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1564 gen_helper_abs_i64(o
->out
, o
->in2
);
1568 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1570 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1574 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1576 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1580 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1582 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1583 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1587 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1589 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1593 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1597 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1599 /* XXX possible optimization point */
1601 cc
= tcg_temp_new_i64();
1602 tcg_gen_extu_i32_i64(cc
, cc_op
);
1603 tcg_gen_shri_i64(cc
, cc
, 1);
1605 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1606 tcg_temp_free_i64(cc
);
1610 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1612 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1616 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1618 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1622 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1624 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1625 return_low128(o
->out2
);
1629 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1631 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1635 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1637 int shift
= s
->insn
->data
& 0xff;
1638 int size
= s
->insn
->data
>> 8;
1639 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1642 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1643 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1644 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1646 /* Produce the CC from only the bits manipulated. */
1647 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1648 set_cc_nz_u64(s
, cc_dst
);
1652 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1654 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1655 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1656 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1657 return EXIT_PC_UPDATED
;
1663 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1665 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1666 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1669 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1671 int m1
= get_field(s
->fields
, m1
);
1672 bool is_imm
= have_field(s
->fields
, i2
);
1673 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1676 disas_jcc(s
, &c
, m1
);
1677 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1680 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1682 int r1
= get_field(s
->fields
, r1
);
1683 bool is_imm
= have_field(s
->fields
, i2
);
1684 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1688 c
.cond
= TCG_COND_NE
;
1693 t
= tcg_temp_new_i64();
1694 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1695 store_reg32_i64(r1
, t
);
1696 c
.u
.s32
.a
= tcg_temp_new_i32();
1697 c
.u
.s32
.b
= tcg_const_i32(0);
1698 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1699 tcg_temp_free_i64(t
);
1701 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1704 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1706 int r1
= get_field(s
->fields
, r1
);
1707 bool is_imm
= have_field(s
->fields
, i2
);
1708 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1711 c
.cond
= TCG_COND_NE
;
1716 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1717 c
.u
.s64
.a
= regs
[r1
];
1718 c
.u
.s64
.b
= tcg_const_i64(0);
1720 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1723 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1725 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1730 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1732 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1737 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1739 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1744 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1746 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1747 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1748 tcg_temp_free_i32(m3
);
1749 gen_set_cc_nz_f32(s
, o
->in2
);
1753 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1755 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1756 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1757 tcg_temp_free_i32(m3
);
1758 gen_set_cc_nz_f64(s
, o
->in2
);
1762 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1764 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1765 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1766 tcg_temp_free_i32(m3
);
1767 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1771 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1773 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1774 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1775 tcg_temp_free_i32(m3
);
1776 gen_set_cc_nz_f32(s
, o
->in2
);
1780 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1782 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1783 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1784 tcg_temp_free_i32(m3
);
1785 gen_set_cc_nz_f64(s
, o
->in2
);
1789 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1791 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1792 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1793 tcg_temp_free_i32(m3
);
1794 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1798 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1800 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1801 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1802 tcg_temp_free_i32(m3
);
1806 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1808 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1809 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1810 tcg_temp_free_i32(m3
);
1814 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1816 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1817 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1818 tcg_temp_free_i32(m3
);
1819 return_low128(o
->out2
);
1823 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1825 int r2
= get_field(s
->fields
, r2
);
1826 TCGv_i64 len
= tcg_temp_new_i64();
1828 potential_page_fault(s
);
1829 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1831 return_low128(o
->out
);
1833 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1834 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1835 tcg_temp_free_i64(len
);
1840 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1842 int l
= get_field(s
->fields
, l1
);
1847 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1848 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1851 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1852 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1855 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1856 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1859 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1860 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1863 potential_page_fault(s
);
1864 vl
= tcg_const_i32(l
);
1865 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1866 tcg_temp_free_i32(vl
);
1870 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1874 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1876 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1877 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1878 potential_page_fault(s
);
1879 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1880 tcg_temp_free_i32(r1
);
1881 tcg_temp_free_i32(r3
);
1886 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1888 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1889 TCGv_i32 t1
= tcg_temp_new_i32();
1890 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1891 potential_page_fault(s
);
1892 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1894 tcg_temp_free_i32(t1
);
1895 tcg_temp_free_i32(m3
);
1899 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1901 potential_page_fault(s
);
1902 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1904 return_low128(o
->in2
);
1908 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1910 int r3
= get_field(s
->fields
, r3
);
1911 potential_page_fault(s
);
1912 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1917 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1919 int r3
= get_field(s
->fields
, r3
);
1920 potential_page_fault(s
);
1921 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1926 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1928 int r3
= get_field(s
->fields
, r3
);
1929 TCGv_i64 in3
= tcg_temp_new_i64();
1930 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1931 potential_page_fault(s
);
1932 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1933 tcg_temp_free_i64(in3
);
1938 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1940 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1941 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1942 potential_page_fault(s
);
1943 /* XXX rewrite in tcg */
1944 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1949 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1951 TCGv_i64 t1
= tcg_temp_new_i64();
1952 TCGv_i32 t2
= tcg_temp_new_i32();
1953 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1954 gen_helper_cvd(t1
, t2
);
1955 tcg_temp_free_i32(t2
);
1956 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1957 tcg_temp_free_i64(t1
);
1961 #ifndef CONFIG_USER_ONLY
1962 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1966 check_privileged(s
);
1967 potential_page_fault(s
);
1969 /* We pretend the format is RX_a so that D2 is the field we want. */
1970 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1971 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1972 tcg_temp_free_i32(tmp
);
1977 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1979 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1980 return_low128(o
->out
);
1984 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1986 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1987 return_low128(o
->out
);
1991 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1993 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1994 return_low128(o
->out
);
1998 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2000 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2001 return_low128(o
->out
);
2005 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2007 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2011 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2013 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2017 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2019 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2020 return_low128(o
->out2
);
2024 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2026 int r2
= get_field(s
->fields
, r2
);
2027 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2031 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2033 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2037 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2039 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2040 tb->flags, (ab)use the tb->cs_base field as the address of
2041 the template in memory, and grab 8 bits of tb->flags/cflags for
2042 the contents of the register. We would then recognize all this
2043 in gen_intermediate_code_internal, generating code for exactly
2044 one instruction. This new TB then gets executed normally.
2046 On the other hand, this seems to be mostly used for modifying
2047 MVC inside of memcpy, which needs a helper call anyway. So
2048 perhaps this doesn't bear thinking about any further. */
2055 tmp
= tcg_const_i64(s
->next_pc
);
2056 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2057 tcg_temp_free_i64(tmp
);
2063 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2065 /* We'll use the original input for cc computation, since we get to
2066 compare that against 0, which ought to be better than comparing
2067 the real output against 64. It also lets cc_dst be a convenient
2068 temporary during our computation. */
2069 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2071 /* R1 = IN ? CLZ(IN) : 64. */
2072 gen_helper_clz(o
->out
, o
->in2
);
2074 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2075 value by 64, which is undefined. But since the shift is 64 iff the
2076 input is zero, we still get the correct result after and'ing. */
2077 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2078 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2079 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2083 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2085 int m3
= get_field(s
->fields
, m3
);
2086 int pos
, len
, base
= s
->insn
->data
;
2087 TCGv_i64 tmp
= tcg_temp_new_i64();
2092 /* Effectively a 32-bit load. */
2093 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2100 /* Effectively a 16-bit load. */
2101 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2109 /* Effectively an 8-bit load. */
2110 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2115 pos
= base
+ ctz32(m3
) * 8;
2116 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2117 ccm
= ((1ull << len
) - 1) << pos
;
2121 /* This is going to be a sequence of loads and inserts. */
2122 pos
= base
+ 32 - 8;
2126 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2127 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2128 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2131 m3
= (m3
<< 1) & 0xf;
2137 tcg_gen_movi_i64(tmp
, ccm
);
2138 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2139 tcg_temp_free_i64(tmp
);
2143 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2145 int shift
= s
->insn
->data
& 0xff;
2146 int size
= s
->insn
->data
>> 8;
2147 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2151 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2156 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2158 t1
= tcg_temp_new_i64();
2159 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2160 tcg_gen_shri_i64(t1
, t1
, 36);
2161 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2163 tcg_gen_extu_i32_i64(t1
, cc_op
);
2164 tcg_gen_shli_i64(t1
, t1
, 28);
2165 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2166 tcg_temp_free_i64(t1
);
2170 #ifndef CONFIG_USER_ONLY
2171 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2173 check_privileged(s
);
2174 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2178 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2180 check_privileged(s
);
2181 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2186 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2188 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2192 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2194 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2198 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2200 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2204 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2206 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2210 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2212 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2213 return_low128(o
->out2
);
2217 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2219 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2220 return_low128(o
->out2
);
2224 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2226 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2230 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2232 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2236 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2238 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2242 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2244 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2248 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2250 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2254 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2256 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2260 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2262 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2266 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2268 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2272 #ifndef CONFIG_USER_ONLY
2273 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2275 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2276 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2277 check_privileged(s
);
2278 potential_page_fault(s
);
2279 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2280 tcg_temp_free_i32(r1
);
2281 tcg_temp_free_i32(r3
);
2285 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2287 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2288 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2289 check_privileged(s
);
2290 potential_page_fault(s
);
2291 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2292 tcg_temp_free_i32(r1
);
2293 tcg_temp_free_i32(r3
);
2296 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2298 check_privileged(s
);
2299 potential_page_fault(s
);
2300 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2305 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2309 check_privileged(s
);
2311 t1
= tcg_temp_new_i64();
2312 t2
= tcg_temp_new_i64();
2313 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2314 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2315 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2316 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2317 tcg_gen_shli_i64(t1
, t1
, 32);
2318 gen_helper_load_psw(cpu_env
, t1
, t2
);
2319 tcg_temp_free_i64(t1
);
2320 tcg_temp_free_i64(t2
);
2321 return EXIT_NORETURN
;
2325 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2327 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2328 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2329 potential_page_fault(s
);
2330 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2331 tcg_temp_free_i32(r1
);
2332 tcg_temp_free_i32(r3
);
2336 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2338 int r1
= get_field(s
->fields
, r1
);
2339 int r3
= get_field(s
->fields
, r3
);
2340 TCGv_i64 t
= tcg_temp_new_i64();
2341 TCGv_i64 t4
= tcg_const_i64(4);
2344 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2345 store_reg32_i64(r1
, t
);
2349 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2353 tcg_temp_free_i64(t
);
2354 tcg_temp_free_i64(t4
);
2358 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2360 int r1
= get_field(s
->fields
, r1
);
2361 int r3
= get_field(s
->fields
, r3
);
2362 TCGv_i64 t
= tcg_temp_new_i64();
2363 TCGv_i64 t4
= tcg_const_i64(4);
2366 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2367 store_reg32h_i64(r1
, t
);
2371 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2375 tcg_temp_free_i64(t
);
2376 tcg_temp_free_i64(t4
);
2380 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2382 int r1
= get_field(s
->fields
, r1
);
2383 int r3
= get_field(s
->fields
, r3
);
2384 TCGv_i64 t8
= tcg_const_i64(8);
2387 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2391 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2395 tcg_temp_free_i64(t8
);
2399 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2402 o
->g_out
= o
->g_in2
;
2403 TCGV_UNUSED_I64(o
->in2
);
2408 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2412 o
->g_out
= o
->g_in1
;
2413 o
->g_out2
= o
->g_in2
;
2414 TCGV_UNUSED_I64(o
->in1
);
2415 TCGV_UNUSED_I64(o
->in2
);
2416 o
->g_in1
= o
->g_in2
= false;
2420 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2422 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2423 potential_page_fault(s
);
2424 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2425 tcg_temp_free_i32(l
);
2429 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2431 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2432 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2433 potential_page_fault(s
);
2434 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2435 tcg_temp_free_i32(r1
);
2436 tcg_temp_free_i32(r2
);
2441 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2443 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2444 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2445 potential_page_fault(s
);
2446 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2447 tcg_temp_free_i32(r1
);
2448 tcg_temp_free_i32(r3
);
2453 #ifndef CONFIG_USER_ONLY
2454 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2456 int r1
= get_field(s
->fields
, l1
);
2457 check_privileged(s
);
2458 potential_page_fault(s
);
2459 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2464 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2466 int r1
= get_field(s
->fields
, l1
);
2467 check_privileged(s
);
2468 potential_page_fault(s
);
2469 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2475 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2477 potential_page_fault(s
);
2478 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2483 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2485 potential_page_fault(s
);
2486 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2488 return_low128(o
->in2
);
2492 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2494 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2498 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2500 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2501 return_low128(o
->out2
);
2505 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2507 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2511 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2513 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2517 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2519 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2523 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2525 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2526 return_low128(o
->out2
);
2530 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2532 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2533 return_low128(o
->out2
);
2537 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2539 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2540 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2541 tcg_temp_free_i64(r3
);
2545 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2547 int r3
= get_field(s
->fields
, r3
);
2548 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2552 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2554 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2555 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2556 tcg_temp_free_i64(r3
);
2560 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2562 int r3
= get_field(s
->fields
, r3
);
2563 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2567 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2569 gen_helper_nabs_i64(o
->out
, o
->in2
);
2573 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2575 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2579 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2581 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2585 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2587 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2588 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2592 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2594 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2595 potential_page_fault(s
);
2596 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2597 tcg_temp_free_i32(l
);
2602 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2604 tcg_gen_neg_i64(o
->out
, o
->in2
);
2608 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2610 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2614 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2616 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2620 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2622 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2623 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2627 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2629 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2630 potential_page_fault(s
);
2631 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2632 tcg_temp_free_i32(l
);
2637 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2639 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2643 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2645 int shift
= s
->insn
->data
& 0xff;
2646 int size
= s
->insn
->data
>> 8;
2647 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2650 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2651 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2653 /* Produce the CC from only the bits manipulated. */
2654 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2655 set_cc_nz_u64(s
, cc_dst
);
2659 #ifndef CONFIG_USER_ONLY
2660 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2662 check_privileged(s
);
2663 gen_helper_ptlb(cpu_env
);
2668 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2670 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2674 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2676 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2680 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2682 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2686 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2688 TCGv_i32 t1
= tcg_temp_new_i32();
2689 TCGv_i32 t2
= tcg_temp_new_i32();
2690 TCGv_i32 to
= tcg_temp_new_i32();
2691 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2692 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2693 tcg_gen_rotl_i32(to
, t1
, t2
);
2694 tcg_gen_extu_i32_i64(o
->out
, to
);
2695 tcg_temp_free_i32(t1
);
2696 tcg_temp_free_i32(t2
);
2697 tcg_temp_free_i32(to
);
2701 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2703 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2707 #ifndef CONFIG_USER_ONLY
2708 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2710 check_privileged(s
);
2711 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2717 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2719 int r1
= get_field(s
->fields
, r1
);
2720 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2724 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2726 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2730 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2732 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2736 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2738 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2739 return_low128(o
->out2
);
2743 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2745 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2749 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2751 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2755 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2757 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2758 return_low128(o
->out2
);
2762 #ifndef CONFIG_USER_ONLY
2763 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2765 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2766 check_privileged(s
);
2767 potential_page_fault(s
);
2768 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2769 tcg_temp_free_i32(r1
);
2774 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2776 uint64_t sign
= 1ull << s
->insn
->data
;
2777 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2778 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2779 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2780 /* The arithmetic left shift is curious in that it does not affect
2781 the sign bit. Copy that over from the source unchanged. */
2782 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2783 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2784 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2788 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2790 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2794 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2796 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2800 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2802 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2806 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2808 gen_helper_sfpc(cpu_env
, o
->in2
);
2812 #ifndef CONFIG_USER_ONLY
2813 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2815 check_privileged(s
);
2816 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2817 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2821 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
2823 check_privileged(s
);
2824 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
2828 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2830 check_privileged(s
);
2831 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2835 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
2837 check_privileged(s
);
2838 /* ??? Surely cpu address != cpu number. In any case the previous
2839 version of this stored more than the required half-word, so it
2840 is unlikely this has ever been tested. */
2841 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2845 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2847 gen_helper_stck(o
->out
, cpu_env
);
2848 /* ??? We don't implement clock states. */
2849 gen_op_movi_cc(s
, 0);
2853 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2855 check_privileged(s
);
2856 gen_helper_sckc(cpu_env
, o
->in2
);
2860 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2862 check_privileged(s
);
2863 gen_helper_stckc(o
->out
, cpu_env
);
2867 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2869 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2870 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2871 check_privileged(s
);
2872 potential_page_fault(s
);
2873 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2874 tcg_temp_free_i32(r1
);
2875 tcg_temp_free_i32(r3
);
2879 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2881 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2882 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2883 check_privileged(s
);
2884 potential_page_fault(s
);
2885 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2886 tcg_temp_free_i32(r1
);
2887 tcg_temp_free_i32(r3
);
2891 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2893 check_privileged(s
);
2894 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2898 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2900 check_privileged(s
);
2901 gen_helper_spt(cpu_env
, o
->in2
);
2905 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2907 check_privileged(s
);
2908 gen_helper_stpt(o
->out
, cpu_env
);
2912 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
2914 check_privileged(s
);
2915 gen_helper_spx(cpu_env
, o
->in2
);
2919 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
2921 check_privileged(s
);
2922 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
2923 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
2927 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2929 uint64_t i2
= get_field(s
->fields
, i2
);
2932 check_privileged(s
);
2934 /* It is important to do what the instruction name says: STORE THEN.
2935 If we let the output hook perform the store then if we fault and
2936 restart, we'll have the wrong SYSTEM MASK in place. */
2937 t
= tcg_temp_new_i64();
2938 tcg_gen_shri_i64(t
, psw_mask
, 56);
2939 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2940 tcg_temp_free_i64(t
);
2942 if (s
->fields
->op
== 0xac) {
2943 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2944 (i2
<< 56) | 0x00ffffffffffffffull
);
2946 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2952 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2954 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2958 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2960 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2964 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2966 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2970 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2972 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2976 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2978 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2979 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2980 potential_page_fault(s
);
2981 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
2982 tcg_temp_free_i32(r1
);
2983 tcg_temp_free_i32(r3
);
2987 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
2989 int m3
= get_field(s
->fields
, m3
);
2990 int pos
, base
= s
->insn
->data
;
2991 TCGv_i64 tmp
= tcg_temp_new_i64();
2993 pos
= base
+ ctz32(m3
) * 8;
2996 /* Effectively a 32-bit store. */
2997 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2998 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3004 /* Effectively a 16-bit store. */
3005 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3006 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3013 /* Effectively an 8-bit store. */
3014 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3015 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3019 /* This is going to be a sequence of shifts and stores. */
3020 pos
= base
+ 32 - 8;
3023 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3024 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3025 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3027 m3
= (m3
<< 1) & 0xf;
3032 tcg_temp_free_i64(tmp
);
3036 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3038 int r1
= get_field(s
->fields
, r1
);
3039 int r3
= get_field(s
->fields
, r3
);
3040 int size
= s
->insn
->data
;
3041 TCGv_i64 tsize
= tcg_const_i64(size
);
3045 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3047 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3052 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3056 tcg_temp_free_i64(tsize
);
3060 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3062 int r1
= get_field(s
->fields
, r1
);
3063 int r3
= get_field(s
->fields
, r3
);
3064 TCGv_i64 t
= tcg_temp_new_i64();
3065 TCGv_i64 t4
= tcg_const_i64(4);
3066 TCGv_i64 t32
= tcg_const_i64(32);
3069 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3070 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3074 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3078 tcg_temp_free_i64(t
);
3079 tcg_temp_free_i64(t4
);
3080 tcg_temp_free_i64(t32
);
3084 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3086 potential_page_fault(s
);
3087 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3089 return_low128(o
->in2
);
3093 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3095 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3099 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3104 tcg_gen_not_i64(o
->in2
, o
->in2
);
3105 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
3107 /* XXX possible optimization point */
3109 cc
= tcg_temp_new_i64();
3110 tcg_gen_extu_i32_i64(cc
, cc_op
);
3111 tcg_gen_shri_i64(cc
, cc
, 1);
3112 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
3113 tcg_temp_free_i64(cc
);
3117 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3124 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3125 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3126 tcg_temp_free_i32(t
);
3128 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3129 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3130 tcg_temp_free_i32(t
);
3132 gen_exception(EXCP_SVC
);
3133 return EXIT_NORETURN
;
3136 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3138 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3143 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3145 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3150 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3152 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3157 #ifndef CONFIG_USER_ONLY
3158 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3160 potential_page_fault(s
);
3161 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3167 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3169 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3170 potential_page_fault(s
);
3171 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3172 tcg_temp_free_i32(l
);
3177 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3179 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3180 potential_page_fault(s
);
3181 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3182 tcg_temp_free_i32(l
);
3186 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3188 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3189 potential_page_fault(s
);
3190 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3191 tcg_temp_free_i32(l
);
3196 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3198 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3202 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3204 int shift
= s
->insn
->data
& 0xff;
3205 int size
= s
->insn
->data
>> 8;
3206 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3209 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3210 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3212 /* Produce the CC from only the bits manipulated. */
3213 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3214 set_cc_nz_u64(s
, cc_dst
);
3218 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3220 o
->out
= tcg_const_i64(0);
3224 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3226 o
->out
= tcg_const_i64(0);
3232 /* ====================================================================== */
3233 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3234 the original inputs), update the various cc data structures in order to
3235 be able to compute the new condition code. */
3237 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3239 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3242 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3244 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3247 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3249 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3252 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3254 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3257 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3259 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3262 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3264 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3267 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3269 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3272 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3274 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3277 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3279 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3282 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3284 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3287 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3289 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3292 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3294 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3297 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3299 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3302 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3304 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3307 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3309 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3312 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3314 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3317 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3319 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3322 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3324 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3327 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3329 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3332 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3334 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3335 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3338 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3340 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3343 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3345 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3348 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3350 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3353 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3355 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3358 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3360 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3363 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3365 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3368 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3370 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3373 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3375 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3378 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3380 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3383 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3385 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3388 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3390 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3393 /* ====================================================================== */
3394 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3395 with the TCG register to which we will write. Used in combination with
3396 the "wout" generators, in some cases we need a new temporary, and in
3397 some cases we can write to a TCG global. */
3399 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3401 o
->out
= tcg_temp_new_i64();
3404 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3406 o
->out
= tcg_temp_new_i64();
3407 o
->out2
= tcg_temp_new_i64();
3410 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3412 o
->out
= regs
[get_field(f
, r1
)];
3416 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3418 /* ??? Specification exception: r1 must be even. */
3419 int r1
= get_field(f
, r1
);
3421 o
->out2
= regs
[(r1
+ 1) & 15];
3422 o
->g_out
= o
->g_out2
= true;
3425 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3427 o
->out
= fregs
[get_field(f
, r1
)];
3431 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3433 /* ??? Specification exception: r1 must be < 14. */
3434 int r1
= get_field(f
, r1
);
3436 o
->out2
= fregs
[(r1
+ 2) & 15];
3437 o
->g_out
= o
->g_out2
= true;
3440 /* ====================================================================== */
3441 /* The "Write OUTput" generators. These generally perform some non-trivial
3442 copy of data to TCG globals, or to main memory. The trivial cases are
3443 generally handled by having a "prep" generator install the TCG global
3444 as the destination of the operation. */
3446 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3448 store_reg(get_field(f
, r1
), o
->out
);
3451 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3453 int r1
= get_field(f
, r1
);
3454 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3457 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3459 int r1
= get_field(f
, r1
);
3460 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3463 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3465 store_reg32_i64(get_field(f
, r1
), o
->out
);
3468 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3470 /* ??? Specification exception: r1 must be even. */
3471 int r1
= get_field(f
, r1
);
3472 store_reg32_i64(r1
, o
->out
);
3473 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3476 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3478 /* ??? Specification exception: r1 must be even. */
3479 int r1
= get_field(f
, r1
);
3480 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3481 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3482 store_reg32_i64(r1
, o
->out
);
3485 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3487 store_freg32_i64(get_field(f
, r1
), o
->out
);
3490 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3492 store_freg(get_field(f
, r1
), o
->out
);
3495 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3497 /* ??? Specification exception: r1 must be < 14. */
3498 int f1
= get_field(s
->fields
, r1
);
3499 store_freg(f1
, o
->out
);
3500 store_freg((f1
+ 2) & 15, o
->out2
);
3503 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3505 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3506 store_reg32_i64(get_field(f
, r1
), o
->out
);
3510 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3512 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3513 store_freg32_i64(get_field(f
, r1
), o
->out
);
3517 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3519 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3522 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3524 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3527 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3529 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3532 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3534 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3537 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3539 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3542 /* ====================================================================== */
3543 /* The "INput 1" generators. These load the first operand to an insn. */
3545 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3547 o
->in1
= load_reg(get_field(f
, r1
));
3550 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3552 o
->in1
= regs
[get_field(f
, r1
)];
3556 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3558 o
->in1
= tcg_temp_new_i64();
3559 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3562 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3564 o
->in1
= tcg_temp_new_i64();
3565 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3568 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3570 o
->in1
= tcg_temp_new_i64();
3571 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3574 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3576 /* ??? Specification exception: r1 must be even. */
3577 int r1
= get_field(f
, r1
);
3578 o
->in1
= load_reg((r1
+ 1) & 15);
3581 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3583 /* ??? Specification exception: r1 must be even. */
3584 int r1
= get_field(f
, r1
);
3585 o
->in1
= tcg_temp_new_i64();
3586 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3589 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3591 /* ??? Specification exception: r1 must be even. */
3592 int r1
= get_field(f
, r1
);
3593 o
->in1
= tcg_temp_new_i64();
3594 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3597 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3599 /* ??? Specification exception: r1 must be even. */
3600 int r1
= get_field(f
, r1
);
3601 o
->in1
= tcg_temp_new_i64();
3602 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3605 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3607 o
->in1
= load_reg(get_field(f
, r2
));
3610 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3612 o
->in1
= load_reg(get_field(f
, r3
));
3615 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3617 o
->in1
= regs
[get_field(f
, r3
)];
3621 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3623 o
->in1
= tcg_temp_new_i64();
3624 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3627 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3629 o
->in1
= tcg_temp_new_i64();
3630 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3633 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3635 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3638 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3640 o
->in1
= fregs
[get_field(f
, r1
)];
3644 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3646 /* ??? Specification exception: r1 must be < 14. */
3647 int r1
= get_field(f
, r1
);
3649 o
->out2
= fregs
[(r1
+ 2) & 15];
3650 o
->g_out
= o
->g_out2
= true;
3653 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3655 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3658 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3660 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3661 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3664 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3667 o
->in1
= tcg_temp_new_i64();
3668 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3671 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3674 o
->in1
= tcg_temp_new_i64();
3675 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3678 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3681 o
->in1
= tcg_temp_new_i64();
3682 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3685 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3688 o
->in1
= tcg_temp_new_i64();
3689 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3692 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3695 o
->in1
= tcg_temp_new_i64();
3696 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3699 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3702 o
->in1
= tcg_temp_new_i64();
3703 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3706 /* ====================================================================== */
3707 /* The "INput 2" generators. These load the second operand to an insn. */
3709 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3711 o
->in2
= regs
[get_field(f
, r1
)];
3715 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3717 o
->in2
= tcg_temp_new_i64();
3718 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3721 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3723 o
->in2
= tcg_temp_new_i64();
3724 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3727 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3729 o
->in2
= load_reg(get_field(f
, r2
));
3732 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3734 o
->in2
= regs
[get_field(f
, r2
)];
3738 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3740 int r2
= get_field(f
, r2
);
3742 o
->in2
= load_reg(r2
);
3746 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3748 o
->in2
= tcg_temp_new_i64();
3749 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3752 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3754 o
->in2
= tcg_temp_new_i64();
3755 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3758 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3760 o
->in2
= tcg_temp_new_i64();
3761 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3764 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3766 o
->in2
= tcg_temp_new_i64();
3767 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3770 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3772 o
->in2
= load_reg(get_field(f
, r3
));
3775 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3777 o
->in2
= tcg_temp_new_i64();
3778 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3781 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3783 o
->in2
= tcg_temp_new_i64();
3784 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3787 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3789 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3792 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3794 o
->in2
= fregs
[get_field(f
, r2
)];
3798 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3800 /* ??? Specification exception: r1 must be < 14. */
3801 int r2
= get_field(f
, r2
);
3803 o
->in2
= fregs
[(r2
+ 2) & 15];
3804 o
->g_in1
= o
->g_in2
= true;
3807 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3809 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3812 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3814 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3815 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3818 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3820 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3823 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3825 help_l2_shift(s
, f
, o
, 31);
3828 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3830 help_l2_shift(s
, f
, o
, 63);
3833 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3836 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3839 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3842 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3845 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3848 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3851 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3857 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3860 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3863 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3866 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3869 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3872 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3875 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3878 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3881 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3884 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3887 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3890 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3893 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3895 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3898 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3900 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3903 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3905 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3908 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3910 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3913 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3915 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3916 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3919 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3921 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3922 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3925 /* ====================================================================== */
3927 /* Find opc within the table of insns. This is formulated as a switch
3928 statement so that (1) we get compile-time notice of cut-paste errors
3929 for duplicated opcodes, and (2) the compiler generates the binary
3930 search tree, rather than us having to post-process the table. */
3932 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3933 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3935 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3937 enum DisasInsnEnum
{
3938 #include "insn-data.def"
3942 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3947 .help_in1 = in1_##I1, \
3948 .help_in2 = in2_##I2, \
3949 .help_prep = prep_##P, \
3950 .help_wout = wout_##W, \
3951 .help_cout = cout_##CC, \
3952 .help_op = op_##OP, \
3956 /* Allow 0 to be used for NULL in the table below. */
3964 static const DisasInsn insn_info
[] = {
3965 #include "insn-data.def"
3969 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3970 case OPC: return &insn_info[insn_ ## NM];
3972 static const DisasInsn
*lookup_opc(uint16_t opc
)
3975 #include "insn-data.def"
3984 /* Extract a field from the insn. The INSN should be left-aligned in
3985 the uint64_t so that we can more easily utilize the big-bit-endian
3986 definitions we extract from the Principals of Operation. */
3988 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
3996 /* Zero extract the field from the insn. */
3997 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
3999 /* Sign-extend, or un-swap the field as necessary. */
4001 case 0: /* unsigned */
4003 case 1: /* signed */
4004 assert(f
->size
<= 32);
4005 m
= 1u << (f
->size
- 1);
4008 case 2: /* dl+dh split, signed 20 bit. */
4009 r
= ((int8_t)r
<< 12) | (r
>> 8);
4015 /* Validate that the "compressed" encoding we selected above is valid.
4016 I.e. we havn't make two different original fields overlap. */
4017 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4018 o
->presentC
|= 1 << f
->indexC
;
4019 o
->presentO
|= 1 << f
->indexO
;
4021 o
->c
[f
->indexC
] = r
;
4024 /* Lookup the insn at the current PC, extracting the operands into O and
4025 returning the info struct for the insn. Returns NULL for invalid insn. */
4027 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4030 uint64_t insn
, pc
= s
->pc
;
4032 const DisasInsn
*info
;
4034 insn
= ld_code2(env
, pc
);
4035 op
= (insn
>> 8) & 0xff;
4036 ilen
= get_ilen(op
);
4037 s
->next_pc
= s
->pc
+ ilen
;
4044 insn
= ld_code4(env
, pc
) << 32;
4047 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4053 /* We can't actually determine the insn format until we've looked up
4054 the full insn opcode. Which we can't do without locating the
4055 secondary opcode. Assume by default that OP2 is at bit 40; for
4056 those smaller insns that don't actually have a secondary opcode
4057 this will correctly result in OP2 = 0. */
4063 case 0xb2: /* S, RRF, RRE */
4064 case 0xb3: /* RRE, RRD, RRF */
4065 case 0xb9: /* RRE, RRF */
4066 case 0xe5: /* SSE, SIL */
4067 op2
= (insn
<< 8) >> 56;
4071 case 0xc0: /* RIL */
4072 case 0xc2: /* RIL */
4073 case 0xc4: /* RIL */
4074 case 0xc6: /* RIL */
4075 case 0xc8: /* SSF */
4076 case 0xcc: /* RIL */
4077 op2
= (insn
<< 12) >> 60;
4079 case 0xd0 ... 0xdf: /* SS */
4085 case 0xee ... 0xf3: /* SS */
4086 case 0xf8 ... 0xfd: /* SS */
4090 op2
= (insn
<< 40) >> 56;
4094 memset(f
, 0, sizeof(*f
));
4098 /* Lookup the instruction. */
4099 info
= lookup_opc(op
<< 8 | op2
);
4101 /* If we found it, extract the operands. */
4103 DisasFormat fmt
= info
->fmt
;
4106 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4107 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4113 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4115 const DisasInsn
*insn
;
4116 ExitStatus ret
= NO_EXIT
;
4120 insn
= extract_insn(env
, s
, &f
);
4122 /* If not found, try the old interpreter. This includes ILLOPC. */
4124 disas_s390_insn(env
, s
);
4125 switch (s
->is_jmp
) {
4133 ret
= EXIT_PC_UPDATED
;
4136 ret
= EXIT_NORETURN
;
4146 /* Set up the strutures we use to communicate with the helpers. */
4149 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4150 TCGV_UNUSED_I64(o
.out
);
4151 TCGV_UNUSED_I64(o
.out2
);
4152 TCGV_UNUSED_I64(o
.in1
);
4153 TCGV_UNUSED_I64(o
.in2
);
4154 TCGV_UNUSED_I64(o
.addr1
);
4156 /* Implement the instruction. */
4157 if (insn
->help_in1
) {
4158 insn
->help_in1(s
, &f
, &o
);
4160 if (insn
->help_in2
) {
4161 insn
->help_in2(s
, &f
, &o
);
4163 if (insn
->help_prep
) {
4164 insn
->help_prep(s
, &f
, &o
);
4166 if (insn
->help_op
) {
4167 ret
= insn
->help_op(s
, &o
);
4169 if (insn
->help_wout
) {
4170 insn
->help_wout(s
, &f
, &o
);
4172 if (insn
->help_cout
) {
4173 insn
->help_cout(s
, &o
);
4176 /* Free any temporaries created by the helpers. */
4177 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4178 tcg_temp_free_i64(o
.out
);
4180 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4181 tcg_temp_free_i64(o
.out2
);
4183 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4184 tcg_temp_free_i64(o
.in1
);
4186 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4187 tcg_temp_free_i64(o
.in2
);
4189 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4190 tcg_temp_free_i64(o
.addr1
);
4193 /* Advance to the next instruction. */
/*
 * Core translation loop: translate guest code starting at the TB's pc
 * into a TCG opcode stream, one instruction at a time through
 * translate_one(), until an exit condition stops generation.  On the
 * PC-search path (used by gen_intermediate_code_pc) it also records
 * per-opcode pc / cc_op / icount bookkeeping so guest CPU state can be
 * reconstructed after a fault in the middle of a TB.
 *
 * NOTE(review): many physical lines are missing from this copy (local
 * declarations for dc/j/lj/bp/status, braces, the do { } scaffolding,
 * the switch cases at the end, and the search_pc conditionals guarding
 * the gen_opc_* bookkeeping).  Comments below describe only what the
 * surviving lines show; confirm structure against upstream.
 */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int num_insns, max_insns;

    /* Outside 64-bit mode, addresses are truncated to 31 bits. */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;

    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
    dc.is_jmp = DISAS_NEXT;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Never translate across a guest page boundary: the next page may
       not be mapped or may be modified independently. */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;

    /* Per-opcode bookkeeping -- presumably only on the search_pc path
       (guarding conditionals lost here): pad skipped opc slots, then
       record the guest pc, cc_op state and insn count for this slot. */
    j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
    tcg_ctx.gen_opc_instr_start[lj++] = 0;
    tcg_ctx.gen_opc_pc[lj] = dc.pc;
    gen_opc_cc_op[lj] = dc.cc_op;
    tcg_ctx.gen_opc_instr_start[lj] = 1;
    tcg_ctx.gen_opc_icount[lj] = num_insns;

    /* With CF_LAST_IO the final insn of the TB may perform I/O; start
       the icount I/O region before translating it. */
    if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc.pc);

    /* Stop translation on an insn that carries a debugger breakpoint. */
    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc.pc) {
                status = EXIT_PC_STALE;

    if (status == NO_EXIT) {
        status = translate_one(env, &dc);

    /* If we reach a page boundary, are single stepping,
       or exhaust instruction count, stop generation. */
    if (status == NO_EXIT
        && (dc.pc >= next_page_start
            || tcg_ctx.gen_opc_ptr >= gen_opc_end
            || num_insns >= max_insns
            || env->singlestep_enabled)) {
        status = EXIT_PC_STALE;
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {

    /* psw.addr in env is stale at this point; bring it up to date
       (presumably the EXIT_PC_STALE case of the lost switch -- confirm). */
    update_psw_addr(&dc);
    case EXIT_PC_UPDATED:
        /* When single-stepping, flush a pending computed cc so the
           debugger observes a consistent condition code. */
        if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
            gen_op_calc_cc(&dc);
        /* Next TB starts off with CC_OP_DYNAMIC,
           so make sure the cc op type is in env */
        gen_op_set_cc_op(&dc);
    gen_exception(EXCP_DEBUG);
    /* Generate the return instruction */
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

    /* Zero-fill the opc slots emitted after the last recorded insn
       (search_pc bookkeeping tail). */
    j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
    tcg_ctx.gen_opc_instr_start[lj++] = 0;

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4337 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4339 gen_intermediate_code_internal(env
, tb
, 0);
4342 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4344 gen_intermediate_code_internal(env
, tb
, 1);
4347 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4350 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4351 cc_op
= gen_opc_cc_op
[pc_pos
];
4352 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {