4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a
, b
; } s64
;
69 struct { TCGv_i32 a
, b
; } s32
;
75 static void gen_op_calc_cc(DisasContext
*s
);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit
[CC_OP_MAX
];
79 static uint64_t inline_branch_miss
[CC_OP_MAX
];
82 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
84 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
85 if (s
->tb
->flags
& FLAG_MASK_32
) {
86 return pc
| 0x80000000;
92 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
98 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
99 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
101 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
102 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
105 for (i
= 0; i
< 16; i
++) {
106 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
108 cpu_fprintf(f
, "\n");
114 for (i
= 0; i
< 16; i
++) {
115 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
117 cpu_fprintf(f
, "\n");
123 #ifndef CONFIG_USER_ONLY
124 for (i
= 0; i
< 16; i
++) {
125 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
127 cpu_fprintf(f
, "\n");
134 #ifdef DEBUG_INLINE_BRANCHES
135 for (i
= 0; i
< CC_OP_MAX
; i
++) {
136 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
137 inline_branch_miss
[i
], inline_branch_hit
[i
]);
141 cpu_fprintf(f
, "\n");
144 static TCGv_i64 psw_addr
;
145 static TCGv_i64 psw_mask
;
147 static TCGv_i32 cc_op
;
148 static TCGv_i64 cc_src
;
149 static TCGv_i64 cc_dst
;
150 static TCGv_i64 cc_vr
;
152 static char cpu_reg_names
[32][4];
153 static TCGv_i64 regs
[16];
154 static TCGv_i64 fregs
[16];
156 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
158 void s390x_translate_init(void)
162 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
163 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
164 offsetof(CPUS390XState
, psw
.addr
),
166 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
167 offsetof(CPUS390XState
, psw
.mask
),
170 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
172 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
174 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
176 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
179 for (i
= 0; i
< 16; i
++) {
180 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
181 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
182 offsetof(CPUS390XState
, regs
[i
]),
186 for (i
= 0; i
< 16; i
++) {
187 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
188 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
189 offsetof(CPUS390XState
, fregs
[i
].d
),
190 cpu_reg_names
[i
+ 16]);
193 /* register helpers */
198 static TCGv_i64
load_reg(int reg
)
200 TCGv_i64 r
= tcg_temp_new_i64();
201 tcg_gen_mov_i64(r
, regs
[reg
]);
205 static TCGv_i64
load_freg32_i64(int reg
)
207 TCGv_i64 r
= tcg_temp_new_i64();
208 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
212 static void store_reg(int reg
, TCGv_i64 v
)
214 tcg_gen_mov_i64(regs
[reg
], v
);
217 static void store_freg(int reg
, TCGv_i64 v
)
219 tcg_gen_mov_i64(fregs
[reg
], v
);
222 static void store_reg32_i64(int reg
, TCGv_i64 v
)
224 /* 32 bit register writes keep the upper half */
225 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
228 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
230 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
233 static void store_freg32_i64(int reg
, TCGv_i64 v
)
235 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
238 static void return_low128(TCGv_i64 dest
)
240 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
243 static void update_psw_addr(DisasContext
*s
)
246 tcg_gen_movi_i64(psw_addr
, s
->pc
);
249 static void potential_page_fault(DisasContext
*s
)
251 #ifndef CONFIG_USER_ONLY
257 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
259 return (uint64_t)cpu_lduw_code(env
, pc
);
262 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
264 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
267 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
269 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
272 static int get_mem_index(DisasContext
*s
)
274 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
275 case PSW_ASC_PRIMARY
>> 32:
277 case PSW_ASC_SECONDARY
>> 32:
279 case PSW_ASC_HOME
>> 32:
287 static void gen_exception(int excp
)
289 TCGv_i32 tmp
= tcg_const_i32(excp
);
290 gen_helper_exception(cpu_env
, tmp
);
291 tcg_temp_free_i32(tmp
);
294 static void gen_program_exception(DisasContext
*s
, int code
)
298 /* Remember what pgm exeption this was. */
299 tmp
= tcg_const_i32(code
);
300 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
301 tcg_temp_free_i32(tmp
);
303 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
304 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
305 tcg_temp_free_i32(tmp
);
307 /* Advance past instruction. */
314 /* Trigger exception. */
315 gen_exception(EXCP_PGM
);
318 s
->is_jmp
= DISAS_EXCP
;
321 static inline void gen_illegal_opcode(DisasContext
*s
)
323 gen_program_exception(s
, PGM_SPECIFICATION
);
326 static inline void check_privileged(DisasContext
*s
)
328 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
329 gen_program_exception(s
, PGM_PRIVILEGED
);
333 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
337 /* 31-bitify the immediate part; register contents are dealt with below */
338 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
344 tmp
= tcg_const_i64(d2
);
345 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
350 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
354 tmp
= tcg_const_i64(d2
);
355 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
360 tmp
= tcg_const_i64(d2
);
363 /* 31-bit mode mask if there are values loaded from registers */
364 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
365 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
371 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
373 s
->cc_op
= CC_OP_CONST0
+ val
;
376 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
378 tcg_gen_discard_i64(cc_src
);
379 tcg_gen_mov_i64(cc_dst
, dst
);
380 tcg_gen_discard_i64(cc_vr
);
384 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
387 tcg_gen_mov_i64(cc_src
, src
);
388 tcg_gen_mov_i64(cc_dst
, dst
);
389 tcg_gen_discard_i64(cc_vr
);
393 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
394 TCGv_i64 dst
, TCGv_i64 vr
)
396 tcg_gen_mov_i64(cc_src
, src
);
397 tcg_gen_mov_i64(cc_dst
, dst
);
398 tcg_gen_mov_i64(cc_vr
, vr
);
402 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
404 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
407 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
409 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
412 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
414 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
417 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
419 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
422 /* CC value is in env->cc_op */
423 static void set_cc_static(DisasContext
*s
)
425 tcg_gen_discard_i64(cc_src
);
426 tcg_gen_discard_i64(cc_dst
);
427 tcg_gen_discard_i64(cc_vr
);
428 s
->cc_op
= CC_OP_STATIC
;
431 static void gen_op_set_cc_op(DisasContext
*s
)
433 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
434 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
438 static void gen_update_cc_op(DisasContext
*s
)
443 /* calculates cc into cc_op */
444 static void gen_op_calc_cc(DisasContext
*s
)
446 TCGv_i32 local_cc_op
= tcg_const_i32(s
->cc_op
);
447 TCGv_i64 dummy
= tcg_const_i64(0);
454 /* s->cc_op is the cc value */
455 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
458 /* env->cc_op already is the cc value */
473 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
478 case CC_OP_LTUGTU_32
:
479 case CC_OP_LTUGTU_64
:
486 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
501 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
504 /* unknown operation - assume 3 arguments and cc_op in env */
505 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
511 tcg_temp_free_i32(local_cc_op
);
512 tcg_temp_free_i64(dummy
);
514 /* We now have cc in cc_op as constant */
518 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
520 /* NOTE: we handle the case where the TB spans two pages here */
521 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
522 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
523 && !s
->singlestep_enabled
524 && !(s
->tb
->cflags
& CF_LAST_IO
));
527 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
529 #ifdef DEBUG_INLINE_BRANCHES
530 inline_branch_miss
[cc_op
]++;
534 static void account_inline_branch(DisasContext
*s
, int cc_op
)
536 #ifdef DEBUG_INLINE_BRANCHES
537 inline_branch_hit
[cc_op
]++;
541 /* Table of mask values to comparison codes, given a comparison as input.
542 For a true comparison CC=3 will never be set, but we treat this
543 conservatively for possible use when CC=3 indicates overflow. */
544 static const TCGCond ltgt_cond
[16] = {
545 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
546 TCG_COND_GT
, TCG_COND_NEVER
, /* | | GT | x */
547 TCG_COND_LT
, TCG_COND_NEVER
, /* | LT | | x */
548 TCG_COND_NE
, TCG_COND_NEVER
, /* | LT | GT | x */
549 TCG_COND_EQ
, TCG_COND_NEVER
, /* EQ | | | x */
550 TCG_COND_GE
, TCG_COND_NEVER
, /* EQ | | GT | x */
551 TCG_COND_LE
, TCG_COND_NEVER
, /* EQ | LT | | x */
552 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
555 /* Table of mask values to comparison codes, given a logic op as input.
556 For such, only CC=0 and CC=1 should be possible. */
557 static const TCGCond nz_cond
[16] = {
559 TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
, TCG_COND_NEVER
,
561 TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
, TCG_COND_NE
,
563 TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
, TCG_COND_EQ
,
564 /* EQ | NE | x | x */
565 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
568 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
569 details required to generate a TCG comparison. */
570 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
573 enum cc_op old_cc_op
= s
->cc_op
;
575 if (mask
== 15 || mask
== 0) {
576 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
579 c
->g1
= c
->g2
= true;
584 /* Find the TCG condition for the mask + cc op. */
590 cond
= ltgt_cond
[mask
];
591 if (cond
== TCG_COND_NEVER
) {
594 account_inline_branch(s
, old_cc_op
);
597 case CC_OP_LTUGTU_32
:
598 case CC_OP_LTUGTU_64
:
599 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
600 if (cond
== TCG_COND_NEVER
) {
603 account_inline_branch(s
, old_cc_op
);
607 cond
= nz_cond
[mask
];
608 if (cond
== TCG_COND_NEVER
) {
611 account_inline_branch(s
, old_cc_op
);
626 account_inline_branch(s
, old_cc_op
);
641 account_inline_branch(s
, old_cc_op
);
645 switch (mask
& 0xa) {
646 case 8: /* src == 0 -> no one bit found */
649 case 2: /* src != 0 -> one bit found */
655 account_inline_branch(s
, old_cc_op
);
660 /* Calculate cc value. */
665 /* Jump based on CC. We'll load up the real cond below;
666 the assignment here merely avoids a compiler warning. */
667 account_noninline_branch(s
, old_cc_op
);
668 old_cc_op
= CC_OP_STATIC
;
669 cond
= TCG_COND_NEVER
;
673 /* Load up the arguments of the comparison. */
675 c
->g1
= c
->g2
= false;
679 c
->u
.s32
.a
= tcg_temp_new_i32();
680 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
681 c
->u
.s32
.b
= tcg_const_i32(0);
684 case CC_OP_LTUGTU_32
:
686 c
->u
.s32
.a
= tcg_temp_new_i32();
687 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
688 c
->u
.s32
.b
= tcg_temp_new_i32();
689 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
696 c
->u
.s64
.b
= tcg_const_i64(0);
700 case CC_OP_LTUGTU_64
:
703 c
->g1
= c
->g2
= true;
709 c
->u
.s64
.a
= tcg_temp_new_i64();
710 c
->u
.s64
.b
= tcg_const_i64(0);
711 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
719 case 0x8 | 0x4 | 0x2: /* cc != 3 */
721 c
->u
.s32
.b
= tcg_const_i32(3);
723 case 0x8 | 0x4 | 0x1: /* cc != 2 */
725 c
->u
.s32
.b
= tcg_const_i32(2);
727 case 0x8 | 0x2 | 0x1: /* cc != 1 */
729 c
->u
.s32
.b
= tcg_const_i32(1);
731 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
734 c
->u
.s32
.a
= tcg_temp_new_i32();
735 c
->u
.s32
.b
= tcg_const_i32(0);
736 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
738 case 0x8 | 0x4: /* cc < 2 */
740 c
->u
.s32
.b
= tcg_const_i32(2);
742 case 0x8: /* cc == 0 */
744 c
->u
.s32
.b
= tcg_const_i32(0);
746 case 0x4 | 0x2 | 0x1: /* cc != 0 */
748 c
->u
.s32
.b
= tcg_const_i32(0);
750 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
753 c
->u
.s32
.a
= tcg_temp_new_i32();
754 c
->u
.s32
.b
= tcg_const_i32(0);
755 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
757 case 0x4: /* cc == 1 */
759 c
->u
.s32
.b
= tcg_const_i32(1);
761 case 0x2 | 0x1: /* cc > 1 */
763 c
->u
.s32
.b
= tcg_const_i32(1);
765 case 0x2: /* cc == 2 */
767 c
->u
.s32
.b
= tcg_const_i32(2);
769 case 0x1: /* cc == 3 */
771 c
->u
.s32
.b
= tcg_const_i32(3);
774 /* CC is masked by something else: (8 >> cc) & mask. */
777 c
->u
.s32
.a
= tcg_const_i32(8);
778 c
->u
.s32
.b
= tcg_const_i32(0);
779 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
780 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
791 static void free_compare(DisasCompare
*c
)
795 tcg_temp_free_i64(c
->u
.s64
.a
);
797 tcg_temp_free_i32(c
->u
.s32
.a
);
802 tcg_temp_free_i64(c
->u
.s64
.b
);
804 tcg_temp_free_i32(c
->u
.s32
.b
);
809 /* ====================================================================== */
810 /* Define the insn format enumeration. */
811 #define F0(N) FMT_##N,
812 #define F1(N, X1) F0(N)
813 #define F2(N, X1, X2) F0(N)
814 #define F3(N, X1, X2, X3) F0(N)
815 #define F4(N, X1, X2, X3, X4) F0(N)
816 #define F5(N, X1, X2, X3, X4, X5) F0(N)
819 #include "insn-format.def"
829 /* Define a structure to hold the decoded fields. We'll store each inside
830 an array indexed by an enum. In order to conserve memory, we'll arrange
831 for fields that do not exist at the same time to overlap, thus the "C"
832 for compact. For checking purposes there is an "O" for original index
833 as well that will be applied to availability bitmaps. */
835 enum DisasFieldIndexO
{
858 enum DisasFieldIndexC
{
892 unsigned presentC
:16;
893 unsigned int presentO
;
897 /* This is the way fields are to be accessed out of DisasFields. */
898 #define have_field(S, F) have_field1((S), FLD_O_##F)
899 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
901 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
903 return (f
->presentO
>> c
) & 1;
906 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
907 enum DisasFieldIndexC c
)
909 assert(have_field1(f
, o
));
913 /* Describe the layout of each field in each format. */
914 typedef struct DisasField
{
918 unsigned int indexC
:6;
919 enum DisasFieldIndexO indexO
:8;
922 typedef struct DisasFormatInfo
{
923 DisasField op
[NUM_C_FIELD
];
926 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
927 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
928 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
929 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
930 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
931 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
932 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
933 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
934 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
935 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
936 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
937 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
938 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
939 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
941 #define F0(N) { { } },
942 #define F1(N, X1) { { X1 } },
943 #define F2(N, X1, X2) { { X1, X2 } },
944 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
945 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
946 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
948 static const DisasFormatInfo format_info
[] = {
949 #include "insn-format.def"
967 /* Generally, we'll extract operands into this structures, operate upon
968 them, and store them back. See the "in1", "in2", "prep", "wout" sets
969 of routines below for more details. */
971 bool g_out
, g_out2
, g_in1
, g_in2
;
972 TCGv_i64 out
, out2
, in1
, in2
;
976 /* Return values from translate_one, indicating the state of the TB. */
978 /* Continue the TB. */
980 /* We have emitted one or more goto_tb. No fixup required. */
982 /* We are not using a goto_tb (for whatever reason), but have updated
983 the PC (for whatever reason), so there's no need to do it again on
986 /* We are exiting the TB, but have neither emitted a goto_tb, nor
987 updated the PC for the next instruction to be executed. */
989 /* We are ending the TB with a noreturn function call, e.g. longjmp.
990 No following code will be executed. */
994 typedef enum DisasFacility
{
995 FAC_Z
, /* zarch (default) */
996 FAC_CASS
, /* compare and swap and store */
997 FAC_CASS2
, /* compare and swap and store 2*/
998 FAC_DFP
, /* decimal floating point */
999 FAC_DFPR
, /* decimal floating point rounding */
1000 FAC_DO
, /* distinct operands */
1001 FAC_EE
, /* execute extensions */
1002 FAC_EI
, /* extended immediate */
1003 FAC_FPE
, /* floating point extension */
1004 FAC_FPSSH
, /* floating point support sign handling */
1005 FAC_FPRGR
, /* FPR-GR transfer */
1006 FAC_GIE
, /* general instructions extension */
1007 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1008 FAC_HW
, /* high-word */
1009 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1010 FAC_LOC
, /* load/store on condition */
1011 FAC_LD
, /* long displacement */
1012 FAC_PC
, /* population count */
1013 FAC_SCF
, /* store clock fast */
1014 FAC_SFLE
, /* store facility list extended */
1020 DisasFacility fac
:6;
1024 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1025 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1026 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1027 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1028 void (*help_cout
)(DisasContext
*, DisasOps
*);
1029 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1034 /* ====================================================================== */
1035 /* Miscelaneous helpers, used by several operations. */
1037 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1038 DisasOps
*o
, int mask
)
1040 int b2
= get_field(f
, b2
);
1041 int d2
= get_field(f
, d2
);
1044 o
->in2
= tcg_const_i64(d2
& mask
);
1046 o
->in2
= get_address(s
, 0, b2
, d2
);
1047 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1051 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1053 if (dest
== s
->next_pc
) {
1056 if (use_goto_tb(s
, dest
)) {
1057 gen_update_cc_op(s
);
1059 tcg_gen_movi_i64(psw_addr
, dest
);
1060 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1061 return EXIT_GOTO_TB
;
1063 tcg_gen_movi_i64(psw_addr
, dest
);
1064 return EXIT_PC_UPDATED
;
1068 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1069 bool is_imm
, int imm
, TCGv_i64 cdest
)
1072 uint64_t dest
= s
->pc
+ 2 * imm
;
1075 /* Take care of the special cases first. */
1076 if (c
->cond
== TCG_COND_NEVER
) {
1081 if (dest
== s
->next_pc
) {
1082 /* Branch to next. */
1086 if (c
->cond
== TCG_COND_ALWAYS
) {
1087 ret
= help_goto_direct(s
, dest
);
1091 if (TCGV_IS_UNUSED_I64(cdest
)) {
1092 /* E.g. bcr %r0 -> no branch. */
1096 if (c
->cond
== TCG_COND_ALWAYS
) {
1097 tcg_gen_mov_i64(psw_addr
, cdest
);
1098 ret
= EXIT_PC_UPDATED
;
1103 if (use_goto_tb(s
, s
->next_pc
)) {
1104 if (is_imm
&& use_goto_tb(s
, dest
)) {
1105 /* Both exits can use goto_tb. */
1106 gen_update_cc_op(s
);
1108 lab
= gen_new_label();
1110 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1112 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1115 /* Branch not taken. */
1117 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1118 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1123 tcg_gen_movi_i64(psw_addr
, dest
);
1124 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1128 /* Fallthru can use goto_tb, but taken branch cannot. */
1129 /* Store taken branch destination before the brcond. This
1130 avoids having to allocate a new local temp to hold it.
1131 We'll overwrite this in the not taken case anyway. */
1133 tcg_gen_mov_i64(psw_addr
, cdest
);
1136 lab
= gen_new_label();
1138 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1140 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1143 /* Branch not taken. */
1144 gen_update_cc_op(s
);
1146 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1147 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1151 tcg_gen_movi_i64(psw_addr
, dest
);
1153 ret
= EXIT_PC_UPDATED
;
1156 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1157 Most commonly we're single-stepping or some other condition that
1158 disables all use of goto_tb. Just update the PC and exit. */
1160 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1162 cdest
= tcg_const_i64(dest
);
1166 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1169 TCGv_i32 t0
= tcg_temp_new_i32();
1170 TCGv_i64 t1
= tcg_temp_new_i64();
1171 TCGv_i64 z
= tcg_const_i64(0);
1172 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1173 tcg_gen_extu_i32_i64(t1
, t0
);
1174 tcg_temp_free_i32(t0
);
1175 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1176 tcg_temp_free_i64(t1
);
1177 tcg_temp_free_i64(z
);
1181 tcg_temp_free_i64(cdest
);
1183 tcg_temp_free_i64(next
);
1185 ret
= EXIT_PC_UPDATED
;
1193 /* ====================================================================== */
1194 /* The operations. These perform the bulk of the work for any insn,
1195 usually after the operands have been loaded and output initialized. */
1197 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1199 gen_helper_abs_i64(o
->out
, o
->in2
);
1203 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1205 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1209 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1211 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1215 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1217 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1218 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1222 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1224 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1228 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1232 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1234 /* XXX possible optimization point */
1236 cc
= tcg_temp_new_i64();
1237 tcg_gen_extu_i32_i64(cc
, cc_op
);
1238 tcg_gen_shri_i64(cc
, cc
, 1);
1240 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
1241 tcg_temp_free_i64(cc
);
1245 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1247 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1251 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1253 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1257 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1259 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1260 return_low128(o
->out2
);
1264 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1266 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1270 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1272 int shift
= s
->insn
->data
& 0xff;
1273 int size
= s
->insn
->data
>> 8;
1274 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1277 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1278 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1279 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1281 /* Produce the CC from only the bits manipulated. */
1282 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1283 set_cc_nz_u64(s
, cc_dst
);
1287 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1289 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1290 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1291 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1292 return EXIT_PC_UPDATED
;
1298 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1300 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1301 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1304 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1306 int m1
= get_field(s
->fields
, m1
);
1307 bool is_imm
= have_field(s
->fields
, i2
);
1308 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1311 disas_jcc(s
, &c
, m1
);
1312 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1315 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1317 int r1
= get_field(s
->fields
, r1
);
1318 bool is_imm
= have_field(s
->fields
, i2
);
1319 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1323 c
.cond
= TCG_COND_NE
;
1328 t
= tcg_temp_new_i64();
1329 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1330 store_reg32_i64(r1
, t
);
1331 c
.u
.s32
.a
= tcg_temp_new_i32();
1332 c
.u
.s32
.b
= tcg_const_i32(0);
1333 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1334 tcg_temp_free_i64(t
);
1336 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1339 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1341 int r1
= get_field(s
->fields
, r1
);
1342 bool is_imm
= have_field(s
->fields
, i2
);
1343 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1346 c
.cond
= TCG_COND_NE
;
1351 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1352 c
.u
.s64
.a
= regs
[r1
];
1353 c
.u
.s64
.b
= tcg_const_i64(0);
1355 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1358 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1360 int r1
= get_field(s
->fields
, r1
);
1361 int r3
= get_field(s
->fields
, r3
);
1362 bool is_imm
= have_field(s
->fields
, i2
);
1363 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1367 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1372 t
= tcg_temp_new_i64();
1373 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1374 c
.u
.s32
.a
= tcg_temp_new_i32();
1375 c
.u
.s32
.b
= tcg_temp_new_i32();
1376 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1377 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1378 store_reg32_i64(r1
, t
);
1379 tcg_temp_free_i64(t
);
1381 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1384 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1386 int r1
= get_field(s
->fields
, r1
);
1387 int r3
= get_field(s
->fields
, r3
);
1388 bool is_imm
= have_field(s
->fields
, i2
);
1389 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1392 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1395 if (r1
== (r3
| 1)) {
1396 c
.u
.s64
.b
= load_reg(r3
| 1);
1399 c
.u
.s64
.b
= regs
[r3
| 1];
1403 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1404 c
.u
.s64
.a
= regs
[r1
];
1407 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1410 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1412 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1417 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1419 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1424 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1426 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1431 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1433 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1434 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1435 tcg_temp_free_i32(m3
);
1436 gen_set_cc_nz_f32(s
, o
->in2
);
1440 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1442 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1443 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1444 tcg_temp_free_i32(m3
);
1445 gen_set_cc_nz_f64(s
, o
->in2
);
1449 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1451 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1452 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1453 tcg_temp_free_i32(m3
);
1454 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1458 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1460 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1461 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1462 tcg_temp_free_i32(m3
);
1463 gen_set_cc_nz_f32(s
, o
->in2
);
1467 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1469 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1470 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1471 tcg_temp_free_i32(m3
);
1472 gen_set_cc_nz_f64(s
, o
->in2
);
1476 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1478 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1479 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1480 tcg_temp_free_i32(m3
);
1481 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1485 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1487 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1488 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1489 tcg_temp_free_i32(m3
);
1493 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1495 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1496 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1497 tcg_temp_free_i32(m3
);
1501 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1503 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1504 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1505 tcg_temp_free_i32(m3
);
1506 return_low128(o
->out2
);
1510 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1512 int r2
= get_field(s
->fields
, r2
);
1513 TCGv_i64 len
= tcg_temp_new_i64();
1515 potential_page_fault(s
);
1516 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1518 return_low128(o
->out
);
1520 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1521 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1522 tcg_temp_free_i64(len
);
1527 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1529 int l
= get_field(s
->fields
, l1
);
1534 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1535 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1538 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1539 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1542 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1543 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1546 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1547 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1550 potential_page_fault(s
);
1551 vl
= tcg_const_i32(l
);
1552 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1553 tcg_temp_free_i32(vl
);
1557 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1561 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1563 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1564 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1565 potential_page_fault(s
);
1566 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1567 tcg_temp_free_i32(r1
);
1568 tcg_temp_free_i32(r3
);
1573 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1575 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1576 TCGv_i32 t1
= tcg_temp_new_i32();
1577 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1578 potential_page_fault(s
);
1579 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1581 tcg_temp_free_i32(t1
);
1582 tcg_temp_free_i32(m3
);
1586 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1588 potential_page_fault(s
);
1589 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1591 return_low128(o
->in2
);
1595 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1597 int r3
= get_field(s
->fields
, r3
);
1598 potential_page_fault(s
);
1599 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1604 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1606 int r3
= get_field(s
->fields
, r3
);
1607 potential_page_fault(s
);
1608 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
#ifndef CONFIG_USER_ONLY
1614 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1616 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1617 check_privileged(s
);
1618 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1619 tcg_temp_free_i32(r1
);
1625 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1627 int r3
= get_field(s
->fields
, r3
);
1628 TCGv_i64 in3
= tcg_temp_new_i64();
1629 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1630 potential_page_fault(s
);
1631 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1632 tcg_temp_free_i64(in3
);
1637 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1639 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1640 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1641 potential_page_fault(s
);
1642 /* XXX rewrite in tcg */
1643 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1648 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1650 TCGv_i64 t1
= tcg_temp_new_i64();
1651 TCGv_i32 t2
= tcg_temp_new_i32();
1652 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1653 gen_helper_cvd(t1
, t2
);
1654 tcg_temp_free_i32(t2
);
1655 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1656 tcg_temp_free_i64(t1
);
#ifndef CONFIG_USER_ONLY
1661 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1665 check_privileged(s
);
1666 potential_page_fault(s
);
1668 /* We pretend the format is RX_a so that D2 is the field we want. */
1669 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1670 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1671 tcg_temp_free_i32(tmp
);
1676 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1678 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1679 return_low128(o
->out
);
1683 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1685 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1686 return_low128(o
->out
);
1690 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1692 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1693 return_low128(o
->out
);
1697 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
1699 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
1700 return_low128(o
->out
);
1704 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
1706 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1710 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
1712 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1716 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
1718 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1719 return_low128(o
->out2
);
1723 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
1725 int r2
= get_field(s
->fields
, r2
);
1726 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1730 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
1732 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1736 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
1738 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1739 tb->flags, (ab)use the tb->cs_base field as the address of
1740 the template in memory, and grab 8 bits of tb->flags/cflags for
1741 the contents of the register. We would then recognize all this
1742 in gen_intermediate_code_internal, generating code for exactly
1743 one instruction. This new TB then gets executed normally.
1745 On the other hand, this seems to be mostly used for modifying
1746 MVC inside of memcpy, which needs a helper call anyway. So
1747 perhaps this doesn't bear thinking about any further. */
1754 tmp
= tcg_const_i64(s
->next_pc
);
1755 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
1756 tcg_temp_free_i64(tmp
);
1762 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
1764 /* We'll use the original input for cc computation, since we get to
1765 compare that against 0, which ought to be better than comparing
1766 the real output against 64. It also lets cc_dst be a convenient
1767 temporary during our computation. */
1768 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
1770 /* R1 = IN ? CLZ(IN) : 64. */
1771 gen_helper_clz(o
->out
, o
->in2
);
1773 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1774 value by 64, which is undefined. But since the shift is 64 iff the
1775 input is zero, we still get the correct result after and'ing. */
1776 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
1777 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
1778 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
1782 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
1784 int m3
= get_field(s
->fields
, m3
);
1785 int pos
, len
, base
= s
->insn
->data
;
1786 TCGv_i64 tmp
= tcg_temp_new_i64();
1791 /* Effectively a 32-bit load. */
1792 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
1799 /* Effectively a 16-bit load. */
1800 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
1808 /* Effectively an 8-bit load. */
1809 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1814 pos
= base
+ ctz32(m3
) * 8;
1815 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
1816 ccm
= ((1ull << len
) - 1) << pos
;
1820 /* This is going to be a sequence of loads and inserts. */
1821 pos
= base
+ 32 - 8;
1825 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
1826 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
1827 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
1830 m3
= (m3
<< 1) & 0xf;
1836 tcg_gen_movi_i64(tmp
, ccm
);
1837 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
1838 tcg_temp_free_i64(tmp
);
1842 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
1844 int shift
= s
->insn
->data
& 0xff;
1845 int size
= s
->insn
->data
>> 8;
1846 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
1850 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
1855 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
1857 t1
= tcg_temp_new_i64();
1858 tcg_gen_shli_i64(t1
, psw_mask
, 20);
1859 tcg_gen_shri_i64(t1
, t1
, 36);
1860 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1862 tcg_gen_extu_i32_i64(t1
, cc_op
);
1863 tcg_gen_shli_i64(t1
, t1
, 28);
1864 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
1865 tcg_temp_free_i64(t1
);
#ifndef CONFIG_USER_ONLY
1870 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
1872 check_privileged(s
);
1873 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
1877 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
1879 check_privileged(s
);
1880 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
1885 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
1887 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
1891 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
1893 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
1897 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
1899 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1903 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
1905 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1909 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
1911 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
1912 return_low128(o
->out2
);
1916 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
1918 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
1919 return_low128(o
->out2
);
1923 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
1925 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
1929 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
1931 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
1935 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
1937 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
1941 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
1943 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
1947 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
1949 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
1953 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
1955 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
1959 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
1961 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
1965 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
1967 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
#ifndef CONFIG_USER_ONLY
1972 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
1974 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1975 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1976 check_privileged(s
);
1977 potential_page_fault(s
);
1978 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
1979 tcg_temp_free_i32(r1
);
1980 tcg_temp_free_i32(r3
);
1984 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
1986 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1987 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1988 check_privileged(s
);
1989 potential_page_fault(s
);
1990 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
1991 tcg_temp_free_i32(r1
);
1992 tcg_temp_free_i32(r3
);
1995 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
1997 check_privileged(s
);
1998 potential_page_fault(s
);
1999 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2004 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2008 check_privileged(s
);
2010 t1
= tcg_temp_new_i64();
2011 t2
= tcg_temp_new_i64();
2012 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2013 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2014 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2015 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2016 tcg_gen_shli_i64(t1
, t1
, 32);
2017 gen_helper_load_psw(cpu_env
, t1
, t2
);
2018 tcg_temp_free_i64(t1
);
2019 tcg_temp_free_i64(t2
);
2020 return EXIT_NORETURN
;
2023 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2027 check_privileged(s
);
2029 t1
= tcg_temp_new_i64();
2030 t2
= tcg_temp_new_i64();
2031 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2032 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2033 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2034 gen_helper_load_psw(cpu_env
, t1
, t2
);
2035 tcg_temp_free_i64(t1
);
2036 tcg_temp_free_i64(t2
);
2037 return EXIT_NORETURN
;
2041 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2043 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2044 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2045 potential_page_fault(s
);
2046 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2047 tcg_temp_free_i32(r1
);
2048 tcg_temp_free_i32(r3
);
2052 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2054 int r1
= get_field(s
->fields
, r1
);
2055 int r3
= get_field(s
->fields
, r3
);
2056 TCGv_i64 t
= tcg_temp_new_i64();
2057 TCGv_i64 t4
= tcg_const_i64(4);
2060 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2061 store_reg32_i64(r1
, t
);
2065 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2069 tcg_temp_free_i64(t
);
2070 tcg_temp_free_i64(t4
);
2074 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2076 int r1
= get_field(s
->fields
, r1
);
2077 int r3
= get_field(s
->fields
, r3
);
2078 TCGv_i64 t
= tcg_temp_new_i64();
2079 TCGv_i64 t4
= tcg_const_i64(4);
2082 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2083 store_reg32h_i64(r1
, t
);
2087 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2091 tcg_temp_free_i64(t
);
2092 tcg_temp_free_i64(t4
);
2096 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2098 int r1
= get_field(s
->fields
, r1
);
2099 int r3
= get_field(s
->fields
, r3
);
2100 TCGv_i64 t8
= tcg_const_i64(8);
2103 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2107 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2111 tcg_temp_free_i64(t8
);
2115 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2118 o
->g_out
= o
->g_in2
;
2119 TCGV_UNUSED_I64(o
->in2
);
2124 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2128 o
->g_out
= o
->g_in1
;
2129 o
->g_out2
= o
->g_in2
;
2130 TCGV_UNUSED_I64(o
->in1
);
2131 TCGV_UNUSED_I64(o
->in2
);
2132 o
->g_in1
= o
->g_in2
= false;
2136 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2138 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2139 potential_page_fault(s
);
2140 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2141 tcg_temp_free_i32(l
);
2145 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2147 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2148 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2149 potential_page_fault(s
);
2150 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2151 tcg_temp_free_i32(r1
);
2152 tcg_temp_free_i32(r2
);
2157 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2159 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2160 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2161 potential_page_fault(s
);
2162 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2163 tcg_temp_free_i32(r1
);
2164 tcg_temp_free_i32(r3
);
#ifndef CONFIG_USER_ONLY
2170 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2172 int r1
= get_field(s
->fields
, l1
);
2173 check_privileged(s
);
2174 potential_page_fault(s
);
2175 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2180 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2182 int r1
= get_field(s
->fields
, l1
);
2183 check_privileged(s
);
2184 potential_page_fault(s
);
2185 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2191 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2193 potential_page_fault(s
);
2194 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2199 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2201 potential_page_fault(s
);
2202 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2204 return_low128(o
->in2
);
2208 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2210 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2214 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2216 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2217 return_low128(o
->out2
);
2221 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2223 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2227 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2229 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2233 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2235 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2239 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2241 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2242 return_low128(o
->out2
);
2246 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2248 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2249 return_low128(o
->out2
);
2253 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2255 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2256 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2257 tcg_temp_free_i64(r3
);
2261 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2263 int r3
= get_field(s
->fields
, r3
);
2264 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2268 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2270 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2271 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2272 tcg_temp_free_i64(r3
);
2276 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2278 int r3
= get_field(s
->fields
, r3
);
2279 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2283 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2285 gen_helper_nabs_i64(o
->out
, o
->in2
);
2289 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2291 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2295 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2297 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2301 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2303 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2304 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2308 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2310 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2311 potential_page_fault(s
);
2312 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2313 tcg_temp_free_i32(l
);
2318 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2320 tcg_gen_neg_i64(o
->out
, o
->in2
);
2324 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2326 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2330 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2332 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2336 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2338 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2339 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2343 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2345 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2346 potential_page_fault(s
);
2347 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2348 tcg_temp_free_i32(l
);
2353 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2355 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2359 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2361 int shift
= s
->insn
->data
& 0xff;
2362 int size
= s
->insn
->data
>> 8;
2363 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2366 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2367 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2369 /* Produce the CC from only the bits manipulated. */
2370 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2371 set_cc_nz_u64(s
, cc_dst
);
#ifndef CONFIG_USER_ONLY
2376 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2378 check_privileged(s
);
2379 gen_helper_ptlb(cpu_env
);
2384 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2386 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2390 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2392 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2396 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2398 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2402 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2404 TCGv_i32 t1
= tcg_temp_new_i32();
2405 TCGv_i32 t2
= tcg_temp_new_i32();
2406 TCGv_i32 to
= tcg_temp_new_i32();
2407 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2408 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2409 tcg_gen_rotl_i32(to
, t1
, t2
);
2410 tcg_gen_extu_i32_i64(o
->out
, to
);
2411 tcg_temp_free_i32(t1
);
2412 tcg_temp_free_i32(t2
);
2413 tcg_temp_free_i32(to
);
2417 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2419 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
#ifndef CONFIG_USER_ONLY
2424 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2426 check_privileged(s
);
2427 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2432 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2434 check_privileged(s
);
2435 gen_helper_sacf(cpu_env
, o
->in2
);
2436 /* Addressing mode has changed, so end the block. */
2437 return EXIT_PC_STALE
;
2441 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2443 int r1
= get_field(s
->fields
, r1
);
2444 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2448 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2450 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2454 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2456 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2460 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2462 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2463 return_low128(o
->out2
);
2467 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2469 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2473 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2475 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2479 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2481 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2482 return_low128(o
->out2
);
#ifndef CONFIG_USER_ONLY
2487 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2489 check_privileged(s
);
2490 potential_page_fault(s
);
2491 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2496 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2498 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2499 check_privileged(s
);
2500 potential_page_fault(s
);
2501 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2502 tcg_temp_free_i32(r1
);
2507 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2509 uint64_t sign
= 1ull << s
->insn
->data
;
2510 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2511 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2512 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2513 /* The arithmetic left shift is curious in that it does not affect
2514 the sign bit. Copy that over from the source unchanged. */
2515 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2516 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2517 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2521 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2523 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2527 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2529 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2533 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2535 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
2539 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
2541 gen_helper_sfpc(cpu_env
, o
->in2
);
#ifndef CONFIG_USER_ONLY
2546 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
2548 check_privileged(s
);
2549 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
2550 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
2554 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
2556 check_privileged(s
);
2557 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
2561 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
2563 check_privileged(s
);
2564 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
2568 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
2570 check_privileged(s
);
2571 /* ??? Surely cpu address != cpu number. In any case the previous
2572 version of this stored more than the required half-word, so it
2573 is unlikely this has ever been tested. */
2574 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2578 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
2580 gen_helper_stck(o
->out
, cpu_env
);
2581 /* ??? We don't implement clock states. */
2582 gen_op_movi_cc(s
, 0);
2586 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
2588 TCGv_i64 c1
= tcg_temp_new_i64();
2589 TCGv_i64 c2
= tcg_temp_new_i64();
2590 gen_helper_stck(c1
, cpu_env
);
2591 /* Shift the 64-bit value into its place as a zero-extended
2592 104-bit value. Note that "bit positions 64-103 are always
2593 non-zero so that they compare differently to STCK"; we set
2594 the least significant bit to 1. */
2595 tcg_gen_shli_i64(c2
, c1
, 56);
2596 tcg_gen_shri_i64(c1
, c1
, 8);
2597 tcg_gen_ori_i64(c2
, c2
, 0x10000);
2598 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
2599 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2600 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
2601 tcg_temp_free_i64(c1
);
2602 tcg_temp_free_i64(c2
);
2603 /* ??? We don't implement clock states. */
2604 gen_op_movi_cc(s
, 0);
2608 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
2610 check_privileged(s
);
2611 gen_helper_sckc(cpu_env
, o
->in2
);
2615 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
2617 check_privileged(s
);
2618 gen_helper_stckc(o
->out
, cpu_env
);
2622 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
2624 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2625 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2626 check_privileged(s
);
2627 potential_page_fault(s
);
2628 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
2629 tcg_temp_free_i32(r1
);
2630 tcg_temp_free_i32(r3
);
2634 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
2636 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2637 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2638 check_privileged(s
);
2639 potential_page_fault(s
);
2640 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
2641 tcg_temp_free_i32(r1
);
2642 tcg_temp_free_i32(r3
);
2646 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
2648 check_privileged(s
);
2649 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
2653 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
2655 check_privileged(s
);
2656 gen_helper_spt(cpu_env
, o
->in2
);
2660 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
2663 /* We really ought to have more complete indication of facilities
2664 that we implement. Address this when STFLE is implemented. */
2665 check_privileged(s
);
2666 f
= tcg_const_i64(0xc0000000);
2667 a
= tcg_const_i64(200);
2668 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
2669 tcg_temp_free_i64(f
);
2670 tcg_temp_free_i64(a
);
2674 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
2676 check_privileged(s
);
2677 gen_helper_stpt(o
->out
, cpu_env
);
2681 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
2683 check_privileged(s
);
2684 potential_page_fault(s
);
2685 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
2690 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
2692 check_privileged(s
);
2693 gen_helper_spx(cpu_env
, o
->in2
);
2697 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
2699 check_privileged(s
);
2700 /* Not operational. */
2701 gen_op_movi_cc(s
, 3);
2705 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
2707 check_privileged(s
);
2708 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
2709 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
2713 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
2715 uint64_t i2
= get_field(s
->fields
, i2
);
2718 check_privileged(s
);
2720 /* It is important to do what the instruction name says: STORE THEN.
2721 If we let the output hook perform the store then if we fault and
2722 restart, we'll have the wrong SYSTEM MASK in place. */
2723 t
= tcg_temp_new_i64();
2724 tcg_gen_shri_i64(t
, psw_mask
, 56);
2725 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
2726 tcg_temp_free_i64(t
);
2728 if (s
->fields
->op
== 0xac) {
2729 tcg_gen_andi_i64(psw_mask
, psw_mask
,
2730 (i2
<< 56) | 0x00ffffffffffffffull
);
2732 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
2737 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
2739 check_privileged(s
);
2740 potential_page_fault(s
);
2741 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
2746 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
2748 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
2752 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
2754 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
2758 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
2760 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
2764 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
2766 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
2770 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
2772 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2773 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2774 potential_page_fault(s
);
2775 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
2776 tcg_temp_free_i32(r1
);
2777 tcg_temp_free_i32(r3
);
2781 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
2783 int m3
= get_field(s
->fields
, m3
);
2784 int pos
, base
= s
->insn
->data
;
2785 TCGv_i64 tmp
= tcg_temp_new_i64();
2787 pos
= base
+ ctz32(m3
) * 8;
2790 /* Effectively a 32-bit store. */
2791 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2792 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
2798 /* Effectively a 16-bit store. */
2799 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2800 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
2807 /* Effectively an 8-bit store. */
2808 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2809 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2813 /* This is going to be a sequence of shifts and stores. */
2814 pos
= base
+ 32 - 8;
2817 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
2818 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
2819 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2821 m3
= (m3
<< 1) & 0xf;
2826 tcg_temp_free_i64(tmp
);
2830 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
2832 int r1
= get_field(s
->fields
, r1
);
2833 int r3
= get_field(s
->fields
, r3
);
2834 int size
= s
->insn
->data
;
2835 TCGv_i64 tsize
= tcg_const_i64(size
);
2839 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
2841 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
2846 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
2850 tcg_temp_free_i64(tsize
);
2854 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
2856 int r1
= get_field(s
->fields
, r1
);
2857 int r3
= get_field(s
->fields
, r3
);
2858 TCGv_i64 t
= tcg_temp_new_i64();
2859 TCGv_i64 t4
= tcg_const_i64(4);
2860 TCGv_i64 t32
= tcg_const_i64(32);
2863 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
2864 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
2868 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2872 tcg_temp_free_i64(t
);
2873 tcg_temp_free_i64(t4
);
2874 tcg_temp_free_i64(t32
);
2878 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
2880 potential_page_fault(s
);
2881 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2883 return_low128(o
->in2
);
2887 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
2889 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
2893 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
2898 tcg_gen_not_i64(o
->in2
, o
->in2
);
2899 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2901 /* XXX possible optimization point */
2903 cc
= tcg_temp_new_i64();
2904 tcg_gen_extu_i32_i64(cc
, cc_op
);
2905 tcg_gen_shri_i64(cc
, cc
, 1);
2906 tcg_gen_add_i64(o
->out
, o
->out
, cc
);
2907 tcg_temp_free_i64(cc
);
2911 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
2918 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
2919 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
2920 tcg_temp_free_i32(t
);
2922 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
2923 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
2924 tcg_temp_free_i32(t
);
2926 gen_exception(EXCP_SVC
);
2927 return EXIT_NORETURN
;
2930 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
2932 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
2937 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
2939 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
2944 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
2946 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
2951 #ifndef CONFIG_USER_ONLY
2952 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
2954 potential_page_fault(s
);
2955 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
2961 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
2963 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2964 potential_page_fault(s
);
2965 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
2966 tcg_temp_free_i32(l
);
2971 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
2973 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2974 potential_page_fault(s
);
2975 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
2976 tcg_temp_free_i32(l
);
2980 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
2982 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2983 potential_page_fault(s
);
2984 gen_helper_xc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2985 tcg_temp_free_i32(l
);
2990 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
2992 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2996 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
2998 int shift
= s
->insn
->data
& 0xff;
2999 int size
= s
->insn
->data
>> 8;
3000 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3003 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3004 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3006 /* Produce the CC from only the bits manipulated. */
3007 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3008 set_cc_nz_u64(s
, cc_dst
);
3012 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3014 o
->out
= tcg_const_i64(0);
3018 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3020 o
->out
= tcg_const_i64(0);
3026 /* ====================================================================== */
3027 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3028 the original inputs), update the various cc data structures in order to
3029 be able to compute the new condition code. */
3031 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3033 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3036 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3038 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3041 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3043 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3046 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3048 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3051 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3053 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3056 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3058 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3061 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3063 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3066 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3068 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3071 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3073 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3076 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3078 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3081 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3083 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3086 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3088 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3091 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3093 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3096 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3098 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3101 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3103 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3106 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3108 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3111 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3113 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3116 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3118 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3121 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3123 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3126 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3128 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3129 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3132 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3134 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3137 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3139 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3142 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3144 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3147 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3149 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3152 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3154 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3157 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3159 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3162 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3164 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3167 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3169 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3172 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3174 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3177 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3179 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3182 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3184 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3187 /* ====================================================================== */
3188 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3189 with the TCG register to which we will write. Used in combination with
3190 the "wout" generators, in some cases we need a new temporary, and in
3191 some cases we can write to a TCG global. */
3193 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3195 o
->out
= tcg_temp_new_i64();
3198 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3200 o
->out
= tcg_temp_new_i64();
3201 o
->out2
= tcg_temp_new_i64();
3204 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3206 o
->out
= regs
[get_field(f
, r1
)];
3210 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3212 /* ??? Specification exception: r1 must be even. */
3213 int r1
= get_field(f
, r1
);
3215 o
->out2
= regs
[(r1
+ 1) & 15];
3216 o
->g_out
= o
->g_out2
= true;
3219 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3221 o
->out
= fregs
[get_field(f
, r1
)];
3225 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3227 /* ??? Specification exception: r1 must be < 14. */
3228 int r1
= get_field(f
, r1
);
3230 o
->out2
= fregs
[(r1
+ 2) & 15];
3231 o
->g_out
= o
->g_out2
= true;
3234 /* ====================================================================== */
3235 /* The "Write OUTput" generators. These generally perform some non-trivial
3236 copy of data to TCG globals, or to main memory. The trivial cases are
3237 generally handled by having a "prep" generator install the TCG global
3238 as the destination of the operation. */
3240 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3242 store_reg(get_field(f
, r1
), o
->out
);
3245 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3247 int r1
= get_field(f
, r1
);
3248 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3251 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3253 int r1
= get_field(f
, r1
);
3254 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3257 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3259 store_reg32_i64(get_field(f
, r1
), o
->out
);
3262 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3264 /* ??? Specification exception: r1 must be even. */
3265 int r1
= get_field(f
, r1
);
3266 store_reg32_i64(r1
, o
->out
);
3267 store_reg32_i64((r1
+ 1) & 15, o
->out2
);
3270 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3272 /* ??? Specification exception: r1 must be even. */
3273 int r1
= get_field(f
, r1
);
3274 store_reg32_i64((r1
+ 1) & 15, o
->out
);
3275 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3276 store_reg32_i64(r1
, o
->out
);
3279 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3281 store_freg32_i64(get_field(f
, r1
), o
->out
);
3284 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3286 store_freg(get_field(f
, r1
), o
->out
);
3289 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3291 /* ??? Specification exception: r1 must be < 14. */
3292 int f1
= get_field(s
->fields
, r1
);
3293 store_freg(f1
, o
->out
);
3294 store_freg((f1
+ 2) & 15, o
->out2
);
3297 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3299 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3300 store_reg32_i64(get_field(f
, r1
), o
->out
);
3304 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3306 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3307 store_freg32_i64(get_field(f
, r1
), o
->out
);
3311 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3313 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3316 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3318 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3321 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3323 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3326 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3328 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3331 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3333 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3336 /* ====================================================================== */
3337 /* The "INput 1" generators. These load the first operand to an insn. */
3339 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3341 o
->in1
= load_reg(get_field(f
, r1
));
3344 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3346 o
->in1
= regs
[get_field(f
, r1
)];
3350 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3352 o
->in1
= tcg_temp_new_i64();
3353 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3356 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3358 o
->in1
= tcg_temp_new_i64();
3359 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3362 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3364 o
->in1
= tcg_temp_new_i64();
3365 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3368 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3370 /* ??? Specification exception: r1 must be even. */
3371 int r1
= get_field(f
, r1
);
3372 o
->in1
= load_reg((r1
+ 1) & 15);
3375 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3377 /* ??? Specification exception: r1 must be even. */
3378 int r1
= get_field(f
, r1
);
3379 o
->in1
= tcg_temp_new_i64();
3380 tcg_gen_ext32s_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3383 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3385 /* ??? Specification exception: r1 must be even. */
3386 int r1
= get_field(f
, r1
);
3387 o
->in1
= tcg_temp_new_i64();
3388 tcg_gen_ext32u_i64(o
->in1
, regs
[(r1
+ 1) & 15]);
3391 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3393 /* ??? Specification exception: r1 must be even. */
3394 int r1
= get_field(f
, r1
);
3395 o
->in1
= tcg_temp_new_i64();
3396 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3399 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3401 o
->in1
= load_reg(get_field(f
, r2
));
3404 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3406 o
->in1
= load_reg(get_field(f
, r3
));
3409 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3411 o
->in1
= regs
[get_field(f
, r3
)];
3415 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3417 o
->in1
= tcg_temp_new_i64();
3418 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3421 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3423 o
->in1
= tcg_temp_new_i64();
3424 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
3427 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3429 o
->in1
= load_freg32_i64(get_field(f
, r1
));
3432 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3434 o
->in1
= fregs
[get_field(f
, r1
)];
3438 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3440 /* ??? Specification exception: r1 must be < 14. */
3441 int r1
= get_field(f
, r1
);
3443 o
->out2
= fregs
[(r1
+ 2) & 15];
3444 o
->g_out
= o
->g_out2
= true;
3447 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3449 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
3452 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3454 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3455 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3458 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3461 o
->in1
= tcg_temp_new_i64();
3462 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
3465 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3468 o
->in1
= tcg_temp_new_i64();
3469 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
3472 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3475 o
->in1
= tcg_temp_new_i64();
3476 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
3479 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3482 o
->in1
= tcg_temp_new_i64();
3483 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
3486 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3489 o
->in1
= tcg_temp_new_i64();
3490 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
3493 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3496 o
->in1
= tcg_temp_new_i64();
3497 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
3500 /* ====================================================================== */
3501 /* The "INput 2" generators. These load the second operand to an insn. */
3503 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3505 o
->in2
= regs
[get_field(f
, r1
)];
3509 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3511 o
->in2
= tcg_temp_new_i64();
3512 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3515 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3517 o
->in2
= tcg_temp_new_i64();
3518 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
3521 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3523 o
->in2
= load_reg(get_field(f
, r2
));
3526 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3528 o
->in2
= regs
[get_field(f
, r2
)];
3532 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3534 int r2
= get_field(f
, r2
);
3536 o
->in2
= load_reg(r2
);
3540 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3542 o
->in2
= tcg_temp_new_i64();
3543 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3546 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3548 o
->in2
= tcg_temp_new_i64();
3549 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3552 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3554 o
->in2
= tcg_temp_new_i64();
3555 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3558 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3560 o
->in2
= tcg_temp_new_i64();
3561 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3564 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3566 o
->in2
= load_reg(get_field(f
, r3
));
3569 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3571 o
->in2
= tcg_temp_new_i64();
3572 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3575 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3577 o
->in2
= tcg_temp_new_i64();
3578 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
3581 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3583 o
->in2
= load_freg32_i64(get_field(f
, r2
));
3586 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3588 o
->in2
= fregs
[get_field(f
, r2
)];
3592 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3594 /* ??? Specification exception: r1 must be < 14. */
3595 int r2
= get_field(f
, r2
);
3597 o
->in2
= fregs
[(r2
+ 2) & 15];
3598 o
->g_in1
= o
->g_in2
= true;
3601 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3603 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
3606 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3608 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
3609 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
3612 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3614 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
3617 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3619 help_l2_shift(s
, f
, o
, 31);
3622 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3624 help_l2_shift(s
, f
, o
, 63);
3627 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3630 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
3633 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3636 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
3639 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3642 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3645 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3648 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3651 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3654 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3657 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3660 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3663 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3666 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
3669 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3672 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
3675 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3678 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
3681 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3684 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
3687 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3689 o
->in2
= tcg_const_i64(get_field(f
, i2
));
3692 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3694 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
3697 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3699 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
3702 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3704 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
3707 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3709 uint64_t i2
= (uint16_t)get_field(f
, i2
);
3710 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3713 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3715 uint64_t i2
= (uint32_t)get_field(f
, i2
);
3716 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
3719 /* ====================================================================== */
3721 /* Find opc within the table of insns. This is formulated as a switch
3722 statement so that (1) we get compile-time notice of cut-paste errors
3723 for duplicated opcodes, and (2) the compiler generates the binary
3724 search tree, rather than us having to post-process the table. */
3726 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3727 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3729 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3731 enum DisasInsnEnum
{
3732 #include "insn-data.def"
3736 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3741 .help_in1 = in1_##I1, \
3742 .help_in2 = in2_##I2, \
3743 .help_prep = prep_##P, \
3744 .help_wout = wout_##W, \
3745 .help_cout = cout_##CC, \
3746 .help_op = op_##OP, \
3750 /* Allow 0 to be used for NULL in the table below. */
3758 static const DisasInsn insn_info
[] = {
3759 #include "insn-data.def"
3763 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3764 case OPC: return &insn_info[insn_ ## NM];
3766 static const DisasInsn
*lookup_opc(uint16_t opc
)
3769 #include "insn-data.def"
3778 /* Extract a field from the insn. The INSN should be left-aligned in
3779 the uint64_t so that we can more easily utilize the big-bit-endian
3780 definitions we extract from the Principals of Operation. */
3782 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
3790 /* Zero extract the field from the insn. */
3791 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
3793 /* Sign-extend, or un-swap the field as necessary. */
3795 case 0: /* unsigned */
3797 case 1: /* signed */
3798 assert(f
->size
<= 32);
3799 m
= 1u << (f
->size
- 1);
3802 case 2: /* dl+dh split, signed 20 bit. */
3803 r
= ((int8_t)r
<< 12) | (r
>> 8);
3809 /* Validate that the "compressed" encoding we selected above is valid.
3810 I.e. we havn't make two different original fields overlap. */
3811 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
3812 o
->presentC
|= 1 << f
->indexC
;
3813 o
->presentO
|= 1 << f
->indexO
;
3815 o
->c
[f
->indexC
] = r
;
3818 /* Lookup the insn at the current PC, extracting the operands into O and
3819 returning the info struct for the insn. Returns NULL for invalid insn. */
3821 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
3824 uint64_t insn
, pc
= s
->pc
;
3826 const DisasInsn
*info
;
3828 insn
= ld_code2(env
, pc
);
3829 op
= (insn
>> 8) & 0xff;
3830 ilen
= get_ilen(op
);
3831 s
->next_pc
= s
->pc
+ ilen
;
3838 insn
= ld_code4(env
, pc
) << 32;
3841 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
3847 /* We can't actually determine the insn format until we've looked up
3848 the full insn opcode. Which we can't do without locating the
3849 secondary opcode. Assume by default that OP2 is at bit 40; for
3850 those smaller insns that don't actually have a secondary opcode
3851 this will correctly result in OP2 = 0. */
3857 case 0xb2: /* S, RRF, RRE */
3858 case 0xb3: /* RRE, RRD, RRF */
3859 case 0xb9: /* RRE, RRF */
3860 case 0xe5: /* SSE, SIL */
3861 op2
= (insn
<< 8) >> 56;
3865 case 0xc0: /* RIL */
3866 case 0xc2: /* RIL */
3867 case 0xc4: /* RIL */
3868 case 0xc6: /* RIL */
3869 case 0xc8: /* SSF */
3870 case 0xcc: /* RIL */
3871 op2
= (insn
<< 12) >> 60;
3873 case 0xd0 ... 0xdf: /* SS */
3879 case 0xee ... 0xf3: /* SS */
3880 case 0xf8 ... 0xfd: /* SS */
3884 op2
= (insn
<< 40) >> 56;
3888 memset(f
, 0, sizeof(*f
));
3892 /* Lookup the instruction. */
3893 info
= lookup_opc(op
<< 8 | op2
);
3895 /* If we found it, extract the operands. */
3897 DisasFormat fmt
= info
->fmt
;
3900 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
3901 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
3907 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
3909 const DisasInsn
*insn
;
3910 ExitStatus ret
= NO_EXIT
;
3914 /* Search for the insn in the table. */
3915 insn
= extract_insn(env
, s
, &f
);
3917 /* Not found means unimplemented/illegal opcode. */
3919 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
3921 gen_illegal_opcode(s
);
3922 return EXIT_NORETURN
;
3925 /* Set up the strutures we use to communicate with the helpers. */
3928 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
3929 TCGV_UNUSED_I64(o
.out
);
3930 TCGV_UNUSED_I64(o
.out2
);
3931 TCGV_UNUSED_I64(o
.in1
);
3932 TCGV_UNUSED_I64(o
.in2
);
3933 TCGV_UNUSED_I64(o
.addr1
);
3935 /* Implement the instruction. */
3936 if (insn
->help_in1
) {
3937 insn
->help_in1(s
, &f
, &o
);
3939 if (insn
->help_in2
) {
3940 insn
->help_in2(s
, &f
, &o
);
3942 if (insn
->help_prep
) {
3943 insn
->help_prep(s
, &f
, &o
);
3945 if (insn
->help_op
) {
3946 ret
= insn
->help_op(s
, &o
);
3948 if (insn
->help_wout
) {
3949 insn
->help_wout(s
, &f
, &o
);
3951 if (insn
->help_cout
) {
3952 insn
->help_cout(s
, &o
);
3955 /* Free any temporaries created by the helpers. */
3956 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
3957 tcg_temp_free_i64(o
.out
);
3959 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
3960 tcg_temp_free_i64(o
.out2
);
3962 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
3963 tcg_temp_free_i64(o
.in1
);
3965 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
3966 tcg_temp_free_i64(o
.in2
);
3968 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
3969 tcg_temp_free_i64(o
.addr1
);
3972 /* Advance to the next instruction. */
3977 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
3978 TranslationBlock
*tb
,
3982 target_ulong pc_start
;
3983 uint64_t next_page_start
;
3984 uint16_t *gen_opc_end
;
3986 int num_insns
, max_insns
;
3994 if (!(tb
->flags
& FLAG_MASK_64
)) {
3995 pc_start
&= 0x7fffffff;
4000 dc
.cc_op
= CC_OP_DYNAMIC
;
4001 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4002 dc
.is_jmp
= DISAS_NEXT
;
4004 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4006 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4009 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4010 if (max_insns
== 0) {
4011 max_insns
= CF_COUNT_MASK
;
4018 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4022 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4025 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4026 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4027 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4028 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4030 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4034 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4035 tcg_gen_debug_insn_start(dc
.pc
);
4039 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4040 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4041 if (bp
->pc
== dc
.pc
) {
4042 status
= EXIT_PC_STALE
;
4048 if (status
== NO_EXIT
) {
4049 status
= translate_one(env
, &dc
);
4052 /* If we reach a page boundary, are single stepping,
4053 or exhaust instruction count, stop generation. */
4054 if (status
== NO_EXIT
4055 && (dc
.pc
>= next_page_start
4056 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4057 || num_insns
>= max_insns
4059 || env
->singlestep_enabled
)) {
4060 status
= EXIT_PC_STALE
;
4062 } while (status
== NO_EXIT
);
4064 if (tb
->cflags
& CF_LAST_IO
) {
4073 update_psw_addr(&dc
);
4075 case EXIT_PC_UPDATED
:
4076 if (singlestep
&& dc
.cc_op
!= CC_OP_DYNAMIC
) {
4077 gen_op_calc_cc(&dc
);
4079 /* Next TB starts off with CC_OP_DYNAMIC,
4080 so make sure the cc op type is in env */
4081 gen_op_set_cc_op(&dc
);
4084 gen_exception(EXCP_DEBUG
);
4086 /* Generate the return instruction */
4094 gen_icount_end(tb
, num_insns
);
4095 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4097 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4100 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4103 tb
->size
= dc
.pc
- pc_start
;
4104 tb
->icount
= num_insns
;
4107 #if defined(S390X_DEBUG_DISAS)
4108 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4109 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4110 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
4116 void gen_intermediate_code (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4118 gen_intermediate_code_internal(env
, tb
, 0);
4121 void gen_intermediate_code_pc (CPUS390XState
*env
, struct TranslationBlock
*tb
)
4123 gen_intermediate_code_internal(env
, tb
, 1);
4126 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4129 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4130 cc_op
= gen_opc_cc_op
[pc_pos
];
4131 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {