4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit
[CC_OP_MAX
];
76 static uint64_t inline_branch_miss
[CC_OP_MAX
];
79 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
81 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
82 if (s
->tb
->flags
& FLAG_MASK_32
) {
83 return pc
| 0x80000000;
89 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
95 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
96 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
98 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
99 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
102 for (i
= 0; i
< 16; i
++) {
103 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
105 cpu_fprintf(f
, "\n");
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
114 cpu_fprintf(f
, "\n");
120 #ifndef CONFIG_USER_ONLY
121 for (i
= 0; i
< 16; i
++) {
122 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
124 cpu_fprintf(f
, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i
= 0; i
< CC_OP_MAX
; i
++) {
133 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
134 inline_branch_miss
[i
], inline_branch_hit
[i
]);
138 cpu_fprintf(f
, "\n");
141 static TCGv_i64 psw_addr
;
142 static TCGv_i64 psw_mask
;
144 static TCGv_i32 cc_op
;
145 static TCGv_i64 cc_src
;
146 static TCGv_i64 cc_dst
;
147 static TCGv_i64 cc_vr
;
149 static char cpu_reg_names
[32][4];
150 static TCGv_i64 regs
[16];
151 static TCGv_i64 fregs
[16];
153 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
155 void s390x_translate_init(void)
159 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
160 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
161 offsetof(CPUS390XState
, psw
.addr
),
163 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
164 offsetof(CPUS390XState
, psw
.mask
),
167 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
169 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
171 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
173 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
176 for (i
= 0; i
< 16; i
++) {
177 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
178 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
179 offsetof(CPUS390XState
, regs
[i
]),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
185 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, fregs
[i
].d
),
187 cpu_reg_names
[i
+ 16]);
190 /* register helpers */
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
271 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
274 static int get_mem_index(DisasContext
*s
)
276 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
277 case PSW_ASC_PRIMARY
>> 32:
279 case PSW_ASC_SECONDARY
>> 32:
281 case PSW_ASC_HOME
>> 32:
289 static void gen_exception(int excp
)
291 TCGv_i32 tmp
= tcg_const_i32(excp
);
292 gen_helper_exception(cpu_env
, tmp
);
293 tcg_temp_free_i32(tmp
);
296 static void gen_program_exception(DisasContext
*s
, int code
)
300 /* Remember what pgm exeption this was. */
301 tmp
= tcg_const_i32(code
);
302 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
303 tcg_temp_free_i32(tmp
);
305 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
306 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
307 tcg_temp_free_i32(tmp
);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM
);
320 static inline void gen_illegal_opcode(DisasContext
*s
)
322 gen_program_exception(s
, PGM_SPECIFICATION
);
325 static inline void check_privileged(DisasContext
*s
)
327 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
328 gen_program_exception(s
, PGM_PRIVILEGED
);
332 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
343 tmp
= tcg_const_i64(d2
);
344 tcg_gen_add_i64(tmp
, tmp
, regs
[x2
]);
349 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
353 tmp
= tcg_const_i64(d2
);
354 tcg_gen_add_i64(tmp
, tmp
, regs
[b2
]);
359 tmp
= tcg_const_i64(d2
);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s
->tb
->flags
& FLAG_MASK_64
) && (x2
|| b2
)) {
364 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffffUL
);
370 static inline bool live_cc_data(DisasContext
*s
)
372 return (s
->cc_op
!= CC_OP_DYNAMIC
373 && s
->cc_op
!= CC_OP_STATIC
377 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
379 if (live_cc_data(s
)) {
380 tcg_gen_discard_i64(cc_src
);
381 tcg_gen_discard_i64(cc_dst
);
382 tcg_gen_discard_i64(cc_vr
);
384 s
->cc_op
= CC_OP_CONST0
+ val
;
387 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
389 if (live_cc_data(s
)) {
390 tcg_gen_discard_i64(cc_src
);
391 tcg_gen_discard_i64(cc_vr
);
393 tcg_gen_mov_i64(cc_dst
, dst
);
397 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
400 if (live_cc_data(s
)) {
401 tcg_gen_discard_i64(cc_vr
);
403 tcg_gen_mov_i64(cc_src
, src
);
404 tcg_gen_mov_i64(cc_dst
, dst
);
408 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
409 TCGv_i64 dst
, TCGv_i64 vr
)
411 tcg_gen_mov_i64(cc_src
, src
);
412 tcg_gen_mov_i64(cc_dst
, dst
);
413 tcg_gen_mov_i64(cc_vr
, vr
);
417 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
419 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
422 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
424 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
427 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
429 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
432 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
434 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
437 /* CC value is in env->cc_op */
438 static void set_cc_static(DisasContext
*s
)
440 if (live_cc_data(s
)) {
441 tcg_gen_discard_i64(cc_src
);
442 tcg_gen_discard_i64(cc_dst
);
443 tcg_gen_discard_i64(cc_vr
);
445 s
->cc_op
= CC_OP_STATIC
;
448 /* calculates cc into cc_op */
449 static void gen_op_calc_cc(DisasContext
*s
)
451 TCGv_i32 local_cc_op
;
454 TCGV_UNUSED_I32(local_cc_op
);
455 TCGV_UNUSED_I64(dummy
);
458 dummy
= tcg_const_i64(0);
472 local_cc_op
= tcg_const_i32(s
->cc_op
);
488 /* s->cc_op is the cc value */
489 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
492 /* env->cc_op already is the cc value */
507 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
512 case CC_OP_LTUGTU_32
:
513 case CC_OP_LTUGTU_64
:
520 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
535 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
538 /* unknown operation - assume 3 arguments and cc_op in env */
539 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
545 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
546 tcg_temp_free_i32(local_cc_op
);
548 if (!TCGV_IS_UNUSED_I64(dummy
)) {
549 tcg_temp_free_i64(dummy
);
552 /* We now have cc in cc_op as constant */
556 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
558 /* NOTE: we handle the case where the TB spans two pages here */
559 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
560 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
561 && !s
->singlestep_enabled
562 && !(s
->tb
->cflags
& CF_LAST_IO
));
565 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
567 #ifdef DEBUG_INLINE_BRANCHES
568 inline_branch_miss
[cc_op
]++;
572 static void account_inline_branch(DisasContext
*s
, int cc_op
)
574 #ifdef DEBUG_INLINE_BRANCHES
575 inline_branch_hit
[cc_op
]++;
579 /* Table of mask values to comparison codes, given a comparison as input.
580 For such, CC=3 should not be possible. */
581 static const TCGCond ltgt_cond
[16] = {
582 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
583 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
584 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
585 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
586 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
587 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
588 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
589 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
592 /* Table of mask values to comparison codes, given a logic op as input.
593 For such, only CC=0 and CC=1 should be possible. */
594 static const TCGCond nz_cond
[16] = {
595 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
596 TCG_COND_NEVER
, TCG_COND_NEVER
,
597 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
598 TCG_COND_NE
, TCG_COND_NE
,
599 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
600 TCG_COND_EQ
, TCG_COND_EQ
,
601 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
602 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
605 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
606 details required to generate a TCG comparison. */
607 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
610 enum cc_op old_cc_op
= s
->cc_op
;
612 if (mask
== 15 || mask
== 0) {
613 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
616 c
->g1
= c
->g2
= true;
621 /* Find the TCG condition for the mask + cc op. */
627 cond
= ltgt_cond
[mask
];
628 if (cond
== TCG_COND_NEVER
) {
631 account_inline_branch(s
, old_cc_op
);
634 case CC_OP_LTUGTU_32
:
635 case CC_OP_LTUGTU_64
:
636 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
644 cond
= nz_cond
[mask
];
645 if (cond
== TCG_COND_NEVER
) {
648 account_inline_branch(s
, old_cc_op
);
663 account_inline_branch(s
, old_cc_op
);
678 account_inline_branch(s
, old_cc_op
);
682 switch (mask
& 0xa) {
683 case 8: /* src == 0 -> no one bit found */
686 case 2: /* src != 0 -> one bit found */
692 account_inline_branch(s
, old_cc_op
);
698 case 8 | 2: /* vr == 0 */
701 case 4 | 1: /* vr != 0 */
704 case 8 | 4: /* no carry -> vr >= src */
707 case 2 | 1: /* carry -> vr < src */
713 account_inline_branch(s
, old_cc_op
);
718 /* Note that CC=0 is impossible; treat it as dont-care. */
720 case 2: /* zero -> op1 == op2 */
723 case 4 | 1: /* !zero -> op1 != op2 */
726 case 4: /* borrow (!carry) -> op1 < op2 */
729 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
735 account_inline_branch(s
, old_cc_op
);
740 /* Calculate cc value. */
745 /* Jump based on CC. We'll load up the real cond below;
746 the assignment here merely avoids a compiler warning. */
747 account_noninline_branch(s
, old_cc_op
);
748 old_cc_op
= CC_OP_STATIC
;
749 cond
= TCG_COND_NEVER
;
753 /* Load up the arguments of the comparison. */
755 c
->g1
= c
->g2
= false;
759 c
->u
.s32
.a
= tcg_temp_new_i32();
760 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
761 c
->u
.s32
.b
= tcg_const_i32(0);
764 case CC_OP_LTUGTU_32
:
767 c
->u
.s32
.a
= tcg_temp_new_i32();
768 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
769 c
->u
.s32
.b
= tcg_temp_new_i32();
770 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
777 c
->u
.s64
.b
= tcg_const_i64(0);
781 case CC_OP_LTUGTU_64
:
785 c
->g1
= c
->g2
= true;
791 c
->u
.s64
.a
= tcg_temp_new_i64();
792 c
->u
.s64
.b
= tcg_const_i64(0);
793 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
798 c
->u
.s32
.a
= tcg_temp_new_i32();
799 c
->u
.s32
.b
= tcg_temp_new_i32();
800 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
801 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
802 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
804 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
811 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
812 c
->u
.s64
.b
= tcg_const_i64(0);
824 case 0x8 | 0x4 | 0x2: /* cc != 3 */
826 c
->u
.s32
.b
= tcg_const_i32(3);
828 case 0x8 | 0x4 | 0x1: /* cc != 2 */
830 c
->u
.s32
.b
= tcg_const_i32(2);
832 case 0x8 | 0x2 | 0x1: /* cc != 1 */
834 c
->u
.s32
.b
= tcg_const_i32(1);
836 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
839 c
->u
.s32
.a
= tcg_temp_new_i32();
840 c
->u
.s32
.b
= tcg_const_i32(0);
841 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
843 case 0x8 | 0x4: /* cc < 2 */
845 c
->u
.s32
.b
= tcg_const_i32(2);
847 case 0x8: /* cc == 0 */
849 c
->u
.s32
.b
= tcg_const_i32(0);
851 case 0x4 | 0x2 | 0x1: /* cc != 0 */
853 c
->u
.s32
.b
= tcg_const_i32(0);
855 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
858 c
->u
.s32
.a
= tcg_temp_new_i32();
859 c
->u
.s32
.b
= tcg_const_i32(0);
860 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
862 case 0x4: /* cc == 1 */
864 c
->u
.s32
.b
= tcg_const_i32(1);
866 case 0x2 | 0x1: /* cc > 1 */
868 c
->u
.s32
.b
= tcg_const_i32(1);
870 case 0x2: /* cc == 2 */
872 c
->u
.s32
.b
= tcg_const_i32(2);
874 case 0x1: /* cc == 3 */
876 c
->u
.s32
.b
= tcg_const_i32(3);
879 /* CC is masked by something else: (8 >> cc) & mask. */
882 c
->u
.s32
.a
= tcg_const_i32(8);
883 c
->u
.s32
.b
= tcg_const_i32(0);
884 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
885 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
896 static void free_compare(DisasCompare
*c
)
900 tcg_temp_free_i64(c
->u
.s64
.a
);
902 tcg_temp_free_i32(c
->u
.s32
.a
);
907 tcg_temp_free_i64(c
->u
.s64
.b
);
909 tcg_temp_free_i32(c
->u
.s32
.b
);
914 /* ====================================================================== */
915 /* Define the insn format enumeration. */
916 #define F0(N) FMT_##N,
917 #define F1(N, X1) F0(N)
918 #define F2(N, X1, X2) F0(N)
919 #define F3(N, X1, X2, X3) F0(N)
920 #define F4(N, X1, X2, X3, X4) F0(N)
921 #define F5(N, X1, X2, X3, X4, X5) F0(N)
924 #include "insn-format.def"
934 /* Define a structure to hold the decoded fields. We'll store each inside
935 an array indexed by an enum. In order to conserve memory, we'll arrange
936 for fields that do not exist at the same time to overlap, thus the "C"
937 for compact. For checking purposes there is an "O" for original index
938 as well that will be applied to availability bitmaps. */
940 enum DisasFieldIndexO
{
963 enum DisasFieldIndexC
{
997 unsigned presentC
:16;
998 unsigned int presentO
;
1002 /* This is the way fields are to be accessed out of DisasFields. */
1003 #define have_field(S, F) have_field1((S), FLD_O_##F)
1004 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1006 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1008 return (f
->presentO
>> c
) & 1;
1011 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1012 enum DisasFieldIndexC c
)
1014 assert(have_field1(f
, o
));
1018 /* Describe the layout of each field in each format. */
1019 typedef struct DisasField
{
1021 unsigned int size
:8;
1022 unsigned int type
:2;
1023 unsigned int indexC
:6;
1024 enum DisasFieldIndexO indexO
:8;
1027 typedef struct DisasFormatInfo
{
1028 DisasField op
[NUM_C_FIELD
];
1031 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1032 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1033 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1035 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1040 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1041 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1044 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1046 #define F0(N) { { } },
1047 #define F1(N, X1) { { X1 } },
1048 #define F2(N, X1, X2) { { X1, X2 } },
1049 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1050 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1051 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1053 static const DisasFormatInfo format_info
[] = {
1054 #include "insn-format.def"
1072 /* Generally, we'll extract operands into this structures, operate upon
1073 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1074 of routines below for more details. */
1076 bool g_out
, g_out2
, g_in1
, g_in2
;
1077 TCGv_i64 out
, out2
, in1
, in2
;
1081 /* Instructions can place constraints on their operands, raising specification
1082 exceptions if they are violated. To make this easy to automate, each "in1",
1083 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1084 of the following, or 0. To make this easy to document, we'll put the
1085 SPEC_<name> defines next to <name>. */
1087 #define SPEC_r1_even 1
1088 #define SPEC_r2_even 2
1089 #define SPEC_r1_f128 4
1090 #define SPEC_r2_f128 8
1092 /* Return values from translate_one, indicating the state of the TB. */
1094 /* Continue the TB. */
1096 /* We have emitted one or more goto_tb. No fixup required. */
1098 /* We are not using a goto_tb (for whatever reason), but have updated
1099 the PC (for whatever reason), so there's no need to do it again on
1102 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1103 updated the PC for the next instruction to be executed. */
1105 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1106 No following code will be executed. */
1110 typedef enum DisasFacility
{
1111 FAC_Z
, /* zarch (default) */
1112 FAC_CASS
, /* compare and swap and store */
1113 FAC_CASS2
, /* compare and swap and store 2*/
1114 FAC_DFP
, /* decimal floating point */
1115 FAC_DFPR
, /* decimal floating point rounding */
1116 FAC_DO
, /* distinct operands */
1117 FAC_EE
, /* execute extensions */
1118 FAC_EI
, /* extended immediate */
1119 FAC_FPE
, /* floating point extension */
1120 FAC_FPSSH
, /* floating point support sign handling */
1121 FAC_FPRGR
, /* FPR-GR transfer */
1122 FAC_GIE
, /* general instructions extension */
1123 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1124 FAC_HW
, /* high-word */
1125 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1126 FAC_LOC
, /* load/store on condition */
1127 FAC_LD
, /* long displacement */
1128 FAC_PC
, /* population count */
1129 FAC_SCF
, /* store clock fast */
1130 FAC_SFLE
, /* store facility list extended */
1136 DisasFacility fac
:6;
1141 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1142 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1143 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1144 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1145 void (*help_cout
)(DisasContext
*, DisasOps
*);
1146 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1151 /* ====================================================================== */
1152 /* Miscellaneous helpers, used by several operations. */
1154 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1155 DisasOps
*o
, int mask
)
1157 int b2
= get_field(f
, b2
);
1158 int d2
= get_field(f
, d2
);
1161 o
->in2
= tcg_const_i64(d2
& mask
);
1163 o
->in2
= get_address(s
, 0, b2
, d2
);
1164 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1168 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1170 if (dest
== s
->next_pc
) {
1173 if (use_goto_tb(s
, dest
)) {
1176 tcg_gen_movi_i64(psw_addr
, dest
);
1177 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1178 return EXIT_GOTO_TB
;
1180 tcg_gen_movi_i64(psw_addr
, dest
);
1181 return EXIT_PC_UPDATED
;
1185 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1186 bool is_imm
, int imm
, TCGv_i64 cdest
)
1189 uint64_t dest
= s
->pc
+ 2 * imm
;
1192 /* Take care of the special cases first. */
1193 if (c
->cond
== TCG_COND_NEVER
) {
1198 if (dest
== s
->next_pc
) {
1199 /* Branch to next. */
1203 if (c
->cond
== TCG_COND_ALWAYS
) {
1204 ret
= help_goto_direct(s
, dest
);
1208 if (TCGV_IS_UNUSED_I64(cdest
)) {
1209 /* E.g. bcr %r0 -> no branch. */
1213 if (c
->cond
== TCG_COND_ALWAYS
) {
1214 tcg_gen_mov_i64(psw_addr
, cdest
);
1215 ret
= EXIT_PC_UPDATED
;
1220 if (use_goto_tb(s
, s
->next_pc
)) {
1221 if (is_imm
&& use_goto_tb(s
, dest
)) {
1222 /* Both exits can use goto_tb. */
1225 lab
= gen_new_label();
1227 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1229 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1232 /* Branch not taken. */
1234 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1235 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1240 tcg_gen_movi_i64(psw_addr
, dest
);
1241 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1245 /* Fallthru can use goto_tb, but taken branch cannot. */
1246 /* Store taken branch destination before the brcond. This
1247 avoids having to allocate a new local temp to hold it.
1248 We'll overwrite this in the not taken case anyway. */
1250 tcg_gen_mov_i64(psw_addr
, cdest
);
1253 lab
= gen_new_label();
1255 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1257 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1260 /* Branch not taken. */
1263 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1264 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1268 tcg_gen_movi_i64(psw_addr
, dest
);
1270 ret
= EXIT_PC_UPDATED
;
1273 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1274 Most commonly we're single-stepping or some other condition that
1275 disables all use of goto_tb. Just update the PC and exit. */
1277 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1279 cdest
= tcg_const_i64(dest
);
1283 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1286 TCGv_i32 t0
= tcg_temp_new_i32();
1287 TCGv_i64 t1
= tcg_temp_new_i64();
1288 TCGv_i64 z
= tcg_const_i64(0);
1289 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1290 tcg_gen_extu_i32_i64(t1
, t0
);
1291 tcg_temp_free_i32(t0
);
1292 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1293 tcg_temp_free_i64(t1
);
1294 tcg_temp_free_i64(z
);
1298 tcg_temp_free_i64(cdest
);
1300 tcg_temp_free_i64(next
);
1302 ret
= EXIT_PC_UPDATED
;
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
1314 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1316 gen_helper_abs_i64(o
->out
, o
->in2
);
1320 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1322 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1326 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1328 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1332 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1334 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1335 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1339 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1341 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1345 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1350 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1352 /* The carry flag is the msb of CC, therefore the branch mask that would
1353 create that comparison is 3. Feeding the generated comparison to
1354 setcond produces the carry flag that we desire. */
1355 disas_jcc(s
, &cmp
, 3);
1356 carry
= tcg_temp_new_i64();
1358 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1360 TCGv_i32 t
= tcg_temp_new_i32();
1361 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1362 tcg_gen_extu_i32_i64(carry
, t
);
1363 tcg_temp_free_i32(t
);
1367 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1368 tcg_temp_free_i64(carry
);
1372 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1374 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1378 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1380 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1384 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1386 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1387 return_low128(o
->out2
);
1391 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1393 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1397 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1399 int shift
= s
->insn
->data
& 0xff;
1400 int size
= s
->insn
->data
>> 8;
1401 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1404 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1405 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1406 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1408 /* Produce the CC from only the bits manipulated. */
1409 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1410 set_cc_nz_u64(s
, cc_dst
);
1414 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1416 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1417 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1418 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1419 return EXIT_PC_UPDATED
;
1425 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1427 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1428 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1431 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1433 int m1
= get_field(s
->fields
, m1
);
1434 bool is_imm
= have_field(s
->fields
, i2
);
1435 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1438 disas_jcc(s
, &c
, m1
);
1439 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1442 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1444 int r1
= get_field(s
->fields
, r1
);
1445 bool is_imm
= have_field(s
->fields
, i2
);
1446 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1450 c
.cond
= TCG_COND_NE
;
1455 t
= tcg_temp_new_i64();
1456 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1457 store_reg32_i64(r1
, t
);
1458 c
.u
.s32
.a
= tcg_temp_new_i32();
1459 c
.u
.s32
.b
= tcg_const_i32(0);
1460 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1461 tcg_temp_free_i64(t
);
1463 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1466 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1468 int r1
= get_field(s
->fields
, r1
);
1469 bool is_imm
= have_field(s
->fields
, i2
);
1470 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1473 c
.cond
= TCG_COND_NE
;
1478 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1479 c
.u
.s64
.a
= regs
[r1
];
1480 c
.u
.s64
.b
= tcg_const_i64(0);
1482 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1485 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1487 int r1
= get_field(s
->fields
, r1
);
1488 int r3
= get_field(s
->fields
, r3
);
1489 bool is_imm
= have_field(s
->fields
, i2
);
1490 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1494 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1499 t
= tcg_temp_new_i64();
1500 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1501 c
.u
.s32
.a
= tcg_temp_new_i32();
1502 c
.u
.s32
.b
= tcg_temp_new_i32();
1503 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1504 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1505 store_reg32_i64(r1
, t
);
1506 tcg_temp_free_i64(t
);
1508 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1511 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1513 int r1
= get_field(s
->fields
, r1
);
1514 int r3
= get_field(s
->fields
, r3
);
1515 bool is_imm
= have_field(s
->fields
, i2
);
1516 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1519 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1522 if (r1
== (r3
| 1)) {
1523 c
.u
.s64
.b
= load_reg(r3
| 1);
1526 c
.u
.s64
.b
= regs
[r3
| 1];
1530 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1531 c
.u
.s64
.a
= regs
[r1
];
1534 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1537 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1539 int imm
, m3
= get_field(s
->fields
, m3
);
1543 c
.cond
= ltgt_cond
[m3
];
1544 if (s
->insn
->data
) {
1545 c
.cond
= tcg_unsigned_cond(c
.cond
);
1547 c
.is_64
= c
.g1
= c
.g2
= true;
1551 is_imm
= have_field(s
->fields
, i4
);
1553 imm
= get_field(s
->fields
, i4
);
1556 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1557 get_field(s
->fields
, d4
));
1560 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1563 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1565 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1570 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1572 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1577 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1579 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1584 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1586 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1587 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1588 tcg_temp_free_i32(m3
);
1589 gen_set_cc_nz_f32(s
, o
->in2
);
1593 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1595 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1596 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1597 tcg_temp_free_i32(m3
);
1598 gen_set_cc_nz_f64(s
, o
->in2
);
1602 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1604 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1605 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1606 tcg_temp_free_i32(m3
);
1607 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1611 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1613 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1614 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1615 tcg_temp_free_i32(m3
);
1616 gen_set_cc_nz_f32(s
, o
->in2
);
1620 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1622 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1623 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1624 tcg_temp_free_i32(m3
);
1625 gen_set_cc_nz_f64(s
, o
->in2
);
1629 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1631 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1632 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1633 tcg_temp_free_i32(m3
);
1634 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1638 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1640 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1641 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1642 tcg_temp_free_i32(m3
);
1643 gen_set_cc_nz_f32(s
, o
->in2
);
1647 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1649 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1650 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1651 tcg_temp_free_i32(m3
);
1652 gen_set_cc_nz_f64(s
, o
->in2
);
1656 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1658 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1659 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1660 tcg_temp_free_i32(m3
);
1661 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1665 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1667 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1668 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1669 tcg_temp_free_i32(m3
);
1670 gen_set_cc_nz_f32(s
, o
->in2
);
1674 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1676 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1677 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1678 tcg_temp_free_i32(m3
);
1679 gen_set_cc_nz_f64(s
, o
->in2
);
1683 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1685 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1686 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1687 tcg_temp_free_i32(m3
);
1688 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1692 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1694 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1695 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1696 tcg_temp_free_i32(m3
);
1700 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1702 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1703 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1704 tcg_temp_free_i32(m3
);
1708 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1710 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1711 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1712 tcg_temp_free_i32(m3
);
1713 return_low128(o
->out2
);
1717 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1719 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1720 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1721 tcg_temp_free_i32(m3
);
1725 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1733 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1735 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1736 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1737 tcg_temp_free_i32(m3
);
1738 return_low128(o
->out2
);
1742 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1744 int r2
= get_field(s
->fields
, r2
);
1745 TCGv_i64 len
= tcg_temp_new_i64();
1747 potential_page_fault(s
);
1748 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1750 return_low128(o
->out
);
1752 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1753 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1754 tcg_temp_free_i64(len
);
1759 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1761 int l
= get_field(s
->fields
, l1
);
1766 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1767 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1770 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1771 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1774 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1775 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1778 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1779 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1782 potential_page_fault(s
);
1783 vl
= tcg_const_i32(l
);
1784 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1785 tcg_temp_free_i32(vl
);
1789 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1793 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1795 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1796 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1797 potential_page_fault(s
);
1798 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1799 tcg_temp_free_i32(r1
);
1800 tcg_temp_free_i32(r3
);
1805 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1807 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1808 TCGv_i32 t1
= tcg_temp_new_i32();
1809 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1810 potential_page_fault(s
);
1811 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1813 tcg_temp_free_i32(t1
);
1814 tcg_temp_free_i32(m3
);
1818 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1820 potential_page_fault(s
);
1821 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1823 return_low128(o
->in2
);
1827 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1829 TCGv_i64 t
= tcg_temp_new_i64();
1830 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1831 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1832 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1833 tcg_temp_free_i64(t
);
1837 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1839 int r3
= get_field(s
->fields
, r3
);
1840 potential_page_fault(s
);
1841 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1846 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1848 int r3
= get_field(s
->fields
, r3
);
1849 potential_page_fault(s
);
1850 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1855 #ifndef CONFIG_USER_ONLY
1856 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1858 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1859 check_privileged(s
);
1860 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1861 tcg_temp_free_i32(r1
);
1867 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1869 int r3
= get_field(s
->fields
, r3
);
1870 TCGv_i64 in3
= tcg_temp_new_i64();
1871 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1872 potential_page_fault(s
);
1873 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1874 tcg_temp_free_i64(in3
);
1879 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1881 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1882 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1883 potential_page_fault(s
);
1884 /* XXX rewrite in tcg */
1885 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1890 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1892 TCGv_i64 t1
= tcg_temp_new_i64();
1893 TCGv_i32 t2
= tcg_temp_new_i32();
1894 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1895 gen_helper_cvd(t1
, t2
);
1896 tcg_temp_free_i32(t2
);
1897 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1898 tcg_temp_free_i64(t1
);
1902 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1904 int m3
= get_field(s
->fields
, m3
);
1905 int lab
= gen_new_label();
1909 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1910 if (s
->insn
->data
) {
1911 c
= tcg_unsigned_cond(c
);
1913 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1915 /* Set DXC to 0xff. */
1916 t
= tcg_temp_new_i32();
1917 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1918 tcg_gen_ori_i32(t
, t
, 0xff00);
1919 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1920 tcg_temp_free_i32(t
);
1923 gen_program_exception(s
, PGM_DATA
);
1929 #ifndef CONFIG_USER_ONLY
1930 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1934 check_privileged(s
);
1935 potential_page_fault(s
);
1937 /* We pretend the format is RX_a so that D2 is the field we want. */
1938 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1939 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1940 tcg_temp_free_i32(tmp
);
1945 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1947 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1948 return_low128(o
->out
);
1952 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1954 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1955 return_low128(o
->out
);
1959 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1961 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1962 return_low128(o
->out
);
1966 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
1968 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
1969 return_low128(o
->out
);
1973 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
1975 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1979 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
1981 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1985 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
1987 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1988 return_low128(o
->out2
);
1992 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
1994 int r2
= get_field(s
->fields
, r2
);
1995 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1999 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2001 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2005 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2007 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2008 tb->flags, (ab)use the tb->cs_base field as the address of
2009 the template in memory, and grab 8 bits of tb->flags/cflags for
2010 the contents of the register. We would then recognize all this
2011 in gen_intermediate_code_internal, generating code for exactly
2012 one instruction. This new TB then gets executed normally.
2014 On the other hand, this seems to be mostly used for modifying
2015 MVC inside of memcpy, which needs a helper call anyway. So
2016 perhaps this doesn't bear thinking about any further. */
2023 tmp
= tcg_const_i64(s
->next_pc
);
2024 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2025 tcg_temp_free_i64(tmp
);
2031 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2033 /* We'll use the original input for cc computation, since we get to
2034 compare that against 0, which ought to be better than comparing
2035 the real output against 64. It also lets cc_dst be a convenient
2036 temporary during our computation. */
2037 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2039 /* R1 = IN ? CLZ(IN) : 64. */
2040 gen_helper_clz(o
->out
, o
->in2
);
2042 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2043 value by 64, which is undefined. But since the shift is 64 iff the
2044 input is zero, we still get the correct result after and'ing. */
2045 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2046 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2047 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2051 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2053 int m3
= get_field(s
->fields
, m3
);
2054 int pos
, len
, base
= s
->insn
->data
;
2055 TCGv_i64 tmp
= tcg_temp_new_i64();
2060 /* Effectively a 32-bit load. */
2061 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2068 /* Effectively a 16-bit load. */
2069 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2077 /* Effectively an 8-bit load. */
2078 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2083 pos
= base
+ ctz32(m3
) * 8;
2084 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2085 ccm
= ((1ull << len
) - 1) << pos
;
2089 /* This is going to be a sequence of loads and inserts. */
2090 pos
= base
+ 32 - 8;
2094 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2095 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2096 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2099 m3
= (m3
<< 1) & 0xf;
2105 tcg_gen_movi_i64(tmp
, ccm
);
2106 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2107 tcg_temp_free_i64(tmp
);
2111 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2113 int shift
= s
->insn
->data
& 0xff;
2114 int size
= s
->insn
->data
>> 8;
2115 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2119 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2124 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2126 t1
= tcg_temp_new_i64();
2127 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2128 tcg_gen_shri_i64(t1
, t1
, 36);
2129 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2131 tcg_gen_extu_i32_i64(t1
, cc_op
);
2132 tcg_gen_shli_i64(t1
, t1
, 28);
2133 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2134 tcg_temp_free_i64(t1
);
2138 #ifndef CONFIG_USER_ONLY
2139 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2141 check_privileged(s
);
2142 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2146 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2148 check_privileged(s
);
2149 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2154 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2156 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2160 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2162 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2166 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2168 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2172 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2174 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2178 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2180 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2181 return_low128(o
->out2
);
2185 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2187 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2188 return_low128(o
->out2
);
2192 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2194 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2198 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2200 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2204 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2206 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2210 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2212 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2216 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2218 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2222 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2224 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2228 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2230 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2234 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2236 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2240 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2244 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2247 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2251 TCGv_i32 t32
= tcg_temp_new_i32();
2254 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2257 t
= tcg_temp_new_i64();
2258 tcg_gen_extu_i32_i64(t
, t32
);
2259 tcg_temp_free_i32(t32
);
2261 z
= tcg_const_i64(0);
2262 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2263 tcg_temp_free_i64(t
);
2264 tcg_temp_free_i64(z
);
2270 #ifndef CONFIG_USER_ONLY
2271 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2273 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2274 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2275 check_privileged(s
);
2276 potential_page_fault(s
);
2277 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2278 tcg_temp_free_i32(r1
);
2279 tcg_temp_free_i32(r3
);
2283 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2285 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2286 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2287 check_privileged(s
);
2288 potential_page_fault(s
);
2289 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2290 tcg_temp_free_i32(r1
);
2291 tcg_temp_free_i32(r3
);
2294 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2296 check_privileged(s
);
2297 potential_page_fault(s
);
2298 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2303 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2307 check_privileged(s
);
2309 t1
= tcg_temp_new_i64();
2310 t2
= tcg_temp_new_i64();
2311 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2312 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2313 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2314 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2315 tcg_gen_shli_i64(t1
, t1
, 32);
2316 gen_helper_load_psw(cpu_env
, t1
, t2
);
2317 tcg_temp_free_i64(t1
);
2318 tcg_temp_free_i64(t2
);
2319 return EXIT_NORETURN
;
2322 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2326 check_privileged(s
);
2328 t1
= tcg_temp_new_i64();
2329 t2
= tcg_temp_new_i64();
2330 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2331 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2332 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2333 gen_helper_load_psw(cpu_env
, t1
, t2
);
2334 tcg_temp_free_i64(t1
);
2335 tcg_temp_free_i64(t2
);
2336 return EXIT_NORETURN
;
2340 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2342 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2343 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2344 potential_page_fault(s
);
2345 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2346 tcg_temp_free_i32(r1
);
2347 tcg_temp_free_i32(r3
);
2351 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2353 int r1
= get_field(s
->fields
, r1
);
2354 int r3
= get_field(s
->fields
, r3
);
2355 TCGv_i64 t
= tcg_temp_new_i64();
2356 TCGv_i64 t4
= tcg_const_i64(4);
2359 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2360 store_reg32_i64(r1
, t
);
2364 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2368 tcg_temp_free_i64(t
);
2369 tcg_temp_free_i64(t4
);
2373 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2375 int r1
= get_field(s
->fields
, r1
);
2376 int r3
= get_field(s
->fields
, r3
);
2377 TCGv_i64 t
= tcg_temp_new_i64();
2378 TCGv_i64 t4
= tcg_const_i64(4);
2381 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2382 store_reg32h_i64(r1
, t
);
2386 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2390 tcg_temp_free_i64(t
);
2391 tcg_temp_free_i64(t4
);
2395 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2397 int r1
= get_field(s
->fields
, r1
);
2398 int r3
= get_field(s
->fields
, r3
);
2399 TCGv_i64 t8
= tcg_const_i64(8);
2402 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2406 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2410 tcg_temp_free_i64(t8
);
2414 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2417 o
->g_out
= o
->g_in2
;
2418 TCGV_UNUSED_I64(o
->in2
);
2423 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2427 o
->g_out
= o
->g_in1
;
2428 o
->g_out2
= o
->g_in2
;
2429 TCGV_UNUSED_I64(o
->in1
);
2430 TCGV_UNUSED_I64(o
->in2
);
2431 o
->g_in1
= o
->g_in2
= false;
2435 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2437 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2438 potential_page_fault(s
);
2439 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2440 tcg_temp_free_i32(l
);
2444 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2446 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2447 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2448 potential_page_fault(s
);
2449 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2450 tcg_temp_free_i32(r1
);
2451 tcg_temp_free_i32(r2
);
2456 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2458 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2459 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2460 potential_page_fault(s
);
2461 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2462 tcg_temp_free_i32(r1
);
2463 tcg_temp_free_i32(r3
);
2468 #ifndef CONFIG_USER_ONLY
2469 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2471 int r1
= get_field(s
->fields
, l1
);
2472 check_privileged(s
);
2473 potential_page_fault(s
);
2474 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2479 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2481 int r1
= get_field(s
->fields
, l1
);
2482 check_privileged(s
);
2483 potential_page_fault(s
);
2484 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2490 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2492 potential_page_fault(s
);
2493 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2498 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2500 potential_page_fault(s
);
2501 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2503 return_low128(o
->in2
);
2507 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2509 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2513 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2515 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2516 return_low128(o
->out2
);
2520 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2522 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2526 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2528 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2532 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2534 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2538 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2540 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2541 return_low128(o
->out2
);
2545 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2547 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2548 return_low128(o
->out2
);
2552 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2554 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2555 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2556 tcg_temp_free_i64(r3
);
2560 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2562 int r3
= get_field(s
->fields
, r3
);
2563 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2567 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2569 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2570 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2571 tcg_temp_free_i64(r3
);
2575 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2577 int r3
= get_field(s
->fields
, r3
);
2578 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2582 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2584 gen_helper_nabs_i64(o
->out
, o
->in2
);
2588 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2590 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2594 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2596 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2600 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2602 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2603 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2607 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2609 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2610 potential_page_fault(s
);
2611 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2612 tcg_temp_free_i32(l
);
2617 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2619 tcg_gen_neg_i64(o
->out
, o
->in2
);
2623 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2625 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2629 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2631 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2635 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2637 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2638 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2642 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2644 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2645 potential_page_fault(s
);
2646 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2647 tcg_temp_free_i32(l
);
2652 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2654 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2658 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2660 int shift
= s
->insn
->data
& 0xff;
2661 int size
= s
->insn
->data
>> 8;
2662 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2665 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2666 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2668 /* Produce the CC from only the bits manipulated. */
2669 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2670 set_cc_nz_u64(s
, cc_dst
);
2674 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2676 gen_helper_popcnt(o
->out
, o
->in2
);
2680 #ifndef CONFIG_USER_ONLY
2681 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2683 check_privileged(s
);
2684 gen_helper_ptlb(cpu_env
);
2689 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2691 int i3
= get_field(s
->fields
, i3
);
2692 int i4
= get_field(s
->fields
, i4
);
2693 int i5
= get_field(s
->fields
, i5
);
2694 int do_zero
= i4
& 0x80;
2695 uint64_t mask
, imask
, pmask
;
2698 /* Adjust the arguments for the specific insn. */
2699 switch (s
->fields
->op2
) {
2700 case 0x55: /* risbg */
2705 case 0x5d: /* risbhg */
2708 pmask
= 0xffffffff00000000ull
;
2710 case 0x51: /* risblg */
2713 pmask
= 0x00000000ffffffffull
;
2719 /* MASK is the set of bits to be inserted from R2.
2720 Take care for I3/I4 wraparound. */
2723 mask
^= pmask
>> i4
>> 1;
2725 mask
|= ~(pmask
>> i4
>> 1);
2729 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2730 insns, we need to keep the other half of the register. */
2731 imask
= ~mask
| ~pmask
;
2733 if (s
->fields
->op2
== 0x55) {
2740 /* In some cases we can implement this with deposit, which can be more
2741 efficient on some hosts. */
2742 if (~mask
== imask
&& i3
<= i4
) {
2743 if (s
->fields
->op2
== 0x5d) {
2746 /* Note that we rotate the bits to be inserted to the lsb, not to
2747 the position as described in the PoO. */
2750 rot
= (i5
- pos
) & 63;
2756 /* Rotate the input as necessary. */
2757 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2759 /* Insert the selected bits into the output. */
2761 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2762 } else if (imask
== 0) {
2763 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2765 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2766 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2767 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2772 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2774 int i3
= get_field(s
->fields
, i3
);
2775 int i4
= get_field(s
->fields
, i4
);
2776 int i5
= get_field(s
->fields
, i5
);
2779 /* If this is a test-only form, arrange to discard the result. */
2781 o
->out
= tcg_temp_new_i64();
2789 /* MASK is the set of bits to be operated on from R2.
2790 Take care for I3/I4 wraparound. */
2793 mask
^= ~0ull >> i4
>> 1;
2795 mask
|= ~(~0ull >> i4
>> 1);
2798 /* Rotate the input as necessary. */
2799 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2802 switch (s
->fields
->op2
) {
2803 case 0x55: /* AND */
2804 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2805 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2808 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2809 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2811 case 0x57: /* XOR */
2812 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2813 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2820 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2821 set_cc_nz_u64(s
, cc_dst
);
2825 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2827 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2831 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2833 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2837 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2839 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2843 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2845 TCGv_i32 t1
= tcg_temp_new_i32();
2846 TCGv_i32 t2
= tcg_temp_new_i32();
2847 TCGv_i32 to
= tcg_temp_new_i32();
2848 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2849 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2850 tcg_gen_rotl_i32(to
, t1
, t2
);
2851 tcg_gen_extu_i32_i64(o
->out
, to
);
2852 tcg_temp_free_i32(t1
);
2853 tcg_temp_free_i32(t2
);
2854 tcg_temp_free_i32(to
);
2858 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2860 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2864 #ifndef CONFIG_USER_ONLY
2865 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2867 check_privileged(s
);
2868 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2873 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2875 check_privileged(s
);
2876 gen_helper_sacf(cpu_env
, o
->in2
);
2877 /* Addressing mode has changed, so end the block. */
2878 return EXIT_PC_STALE
;
2882 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2884 int r1
= get_field(s
->fields
, r1
);
2885 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2889 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2891 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2895 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2897 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2901 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2903 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2904 return_low128(o
->out2
);
2908 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2910 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2914 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2916 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2920 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2922 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2923 return_low128(o
->out2
);
2927 #ifndef CONFIG_USER_ONLY
2928 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2930 check_privileged(s
);
2931 potential_page_fault(s
);
2932 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2937 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2939 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2940 check_privileged(s
);
2941 potential_page_fault(s
);
2942 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2943 tcg_temp_free_i32(r1
);
2948 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
2954 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2956 lab
= gen_new_label();
2958 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
2960 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
2964 r1
= get_field(s
->fields
, r1
);
2965 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2966 if (s
->insn
->data
) {
2967 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
2969 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
2971 tcg_temp_free_i64(a
);
2977 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2979 uint64_t sign
= 1ull << s
->insn
->data
;
2980 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2981 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2982 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2983 /* The arithmetic left shift is curious in that it does not affect
2984 the sign bit. Copy that over from the source unchanged. */
2985 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2986 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2987 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2991 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2993 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2997 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2999 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3003 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3005 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3009 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3011 gen_helper_sfpc(cpu_env
, o
->in2
);
3015 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3017 gen_helper_sfas(cpu_env
, o
->in2
);
3021 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3023 int b2
= get_field(s
->fields
, b2
);
3024 int d2
= get_field(s
->fields
, d2
);
3025 TCGv_i64 t1
= tcg_temp_new_i64();
3026 TCGv_i64 t2
= tcg_temp_new_i64();
3029 switch (s
->fields
->op2
) {
3030 case 0x99: /* SRNM */
3033 case 0xb8: /* SRNMB */
3036 case 0xb9: /* SRNMT */
3041 mask
= (1 << len
) - 1;
3043 /* Insert the value into the appropriate field of the FPC. */
3045 tcg_gen_movi_i64(t1
, d2
& mask
);
3047 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3048 tcg_gen_andi_i64(t1
, t1
, mask
);
3050 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3051 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3052 tcg_temp_free_i64(t1
);
3054 /* Then install the new FPC to set the rounding mode in fpu_status. */
3055 gen_helper_sfpc(cpu_env
, t2
);
3056 tcg_temp_free_i64(t2
);
3060 #ifndef CONFIG_USER_ONLY
3061 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3063 check_privileged(s
);
3064 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3065 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3069 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3071 check_privileged(s
);
3072 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3076 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3078 check_privileged(s
);
3079 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3083 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3085 check_privileged(s
);
3086 /* ??? Surely cpu address != cpu number. In any case the previous
3087 version of this stored more than the required half-word, so it
3088 is unlikely this has ever been tested. */
3089 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3093 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3095 gen_helper_stck(o
->out
, cpu_env
);
3096 /* ??? We don't implement clock states. */
3097 gen_op_movi_cc(s
, 0);
3101 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3103 TCGv_i64 c1
= tcg_temp_new_i64();
3104 TCGv_i64 c2
= tcg_temp_new_i64();
3105 gen_helper_stck(c1
, cpu_env
);
3106 /* Shift the 64-bit value into its place as a zero-extended
3107 104-bit value. Note that "bit positions 64-103 are always
3108 non-zero so that they compare differently to STCK"; we set
3109 the least significant bit to 1. */
3110 tcg_gen_shli_i64(c2
, c1
, 56);
3111 tcg_gen_shri_i64(c1
, c1
, 8);
3112 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3113 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3114 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3115 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3116 tcg_temp_free_i64(c1
);
3117 tcg_temp_free_i64(c2
);
3118 /* ??? We don't implement clock states. */
3119 gen_op_movi_cc(s
, 0);
3123 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3125 check_privileged(s
);
3126 gen_helper_sckc(cpu_env
, o
->in2
);
3130 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3132 check_privileged(s
);
3133 gen_helper_stckc(o
->out
, cpu_env
);
3137 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3139 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3140 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3141 check_privileged(s
);
3142 potential_page_fault(s
);
3143 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3144 tcg_temp_free_i32(r1
);
3145 tcg_temp_free_i32(r3
);
3149 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3151 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3152 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3153 check_privileged(s
);
3154 potential_page_fault(s
);
3155 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3156 tcg_temp_free_i32(r1
);
3157 tcg_temp_free_i32(r3
);
3161 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3163 check_privileged(s
);
3164 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3168 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3170 check_privileged(s
);
3171 gen_helper_spt(cpu_env
, o
->in2
);
3175 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3178 /* We really ought to have more complete indication of facilities
3179 that we implement. Address this when STFLE is implemented. */
3180 check_privileged(s
);
3181 f
= tcg_const_i64(0xc0000000);
3182 a
= tcg_const_i64(200);
3183 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3184 tcg_temp_free_i64(f
);
3185 tcg_temp_free_i64(a
);
3189 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3191 check_privileged(s
);
3192 gen_helper_stpt(o
->out
, cpu_env
);
3196 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3198 check_privileged(s
);
3199 potential_page_fault(s
);
3200 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3205 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3207 check_privileged(s
);
3208 gen_helper_spx(cpu_env
, o
->in2
);
3212 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3214 check_privileged(s
);
3215 /* Not operational. */
3216 gen_op_movi_cc(s
, 3);
3220 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3222 check_privileged(s
);
3223 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3224 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3228 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3230 uint64_t i2
= get_field(s
->fields
, i2
);
3233 check_privileged(s
);
3235 /* It is important to do what the instruction name says: STORE THEN.
3236 If we let the output hook perform the store then if we fault and
3237 restart, we'll have the wrong SYSTEM MASK in place. */
3238 t
= tcg_temp_new_i64();
3239 tcg_gen_shri_i64(t
, psw_mask
, 56);
3240 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3241 tcg_temp_free_i64(t
);
3243 if (s
->fields
->op
== 0xac) {
3244 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3245 (i2
<< 56) | 0x00ffffffffffffffull
);
3247 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3252 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3254 check_privileged(s
);
3255 potential_page_fault(s
);
3256 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3261 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3263 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3267 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3269 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3273 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3275 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3279 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3281 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3285 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3287 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3288 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3289 potential_page_fault(s
);
3290 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3291 tcg_temp_free_i32(r1
);
3292 tcg_temp_free_i32(r3
);
3296 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3298 int m3
= get_field(s
->fields
, m3
);
3299 int pos
, base
= s
->insn
->data
;
3300 TCGv_i64 tmp
= tcg_temp_new_i64();
3302 pos
= base
+ ctz32(m3
) * 8;
3305 /* Effectively a 32-bit store. */
3306 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3307 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3313 /* Effectively a 16-bit store. */
3314 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3315 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3322 /* Effectively an 8-bit store. */
3323 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3324 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3328 /* This is going to be a sequence of shifts and stores. */
3329 pos
= base
+ 32 - 8;
3332 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3333 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3334 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3336 m3
= (m3
<< 1) & 0xf;
3341 tcg_temp_free_i64(tmp
);
3345 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3347 int r1
= get_field(s
->fields
, r1
);
3348 int r3
= get_field(s
->fields
, r3
);
3349 int size
= s
->insn
->data
;
3350 TCGv_i64 tsize
= tcg_const_i64(size
);
3354 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3356 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3361 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3365 tcg_temp_free_i64(tsize
);
3369 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3371 int r1
= get_field(s
->fields
, r1
);
3372 int r3
= get_field(s
->fields
, r3
);
3373 TCGv_i64 t
= tcg_temp_new_i64();
3374 TCGv_i64 t4
= tcg_const_i64(4);
3375 TCGv_i64 t32
= tcg_const_i64(32);
3378 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3379 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3383 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3387 tcg_temp_free_i64(t
);
3388 tcg_temp_free_i64(t4
);
3389 tcg_temp_free_i64(t32
);
3393 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3395 potential_page_fault(s
);
3396 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3398 return_low128(o
->in2
);
3402 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3404 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3408 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3413 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3415 /* The !borrow flag is the msb of CC. Since we want the inverse of
3416 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3417 disas_jcc(s
, &cmp
, 8 | 4);
3418 borrow
= tcg_temp_new_i64();
3420 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3422 TCGv_i32 t
= tcg_temp_new_i32();
3423 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3424 tcg_gen_extu_i32_i64(borrow
, t
);
3425 tcg_temp_free_i32(t
);
3429 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3430 tcg_temp_free_i64(borrow
);
3434 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3441 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3442 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3443 tcg_temp_free_i32(t
);
3445 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3446 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3447 tcg_temp_free_i32(t
);
3449 gen_exception(EXCP_SVC
);
3450 return EXIT_NORETURN
;
3453 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3455 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3460 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3462 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3467 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3469 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3474 #ifndef CONFIG_USER_ONLY
3475 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3477 potential_page_fault(s
);
3478 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3484 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3486 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3487 potential_page_fault(s
);
3488 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3489 tcg_temp_free_i32(l
);
3494 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3496 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3497 potential_page_fault(s
);
3498 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3499 tcg_temp_free_i32(l
);
3503 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3505 int d1
= get_field(s
->fields
, d1
);
3506 int d2
= get_field(s
->fields
, d2
);
3507 int b1
= get_field(s
->fields
, b1
);
3508 int b2
= get_field(s
->fields
, b2
);
3509 int l
= get_field(s
->fields
, l1
);
3512 o
->addr1
= get_address(s
, 0, b1
, d1
);
3514 /* If the addresses are identical, this is a store/memset of zero. */
3515 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3516 o
->in2
= tcg_const_i64(0);
3520 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3523 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3527 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3530 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3534 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3537 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3541 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3543 gen_op_movi_cc(s
, 0);
3547 /* But in general we'll defer to a helper. */
3548 o
->in2
= get_address(s
, 0, b2
, d2
);
3549 t32
= tcg_const_i32(l
);
3550 potential_page_fault(s
);
3551 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3552 tcg_temp_free_i32(t32
);
3557 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3559 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3563 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3565 int shift
= s
->insn
->data
& 0xff;
3566 int size
= s
->insn
->data
>> 8;
3567 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3570 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3571 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3573 /* Produce the CC from only the bits manipulated. */
3574 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3575 set_cc_nz_u64(s
, cc_dst
);
3579 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3581 o
->out
= tcg_const_i64(0);
3585 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3587 o
->out
= tcg_const_i64(0);
3593 /* ====================================================================== */
3594 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3595 the original inputs), update the various cc data structures in order to
3596 be able to compute the new condition code. */
3598 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3600 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3603 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3605 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3608 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3610 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3613 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3615 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3618 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3620 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3623 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3625 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3628 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3630 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3633 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3635 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3638 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3640 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3643 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3645 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3648 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3650 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3653 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3655 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3658 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3660 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3663 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3665 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3668 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3670 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3673 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3675 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3678 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3680 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3683 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3685 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3688 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3690 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3693 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3695 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3696 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3699 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3701 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3704 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3706 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3709 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3711 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3714 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3716 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3719 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3721 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3724 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3726 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3729 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3731 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3734 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3736 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3739 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3741 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3744 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3746 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3749 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3751 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3754 /* ====================================================================== */
3755 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3756 with the TCG register to which we will write. Used in combination with
3757 the "wout" generators, in some cases we need a new temporary, and in
3758 some cases we can write to a TCG global. */
3760 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3762 o
->out
= tcg_temp_new_i64();
3764 #define SPEC_prep_new 0
3766 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3768 o
->out
= tcg_temp_new_i64();
3769 o
->out2
= tcg_temp_new_i64();
3771 #define SPEC_prep_new_P 0
3773 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3775 o
->out
= regs
[get_field(f
, r1
)];
3778 #define SPEC_prep_r1 0
3780 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3782 int r1
= get_field(f
, r1
);
3784 o
->out2
= regs
[r1
+ 1];
3785 o
->g_out
= o
->g_out2
= true;
3787 #define SPEC_prep_r1_P SPEC_r1_even
3789 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3791 o
->out
= fregs
[get_field(f
, r1
)];
3794 #define SPEC_prep_f1 0
3796 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3798 int r1
= get_field(f
, r1
);
3800 o
->out2
= fregs
[r1
+ 2];
3801 o
->g_out
= o
->g_out2
= true;
3803 #define SPEC_prep_x1 SPEC_r1_f128
3805 /* ====================================================================== */
3806 /* The "Write OUTput" generators. These generally perform some non-trivial
3807 copy of data to TCG globals, or to main memory. The trivial cases are
3808 generally handled by having a "prep" generator install the TCG global
3809 as the destination of the operation. */
3811 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3813 store_reg(get_field(f
, r1
), o
->out
);
3815 #define SPEC_wout_r1 0
3817 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3819 int r1
= get_field(f
, r1
);
3820 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3822 #define SPEC_wout_r1_8 0
3824 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3826 int r1
= get_field(f
, r1
);
3827 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3829 #define SPEC_wout_r1_16 0
3831 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3833 store_reg32_i64(get_field(f
, r1
), o
->out
);
3835 #define SPEC_wout_r1_32 0
3837 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3839 int r1
= get_field(f
, r1
);
3840 store_reg32_i64(r1
, o
->out
);
3841 store_reg32_i64(r1
+ 1, o
->out2
);
3843 #define SPEC_wout_r1_P32 SPEC_r1_even
3845 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3847 int r1
= get_field(f
, r1
);
3848 store_reg32_i64(r1
+ 1, o
->out
);
3849 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3850 store_reg32_i64(r1
, o
->out
);
3852 #define SPEC_wout_r1_D32 SPEC_r1_even
3854 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3856 store_freg32_i64(get_field(f
, r1
), o
->out
);
3858 #define SPEC_wout_e1 0
3860 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3862 store_freg(get_field(f
, r1
), o
->out
);
3864 #define SPEC_wout_f1 0
3866 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 int f1
= get_field(s
->fields
, r1
);
3869 store_freg(f1
, o
->out
);
3870 store_freg(f1
+ 2, o
->out2
);
3872 #define SPEC_wout_x1 SPEC_r1_f128
3874 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3876 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3877 store_reg32_i64(get_field(f
, r1
), o
->out
);
3880 #define SPEC_wout_cond_r1r2_32 0
3882 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3884 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3885 store_freg32_i64(get_field(f
, r1
), o
->out
);
3888 #define SPEC_wout_cond_e1e2 0
3890 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3892 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3894 #define SPEC_wout_m1_8 0
3896 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3898 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3900 #define SPEC_wout_m1_16 0
3902 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3904 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3906 #define SPEC_wout_m1_32 0
3908 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3910 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3912 #define SPEC_wout_m1_64 0
3914 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3916 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3918 #define SPEC_wout_m2_32 0
3920 /* ====================================================================== */
3921 /* The "INput 1" generators. These load the first operand to an insn. */
3923 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3925 o
->in1
= load_reg(get_field(f
, r1
));
3927 #define SPEC_in1_r1 0
3929 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3931 o
->in1
= regs
[get_field(f
, r1
)];
3934 #define SPEC_in1_r1_o 0
3936 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3938 o
->in1
= tcg_temp_new_i64();
3939 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3941 #define SPEC_in1_r1_32s 0
3943 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3945 o
->in1
= tcg_temp_new_i64();
3946 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3948 #define SPEC_in1_r1_32u 0
3950 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3952 o
->in1
= tcg_temp_new_i64();
3953 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3955 #define SPEC_in1_r1_sr32 0
3957 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3959 o
->in1
= load_reg(get_field(f
, r1
) + 1);
3961 #define SPEC_in1_r1p1 SPEC_r1_even
3963 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3965 o
->in1
= tcg_temp_new_i64();
3966 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
3968 #define SPEC_in1_r1p1_32s SPEC_r1_even
3970 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3972 o
->in1
= tcg_temp_new_i64();
3973 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
3975 #define SPEC_in1_r1p1_32u SPEC_r1_even
3977 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3979 int r1
= get_field(f
, r1
);
3980 o
->in1
= tcg_temp_new_i64();
3981 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3983 #define SPEC_in1_r1_D32 SPEC_r1_even
3985 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3987 o
->in1
= load_reg(get_field(f
, r2
));
3989 #define SPEC_in1_r2 0
3991 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3993 o
->in1
= load_reg(get_field(f
, r3
));
3995 #define SPEC_in1_r3 0
3997 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3999 o
->in1
= regs
[get_field(f
, r3
)];
4002 #define SPEC_in1_r3_o 0
4004 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4006 o
->in1
= tcg_temp_new_i64();
4007 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4009 #define SPEC_in1_r3_32s 0
4011 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4013 o
->in1
= tcg_temp_new_i64();
4014 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4016 #define SPEC_in1_r3_32u 0
4018 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4020 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4022 #define SPEC_in1_e1 0
4024 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4026 o
->in1
= fregs
[get_field(f
, r1
)];
4029 #define SPEC_in1_f1_o 0
4031 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4033 int r1
= get_field(f
, r1
);
4035 o
->out2
= fregs
[r1
+ 2];
4036 o
->g_out
= o
->g_out2
= true;
4038 #define SPEC_in1_x1_o SPEC_r1_f128
4040 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4042 o
->in1
= fregs
[get_field(f
, r3
)];
4045 #define SPEC_in1_f3_o 0
4047 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4049 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4051 #define SPEC_in1_la1 0
4053 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4055 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4056 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4058 #define SPEC_in1_la2 0
4060 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4063 o
->in1
= tcg_temp_new_i64();
4064 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4066 #define SPEC_in1_m1_8u 0
4068 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4071 o
->in1
= tcg_temp_new_i64();
4072 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4074 #define SPEC_in1_m1_16s 0
4076 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4079 o
->in1
= tcg_temp_new_i64();
4080 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4082 #define SPEC_in1_m1_16u 0
4084 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4087 o
->in1
= tcg_temp_new_i64();
4088 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4090 #define SPEC_in1_m1_32s 0
4092 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4095 o
->in1
= tcg_temp_new_i64();
4096 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4098 #define SPEC_in1_m1_32u 0
4100 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4103 o
->in1
= tcg_temp_new_i64();
4104 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4106 #define SPEC_in1_m1_64 0
4108 /* ====================================================================== */
4109 /* The "INput 2" generators. These load the second operand to an insn. */
4111 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4113 o
->in2
= regs
[get_field(f
, r1
)];
4116 #define SPEC_in2_r1_o 0
4118 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4120 o
->in2
= tcg_temp_new_i64();
4121 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4123 #define SPEC_in2_r1_16u 0
4125 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4127 o
->in2
= tcg_temp_new_i64();
4128 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4130 #define SPEC_in2_r1_32u 0
4132 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4134 o
->in2
= load_reg(get_field(f
, r2
));
4136 #define SPEC_in2_r2 0
4138 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4140 o
->in2
= regs
[get_field(f
, r2
)];
4143 #define SPEC_in2_r2_o 0
4145 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4147 int r2
= get_field(f
, r2
);
4149 o
->in2
= load_reg(r2
);
4152 #define SPEC_in2_r2_nz 0
4154 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4156 o
->in2
= tcg_temp_new_i64();
4157 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4159 #define SPEC_in2_r2_8s 0
4161 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4163 o
->in2
= tcg_temp_new_i64();
4164 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4166 #define SPEC_in2_r2_8u 0
4168 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4170 o
->in2
= tcg_temp_new_i64();
4171 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4173 #define SPEC_in2_r2_16s 0
4175 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4177 o
->in2
= tcg_temp_new_i64();
4178 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4180 #define SPEC_in2_r2_16u 0
4182 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4184 o
->in2
= load_reg(get_field(f
, r3
));
4186 #define SPEC_in2_r3 0
4188 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4190 o
->in2
= tcg_temp_new_i64();
4191 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4193 #define SPEC_in2_r2_32s 0
4195 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4197 o
->in2
= tcg_temp_new_i64();
4198 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4200 #define SPEC_in2_r2_32u 0
4202 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4204 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4206 #define SPEC_in2_e2 0
4208 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4210 o
->in2
= fregs
[get_field(f
, r2
)];
4213 #define SPEC_in2_f2_o 0
4215 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4217 int r2
= get_field(f
, r2
);
4219 o
->in2
= fregs
[r2
+ 2];
4220 o
->g_in1
= o
->g_in2
= true;
4222 #define SPEC_in2_x2_o SPEC_r2_f128
/* Use the address contained in GPR r2 (no index, no displacement) as in2. */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
/* Compute the base+index+displacement (b2/x2/d2) effective address as in2.
   Formats without an x2 field treat the index as zero. */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0
/* Compute the PC-relative address PC + 2*I2 as in2 (I2 counts halfwords). */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
/* Compute a shift count masked to 0..31 (32-bit shift operand) as in2. */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0
/* Compute a shift count masked to 0..63 (64-bit shift operand) as in2. */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4255 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4258 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4260 #define SPEC_in2_m2_8u 0
4262 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4265 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4267 #define SPEC_in2_m2_16s 0
4269 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4272 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4274 #define SPEC_in2_m2_16u 0
4276 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4279 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4281 #define SPEC_in2_m2_32s 0
4283 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4286 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4288 #define SPEC_in2_m2_32u 0
4290 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4293 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4295 #define SPEC_in2_m2_64 0
4297 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4300 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4302 #define SPEC_in2_mri2_16u 0
4304 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4307 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4309 #define SPEC_in2_mri2_32s 0
4311 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4314 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4316 #define SPEC_in2_mri2_32u 0
4318 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4321 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4323 #define SPEC_in2_mri2_64 0
/* Use the immediate field I2 (sign as declared by the format) as in2. */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0
/* Use the low 8 bits of the immediate I2, zero-extended, as in2. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0
/* Use the low 16 bits of the immediate I2, zero-extended, as in2. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0
/* Use the low 32 bits of the immediate I2, zero-extended, as in2. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
/* Use the 16-bit immediate I2, zero-extended then shifted left by the
   per-insn shift amount stored in insn->data, as in2 (e.g. IIHH/IIHL). */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
/* Use the 32-bit immediate I2, zero-extended then shifted left by the
   per-insn shift amount stored in insn->data, as in2 (e.g. IIHF-style). */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4363 /* ====================================================================== */
4365 /* Find opc within the table of insns. This is formulated as a switch
4366 statement so that (1) we get compile-time notice of cut-paste errors
4367 for duplicated opcodes, and (2) the compiler generates the binary
4368 search tree, rather than us having to post-process the table. */
4370 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4371 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4373 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4375 enum DisasInsnEnum
{
4376 #include "insn-data.def"
4380 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4384 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4386 .help_in1 = in1_##I1, \
4387 .help_in2 = in2_##I2, \
4388 .help_prep = prep_##P, \
4389 .help_wout = wout_##W, \
4390 .help_cout = cout_##CC, \
4391 .help_op = op_##OP, \
4395 /* Allow 0 to be used for NULL in the table below. */
4403 #define SPEC_in1_0 0
4404 #define SPEC_in2_0 0
4405 #define SPEC_prep_0 0
4406 #define SPEC_wout_0 0
4408 static const DisasInsn insn_info
[] = {
4409 #include "insn-data.def"
4413 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4414 case OPC: return &insn_info[insn_ ## NM];
4416 static const DisasInsn
*lookup_opc(uint16_t opc
)
4419 #include "insn-data.def"
4428 /* Extract a field from the insn. The INSN should be left-aligned in
4429 the uint64_t so that we can more easily utilize the big-bit-endian
4430 definitions we extract from the Principals of Operation. */
4432 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
4440 /* Zero extract the field from the insn. */
4441 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
4443 /* Sign-extend, or un-swap the field as necessary. */
4445 case 0: /* unsigned */
4447 case 1: /* signed */
4448 assert(f
->size
<= 32);
4449 m
= 1u << (f
->size
- 1);
4452 case 2: /* dl+dh split, signed 20 bit. */
4453 r
= ((int8_t)r
<< 12) | (r
>> 8);
4459 /* Validate that the "compressed" encoding we selected above is valid.
4460 I.e. we havn't make two different original fields overlap. */
4461 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
4462 o
->presentC
|= 1 << f
->indexC
;
4463 o
->presentO
|= 1 << f
->indexO
;
4465 o
->c
[f
->indexC
] = r
;
4468 /* Lookup the insn at the current PC, extracting the operands into O and
4469 returning the info struct for the insn. Returns NULL for invalid insn. */
4471 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4474 uint64_t insn
, pc
= s
->pc
;
4476 const DisasInsn
*info
;
4478 insn
= ld_code2(env
, pc
);
4479 op
= (insn
>> 8) & 0xff;
4480 ilen
= get_ilen(op
);
4481 s
->next_pc
= s
->pc
+ ilen
;
4488 insn
= ld_code4(env
, pc
) << 32;
4491 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4497 /* We can't actually determine the insn format until we've looked up
4498 the full insn opcode. Which we can't do without locating the
4499 secondary opcode. Assume by default that OP2 is at bit 40; for
4500 those smaller insns that don't actually have a secondary opcode
4501 this will correctly result in OP2 = 0. */
4507 case 0xb2: /* S, RRF, RRE */
4508 case 0xb3: /* RRE, RRD, RRF */
4509 case 0xb9: /* RRE, RRF */
4510 case 0xe5: /* SSE, SIL */
4511 op2
= (insn
<< 8) >> 56;
4515 case 0xc0: /* RIL */
4516 case 0xc2: /* RIL */
4517 case 0xc4: /* RIL */
4518 case 0xc6: /* RIL */
4519 case 0xc8: /* SSF */
4520 case 0xcc: /* RIL */
4521 op2
= (insn
<< 12) >> 60;
4523 case 0xd0 ... 0xdf: /* SS */
4529 case 0xee ... 0xf3: /* SS */
4530 case 0xf8 ... 0xfd: /* SS */
4534 op2
= (insn
<< 40) >> 56;
4538 memset(f
, 0, sizeof(*f
));
4542 /* Lookup the instruction. */
4543 info
= lookup_opc(op
<< 8 | op2
);
4545 /* If we found it, extract the operands. */
4547 DisasFormat fmt
= info
->fmt
;
4550 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4551 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4557 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4559 const DisasInsn
*insn
;
4560 ExitStatus ret
= NO_EXIT
;
4564 /* Search for the insn in the table. */
4565 insn
= extract_insn(env
, s
, &f
);
4567 /* Not found means unimplemented/illegal opcode. */
4569 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4571 gen_illegal_opcode(s
);
4572 return EXIT_NORETURN
;
4575 /* Check for insn specification exceptions. */
4577 int spec
= insn
->spec
, excp
= 0, r
;
4579 if (spec
& SPEC_r1_even
) {
4580 r
= get_field(&f
, r1
);
4582 excp
= PGM_SPECIFICATION
;
4585 if (spec
& SPEC_r2_even
) {
4586 r
= get_field(&f
, r2
);
4588 excp
= PGM_SPECIFICATION
;
4591 if (spec
& SPEC_r1_f128
) {
4592 r
= get_field(&f
, r1
);
4594 excp
= PGM_SPECIFICATION
;
4597 if (spec
& SPEC_r2_f128
) {
4598 r
= get_field(&f
, r2
);
4600 excp
= PGM_SPECIFICATION
;
4604 gen_program_exception(s
, excp
);
4605 return EXIT_NORETURN
;
4609 /* Set up the strutures we use to communicate with the helpers. */
4612 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4613 TCGV_UNUSED_I64(o
.out
);
4614 TCGV_UNUSED_I64(o
.out2
);
4615 TCGV_UNUSED_I64(o
.in1
);
4616 TCGV_UNUSED_I64(o
.in2
);
4617 TCGV_UNUSED_I64(o
.addr1
);
4619 /* Implement the instruction. */
4620 if (insn
->help_in1
) {
4621 insn
->help_in1(s
, &f
, &o
);
4623 if (insn
->help_in2
) {
4624 insn
->help_in2(s
, &f
, &o
);
4626 if (insn
->help_prep
) {
4627 insn
->help_prep(s
, &f
, &o
);
4629 if (insn
->help_op
) {
4630 ret
= insn
->help_op(s
, &o
);
4632 if (insn
->help_wout
) {
4633 insn
->help_wout(s
, &f
, &o
);
4635 if (insn
->help_cout
) {
4636 insn
->help_cout(s
, &o
);
4639 /* Free any temporaries created by the helpers. */
4640 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4641 tcg_temp_free_i64(o
.out
);
4643 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4644 tcg_temp_free_i64(o
.out2
);
4646 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4647 tcg_temp_free_i64(o
.in1
);
4649 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4650 tcg_temp_free_i64(o
.in2
);
4652 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4653 tcg_temp_free_i64(o
.addr1
);
4656 /* Advance to the next instruction. */
4661 static inline void gen_intermediate_code_internal(CPUS390XState
*env
,
4662 TranslationBlock
*tb
,
4666 target_ulong pc_start
;
4667 uint64_t next_page_start
;
4668 uint16_t *gen_opc_end
;
4670 int num_insns
, max_insns
;
4678 if (!(tb
->flags
& FLAG_MASK_64
)) {
4679 pc_start
&= 0x7fffffff;
4684 dc
.cc_op
= CC_OP_DYNAMIC
;
4685 do_debug
= dc
.singlestep_enabled
= env
->singlestep_enabled
;
4687 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
4689 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4692 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4693 if (max_insns
== 0) {
4694 max_insns
= CF_COUNT_MASK
;
4701 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4705 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4708 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4709 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4710 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4711 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4713 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4717 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4718 tcg_gen_debug_insn_start(dc
.pc
);
4722 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
4723 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
4724 if (bp
->pc
== dc
.pc
) {
4725 status
= EXIT_PC_STALE
;
4731 if (status
== NO_EXIT
) {
4732 status
= translate_one(env
, &dc
);
4735 /* If we reach a page boundary, are single stepping,
4736 or exhaust instruction count, stop generation. */
4737 if (status
== NO_EXIT
4738 && (dc
.pc
>= next_page_start
4739 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
4740 || num_insns
>= max_insns
4742 || env
->singlestep_enabled
)) {
4743 status
= EXIT_PC_STALE
;
4745 } while (status
== NO_EXIT
);
4747 if (tb
->cflags
& CF_LAST_IO
) {
4756 update_psw_addr(&dc
);
4758 case EXIT_PC_UPDATED
:
4759 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4760 cc op type is in env */
4762 /* Exit the TB, either by raising a debug exception or by return. */
4764 gen_exception(EXCP_DEBUG
);
4773 gen_icount_end(tb
, num_insns
);
4774 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4776 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
4779 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4782 tb
->size
= dc
.pc
- pc_start
;
4783 tb
->icount
= num_insns
;
4786 #if defined(S390X_DEBUG_DISAS)
4787 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4788 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4789 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
/* Public entry point: translate one TB without pc-search bookkeeping. */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Public entry point: translate one TB recording pc-search bookkeeping,
   used when restoring guest state from a host fault PC. */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4805 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4808 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4809 cc_op
= gen_opc_cc_op
[pc_pos
];
4810 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {