4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env
;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext
;
48 typedef struct DisasInsn DisasInsn
;
49 typedef struct DisasFields DisasFields
;
52 struct TranslationBlock
*tb
;
53 const DisasInsn
*insn
;
57 bool singlestep_enabled
;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a
, b
; } s64
;
68 struct { TCGv_i32 a
, b
; } s32
;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit
[CC_OP_MAX
];
76 static uint64_t inline_branch_miss
[CC_OP_MAX
];
79 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
81 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
82 if (s
->tb
->flags
& FLAG_MASK_32
) {
83 return pc
| 0x80000000;
89 void cpu_dump_state(CPUS390XState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
95 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
96 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
98 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
99 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
102 for (i
= 0; i
< 16; i
++) {
103 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
105 cpu_fprintf(f
, "\n");
111 for (i
= 0; i
< 16; i
++) {
112 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, env
->fregs
[i
].ll
);
114 cpu_fprintf(f
, "\n");
120 #ifndef CONFIG_USER_ONLY
121 for (i
= 0; i
< 16; i
++) {
122 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
124 cpu_fprintf(f
, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i
= 0; i
< CC_OP_MAX
; i
++) {
133 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
134 inline_branch_miss
[i
], inline_branch_hit
[i
]);
138 cpu_fprintf(f
, "\n");
141 static TCGv_i64 psw_addr
;
142 static TCGv_i64 psw_mask
;
144 static TCGv_i32 cc_op
;
145 static TCGv_i64 cc_src
;
146 static TCGv_i64 cc_dst
;
147 static TCGv_i64 cc_vr
;
149 static char cpu_reg_names
[32][4];
150 static TCGv_i64 regs
[16];
151 static TCGv_i64 fregs
[16];
153 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
155 void s390x_translate_init(void)
159 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
160 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
161 offsetof(CPUS390XState
, psw
.addr
),
163 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
164 offsetof(CPUS390XState
, psw
.mask
),
167 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
169 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
171 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
173 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
176 for (i
= 0; i
< 16; i
++) {
177 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
178 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
179 offsetof(CPUS390XState
, regs
[i
]),
183 for (i
= 0; i
< 16; i
++) {
184 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
185 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
186 offsetof(CPUS390XState
, fregs
[i
].d
),
187 cpu_reg_names
[i
+ 16]);
190 /* register helpers */
195 static TCGv_i64
load_reg(int reg
)
197 TCGv_i64 r
= tcg_temp_new_i64();
198 tcg_gen_mov_i64(r
, regs
[reg
]);
202 static TCGv_i64
load_freg32_i64(int reg
)
204 TCGv_i64 r
= tcg_temp_new_i64();
205 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
209 static void store_reg(int reg
, TCGv_i64 v
)
211 tcg_gen_mov_i64(regs
[reg
], v
);
214 static void store_freg(int reg
, TCGv_i64 v
)
216 tcg_gen_mov_i64(fregs
[reg
], v
);
219 static void store_reg32_i64(int reg
, TCGv_i64 v
)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
225 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
227 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
230 static void store_freg32_i64(int reg
, TCGv_i64 v
)
232 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
235 static void return_low128(TCGv_i64 dest
)
237 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
240 static void update_psw_addr(DisasContext
*s
)
243 tcg_gen_movi_i64(psw_addr
, s
->pc
);
246 static void update_cc_op(DisasContext
*s
)
248 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
249 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
253 static void potential_page_fault(DisasContext
*s
)
259 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)cpu_lduw_code(env
, pc
);
264 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
269 static inline uint64_t ld_code6(CPUS390XState
*env
, uint64_t pc
)
271 return (ld_code2(env
, pc
) << 32) | ld_code4(env
, pc
+ 2);
274 static int get_mem_index(DisasContext
*s
)
276 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
277 case PSW_ASC_PRIMARY
>> 32:
279 case PSW_ASC_SECONDARY
>> 32:
281 case PSW_ASC_HOME
>> 32:
289 static void gen_exception(int excp
)
291 TCGv_i32 tmp
= tcg_const_i32(excp
);
292 gen_helper_exception(cpu_env
, tmp
);
293 tcg_temp_free_i32(tmp
);
296 static void gen_program_exception(DisasContext
*s
, int code
)
300 /* Remember what pgm exception this was. */
301 tmp
= tcg_const_i32(code
);
302 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
303 tcg_temp_free_i32(tmp
);
305 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
306 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
307 tcg_temp_free_i32(tmp
);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM
);
320 static inline void gen_illegal_opcode(DisasContext
*s
)
322 gen_program_exception(s
, PGM_SPECIFICATION
);
325 static inline void check_privileged(DisasContext
*s
)
327 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
328 gen_program_exception(s
, PGM_PRIVILEGED
);
332 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
334 TCGv_i64 tmp
= tcg_temp_new_i64();
335 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
337 /* Note that d2 is limited to 20 bits, signed. If we crop negative
338 displacements early we create larger immediate addends. */
340 /* Note that addi optimizes the imm==0 case. */
342 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
343 tcg_gen_addi_i64(tmp
, tmp
, d2
);
345 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
347 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
353 tcg_gen_movi_i64(tmp
, d2
);
356 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
362 static inline bool live_cc_data(DisasContext
*s
)
364 return (s
->cc_op
!= CC_OP_DYNAMIC
365 && s
->cc_op
!= CC_OP_STATIC
369 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
371 if (live_cc_data(s
)) {
372 tcg_gen_discard_i64(cc_src
);
373 tcg_gen_discard_i64(cc_dst
);
374 tcg_gen_discard_i64(cc_vr
);
376 s
->cc_op
= CC_OP_CONST0
+ val
;
379 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
381 if (live_cc_data(s
)) {
382 tcg_gen_discard_i64(cc_src
);
383 tcg_gen_discard_i64(cc_vr
);
385 tcg_gen_mov_i64(cc_dst
, dst
);
389 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
392 if (live_cc_data(s
)) {
393 tcg_gen_discard_i64(cc_vr
);
395 tcg_gen_mov_i64(cc_src
, src
);
396 tcg_gen_mov_i64(cc_dst
, dst
);
400 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
401 TCGv_i64 dst
, TCGv_i64 vr
)
403 tcg_gen_mov_i64(cc_src
, src
);
404 tcg_gen_mov_i64(cc_dst
, dst
);
405 tcg_gen_mov_i64(cc_vr
, vr
);
409 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
411 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
414 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
416 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
419 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
421 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
424 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
426 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
429 /* CC value is in env->cc_op */
430 static void set_cc_static(DisasContext
*s
)
432 if (live_cc_data(s
)) {
433 tcg_gen_discard_i64(cc_src
);
434 tcg_gen_discard_i64(cc_dst
);
435 tcg_gen_discard_i64(cc_vr
);
437 s
->cc_op
= CC_OP_STATIC
;
440 /* calculates cc into cc_op */
441 static void gen_op_calc_cc(DisasContext
*s
)
443 TCGv_i32 local_cc_op
;
446 TCGV_UNUSED_I32(local_cc_op
);
447 TCGV_UNUSED_I64(dummy
);
450 dummy
= tcg_const_i64(0);
464 local_cc_op
= tcg_const_i32(s
->cc_op
);
480 /* s->cc_op is the cc value */
481 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
484 /* env->cc_op already is the cc value */
499 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
504 case CC_OP_LTUGTU_32
:
505 case CC_OP_LTUGTU_64
:
512 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
527 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
530 /* unknown operation - assume 3 arguments and cc_op in env */
531 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
537 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
538 tcg_temp_free_i32(local_cc_op
);
540 if (!TCGV_IS_UNUSED_I64(dummy
)) {
541 tcg_temp_free_i64(dummy
);
544 /* We now have cc in cc_op as constant */
548 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
550 /* NOTE: we handle the case where the TB spans two pages here */
551 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
552 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
553 && !s
->singlestep_enabled
554 && !(s
->tb
->cflags
& CF_LAST_IO
));
557 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
559 #ifdef DEBUG_INLINE_BRANCHES
560 inline_branch_miss
[cc_op
]++;
564 static void account_inline_branch(DisasContext
*s
, int cc_op
)
566 #ifdef DEBUG_INLINE_BRANCHES
567 inline_branch_hit
[cc_op
]++;
571 /* Table of mask values to comparison codes, given a comparison as input.
572 For such, CC=3 should not be possible. */
573 static const TCGCond ltgt_cond
[16] = {
574 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
575 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
576 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
577 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
578 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
579 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
580 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
581 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
584 /* Table of mask values to comparison codes, given a logic op as input.
585 For such, only CC=0 and CC=1 should be possible. */
586 static const TCGCond nz_cond
[16] = {
587 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
588 TCG_COND_NEVER
, TCG_COND_NEVER
,
589 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
590 TCG_COND_NE
, TCG_COND_NE
,
591 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
592 TCG_COND_EQ
, TCG_COND_EQ
,
593 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
594 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
597 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
598 details required to generate a TCG comparison. */
599 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
602 enum cc_op old_cc_op
= s
->cc_op
;
604 if (mask
== 15 || mask
== 0) {
605 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
608 c
->g1
= c
->g2
= true;
613 /* Find the TCG condition for the mask + cc op. */
619 cond
= ltgt_cond
[mask
];
620 if (cond
== TCG_COND_NEVER
) {
623 account_inline_branch(s
, old_cc_op
);
626 case CC_OP_LTUGTU_32
:
627 case CC_OP_LTUGTU_64
:
628 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
629 if (cond
== TCG_COND_NEVER
) {
632 account_inline_branch(s
, old_cc_op
);
636 cond
= nz_cond
[mask
];
637 if (cond
== TCG_COND_NEVER
) {
640 account_inline_branch(s
, old_cc_op
);
655 account_inline_branch(s
, old_cc_op
);
670 account_inline_branch(s
, old_cc_op
);
674 switch (mask
& 0xa) {
675 case 8: /* src == 0 -> no one bit found */
678 case 2: /* src != 0 -> one bit found */
684 account_inline_branch(s
, old_cc_op
);
690 case 8 | 2: /* vr == 0 */
693 case 4 | 1: /* vr != 0 */
696 case 8 | 4: /* no carry -> vr >= src */
699 case 2 | 1: /* carry -> vr < src */
705 account_inline_branch(s
, old_cc_op
);
710 /* Note that CC=0 is impossible; treat it as dont-care. */
712 case 2: /* zero -> op1 == op2 */
715 case 4 | 1: /* !zero -> op1 != op2 */
718 case 4: /* borrow (!carry) -> op1 < op2 */
721 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
727 account_inline_branch(s
, old_cc_op
);
732 /* Calculate cc value. */
737 /* Jump based on CC. We'll load up the real cond below;
738 the assignment here merely avoids a compiler warning. */
739 account_noninline_branch(s
, old_cc_op
);
740 old_cc_op
= CC_OP_STATIC
;
741 cond
= TCG_COND_NEVER
;
745 /* Load up the arguments of the comparison. */
747 c
->g1
= c
->g2
= false;
751 c
->u
.s32
.a
= tcg_temp_new_i32();
752 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
753 c
->u
.s32
.b
= tcg_const_i32(0);
756 case CC_OP_LTUGTU_32
:
759 c
->u
.s32
.a
= tcg_temp_new_i32();
760 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
761 c
->u
.s32
.b
= tcg_temp_new_i32();
762 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
769 c
->u
.s64
.b
= tcg_const_i64(0);
773 case CC_OP_LTUGTU_64
:
777 c
->g1
= c
->g2
= true;
783 c
->u
.s64
.a
= tcg_temp_new_i64();
784 c
->u
.s64
.b
= tcg_const_i64(0);
785 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
790 c
->u
.s32
.a
= tcg_temp_new_i32();
791 c
->u
.s32
.b
= tcg_temp_new_i32();
792 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
793 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
794 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
796 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
803 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
804 c
->u
.s64
.b
= tcg_const_i64(0);
816 case 0x8 | 0x4 | 0x2: /* cc != 3 */
818 c
->u
.s32
.b
= tcg_const_i32(3);
820 case 0x8 | 0x4 | 0x1: /* cc != 2 */
822 c
->u
.s32
.b
= tcg_const_i32(2);
824 case 0x8 | 0x2 | 0x1: /* cc != 1 */
826 c
->u
.s32
.b
= tcg_const_i32(1);
828 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
831 c
->u
.s32
.a
= tcg_temp_new_i32();
832 c
->u
.s32
.b
= tcg_const_i32(0);
833 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
835 case 0x8 | 0x4: /* cc < 2 */
837 c
->u
.s32
.b
= tcg_const_i32(2);
839 case 0x8: /* cc == 0 */
841 c
->u
.s32
.b
= tcg_const_i32(0);
843 case 0x4 | 0x2 | 0x1: /* cc != 0 */
845 c
->u
.s32
.b
= tcg_const_i32(0);
847 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
850 c
->u
.s32
.a
= tcg_temp_new_i32();
851 c
->u
.s32
.b
= tcg_const_i32(0);
852 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
854 case 0x4: /* cc == 1 */
856 c
->u
.s32
.b
= tcg_const_i32(1);
858 case 0x2 | 0x1: /* cc > 1 */
860 c
->u
.s32
.b
= tcg_const_i32(1);
862 case 0x2: /* cc == 2 */
864 c
->u
.s32
.b
= tcg_const_i32(2);
866 case 0x1: /* cc == 3 */
868 c
->u
.s32
.b
= tcg_const_i32(3);
871 /* CC is masked by something else: (8 >> cc) & mask. */
874 c
->u
.s32
.a
= tcg_const_i32(8);
875 c
->u
.s32
.b
= tcg_const_i32(0);
876 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
877 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
888 static void free_compare(DisasCompare
*c
)
892 tcg_temp_free_i64(c
->u
.s64
.a
);
894 tcg_temp_free_i32(c
->u
.s32
.a
);
899 tcg_temp_free_i64(c
->u
.s64
.b
);
901 tcg_temp_free_i32(c
->u
.s32
.b
);
906 /* ====================================================================== */
907 /* Define the insn format enumeration. */
908 #define F0(N) FMT_##N,
909 #define F1(N, X1) F0(N)
910 #define F2(N, X1, X2) F0(N)
911 #define F3(N, X1, X2, X3) F0(N)
912 #define F4(N, X1, X2, X3, X4) F0(N)
913 #define F5(N, X1, X2, X3, X4, X5) F0(N)
916 #include "insn-format.def"
926 /* Define a structure to hold the decoded fields. We'll store each inside
927 an array indexed by an enum. In order to conserve memory, we'll arrange
928 for fields that do not exist at the same time to overlap, thus the "C"
929 for compact. For checking purposes there is an "O" for original index
930 as well that will be applied to availability bitmaps. */
932 enum DisasFieldIndexO
{
955 enum DisasFieldIndexC
{
989 unsigned presentC
:16;
990 unsigned int presentO
;
994 /* This is the way fields are to be accessed out of DisasFields. */
995 #define have_field(S, F) have_field1((S), FLD_O_##F)
996 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
998 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1000 return (f
->presentO
>> c
) & 1;
1003 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1004 enum DisasFieldIndexC c
)
1006 assert(have_field1(f
, o
));
1010 /* Describe the layout of each field in each format. */
1011 typedef struct DisasField
{
1013 unsigned int size
:8;
1014 unsigned int type
:2;
1015 unsigned int indexC
:6;
1016 enum DisasFieldIndexO indexO
:8;
1019 typedef struct DisasFormatInfo
{
1020 DisasField op
[NUM_C_FIELD
];
1023 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1024 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1025 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1027 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1029 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1030 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1031 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1032 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1033 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1034 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1035 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1036 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1038 #define F0(N) { { } },
1039 #define F1(N, X1) { { X1 } },
1040 #define F2(N, X1, X2) { { X1, X2 } },
1041 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1042 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1043 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1045 static const DisasFormatInfo format_info
[] = {
1046 #include "insn-format.def"
1064 /* Generally, we'll extract operands into this structures, operate upon
1065 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1066 of routines below for more details. */
1068 bool g_out
, g_out2
, g_in1
, g_in2
;
1069 TCGv_i64 out
, out2
, in1
, in2
;
1073 /* Instructions can place constraints on their operands, raising specification
1074 exceptions if they are violated. To make this easy to automate, each "in1",
1075 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1076 of the following, or 0. To make this easy to document, we'll put the
1077 SPEC_<name> defines next to <name>. */
1079 #define SPEC_r1_even 1
1080 #define SPEC_r2_even 2
1081 #define SPEC_r1_f128 4
1082 #define SPEC_r2_f128 8
1084 /* Return values from translate_one, indicating the state of the TB. */
1086 /* Continue the TB. */
1088 /* We have emitted one or more goto_tb. No fixup required. */
1090 /* We are not using a goto_tb (for whatever reason), but have updated
1091 the PC (for whatever reason), so there's no need to do it again on
1094 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1095 updated the PC for the next instruction to be executed. */
1097 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1098 No following code will be executed. */
1102 typedef enum DisasFacility
{
1103 FAC_Z
, /* zarch (default) */
1104 FAC_CASS
, /* compare and swap and store */
1105 FAC_CASS2
, /* compare and swap and store 2*/
1106 FAC_DFP
, /* decimal floating point */
1107 FAC_DFPR
, /* decimal floating point rounding */
1108 FAC_DO
, /* distinct operands */
1109 FAC_EE
, /* execute extensions */
1110 FAC_EI
, /* extended immediate */
1111 FAC_FPE
, /* floating point extension */
1112 FAC_FPSSH
, /* floating point support sign handling */
1113 FAC_FPRGR
, /* FPR-GR transfer */
1114 FAC_GIE
, /* general instructions extension */
1115 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1116 FAC_HW
, /* high-word */
1117 FAC_IEEEE_SIM
, /* IEEE exception simulation */
1118 FAC_LOC
, /* load/store on condition */
1119 FAC_LD
, /* long displacement */
1120 FAC_PC
, /* population count */
1121 FAC_SCF
, /* store clock fast */
1122 FAC_SFLE
, /* store facility list extended */
1128 DisasFacility fac
:6;
1133 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1136 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1137 void (*help_cout
)(DisasContext
*, DisasOps
*);
1138 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1143 /* ====================================================================== */
1144 /* Miscellaneous helpers, used by several operations. */
1146 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1147 DisasOps
*o
, int mask
)
1149 int b2
= get_field(f
, b2
);
1150 int d2
= get_field(f
, d2
);
1153 o
->in2
= tcg_const_i64(d2
& mask
);
1155 o
->in2
= get_address(s
, 0, b2
, d2
);
1156 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1160 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1162 if (dest
== s
->next_pc
) {
1165 if (use_goto_tb(s
, dest
)) {
1168 tcg_gen_movi_i64(psw_addr
, dest
);
1169 tcg_gen_exit_tb((tcg_target_long
)s
->tb
);
1170 return EXIT_GOTO_TB
;
1172 tcg_gen_movi_i64(psw_addr
, dest
);
1173 return EXIT_PC_UPDATED
;
1177 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1178 bool is_imm
, int imm
, TCGv_i64 cdest
)
1181 uint64_t dest
= s
->pc
+ 2 * imm
;
1184 /* Take care of the special cases first. */
1185 if (c
->cond
== TCG_COND_NEVER
) {
1190 if (dest
== s
->next_pc
) {
1191 /* Branch to next. */
1195 if (c
->cond
== TCG_COND_ALWAYS
) {
1196 ret
= help_goto_direct(s
, dest
);
1200 if (TCGV_IS_UNUSED_I64(cdest
)) {
1201 /* E.g. bcr %r0 -> no branch. */
1205 if (c
->cond
== TCG_COND_ALWAYS
) {
1206 tcg_gen_mov_i64(psw_addr
, cdest
);
1207 ret
= EXIT_PC_UPDATED
;
1212 if (use_goto_tb(s
, s
->next_pc
)) {
1213 if (is_imm
&& use_goto_tb(s
, dest
)) {
1214 /* Both exits can use goto_tb. */
1217 lab
= gen_new_label();
1219 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1221 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1224 /* Branch not taken. */
1226 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1227 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1232 tcg_gen_movi_i64(psw_addr
, dest
);
1233 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 1);
1237 /* Fallthru can use goto_tb, but taken branch cannot. */
1238 /* Store taken branch destination before the brcond. This
1239 avoids having to allocate a new local temp to hold it.
1240 We'll overwrite this in the not taken case anyway. */
1242 tcg_gen_mov_i64(psw_addr
, cdest
);
1245 lab
= gen_new_label();
1247 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1249 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1252 /* Branch not taken. */
1255 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1256 tcg_gen_exit_tb((tcg_target_long
)s
->tb
+ 0);
1260 tcg_gen_movi_i64(psw_addr
, dest
);
1262 ret
= EXIT_PC_UPDATED
;
1265 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1266 Most commonly we're single-stepping or some other condition that
1267 disables all use of goto_tb. Just update the PC and exit. */
1269 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1271 cdest
= tcg_const_i64(dest
);
1275 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1278 TCGv_i32 t0
= tcg_temp_new_i32();
1279 TCGv_i64 t1
= tcg_temp_new_i64();
1280 TCGv_i64 z
= tcg_const_i64(0);
1281 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1282 tcg_gen_extu_i32_i64(t1
, t0
);
1283 tcg_temp_free_i32(t0
);
1284 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1285 tcg_temp_free_i64(t1
);
1286 tcg_temp_free_i64(z
);
1290 tcg_temp_free_i64(cdest
);
1292 tcg_temp_free_i64(next
);
1294 ret
= EXIT_PC_UPDATED
;
1302 /* ====================================================================== */
1303 /* The operations. These perform the bulk of the work for any insn,
1304 usually after the operands have been loaded and output initialized. */
1306 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1308 gen_helper_abs_i64(o
->out
, o
->in2
);
1312 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1314 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1318 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1320 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1324 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1326 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1327 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1331 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1333 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1337 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1342 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1344 /* The carry flag is the msb of CC, therefore the branch mask that would
1345 create that comparison is 3. Feeding the generated comparison to
1346 setcond produces the carry flag that we desire. */
1347 disas_jcc(s
, &cmp
, 3);
1348 carry
= tcg_temp_new_i64();
1350 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1352 TCGv_i32 t
= tcg_temp_new_i32();
1353 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1354 tcg_gen_extu_i32_i64(carry
, t
);
1355 tcg_temp_free_i32(t
);
1359 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1360 tcg_temp_free_i64(carry
);
1364 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1366 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1370 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1372 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1376 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1378 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1379 return_low128(o
->out2
);
1383 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1385 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1389 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1391 int shift
= s
->insn
->data
& 0xff;
1392 int size
= s
->insn
->data
>> 8;
1393 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1396 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1397 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1398 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1400 /* Produce the CC from only the bits manipulated. */
1401 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1402 set_cc_nz_u64(s
, cc_dst
);
1406 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1408 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1409 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1410 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1411 return EXIT_PC_UPDATED
;
1417 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1419 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1420 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1423 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1425 int m1
= get_field(s
->fields
, m1
);
1426 bool is_imm
= have_field(s
->fields
, i2
);
1427 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1430 disas_jcc(s
, &c
, m1
);
1431 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1434 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1436 int r1
= get_field(s
->fields
, r1
);
1437 bool is_imm
= have_field(s
->fields
, i2
);
1438 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1442 c
.cond
= TCG_COND_NE
;
1447 t
= tcg_temp_new_i64();
1448 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1449 store_reg32_i64(r1
, t
);
1450 c
.u
.s32
.a
= tcg_temp_new_i32();
1451 c
.u
.s32
.b
= tcg_const_i32(0);
1452 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1453 tcg_temp_free_i64(t
);
1455 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1458 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1460 int r1
= get_field(s
->fields
, r1
);
1461 bool is_imm
= have_field(s
->fields
, i2
);
1462 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1465 c
.cond
= TCG_COND_NE
;
1470 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1471 c
.u
.s64
.a
= regs
[r1
];
1472 c
.u
.s64
.b
= tcg_const_i64(0);
1474 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1477 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1479 int r1
= get_field(s
->fields
, r1
);
1480 int r3
= get_field(s
->fields
, r3
);
1481 bool is_imm
= have_field(s
->fields
, i2
);
1482 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1486 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1491 t
= tcg_temp_new_i64();
1492 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1493 c
.u
.s32
.a
= tcg_temp_new_i32();
1494 c
.u
.s32
.b
= tcg_temp_new_i32();
1495 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1496 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1497 store_reg32_i64(r1
, t
);
1498 tcg_temp_free_i64(t
);
1500 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1503 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1505 int r1
= get_field(s
->fields
, r1
);
1506 int r3
= get_field(s
->fields
, r3
);
1507 bool is_imm
= have_field(s
->fields
, i2
);
1508 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1511 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1514 if (r1
== (r3
| 1)) {
1515 c
.u
.s64
.b
= load_reg(r3
| 1);
1518 c
.u
.s64
.b
= regs
[r3
| 1];
1522 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1523 c
.u
.s64
.a
= regs
[r1
];
1526 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1529 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1531 int imm
, m3
= get_field(s
->fields
, m3
);
1535 c
.cond
= ltgt_cond
[m3
];
1536 if (s
->insn
->data
) {
1537 c
.cond
= tcg_unsigned_cond(c
.cond
);
1539 c
.is_64
= c
.g1
= c
.g2
= true;
1543 is_imm
= have_field(s
->fields
, i4
);
1545 imm
= get_field(s
->fields
, i4
);
1548 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1549 get_field(s
->fields
, d4
));
1552 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1555 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1557 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1562 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1564 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1569 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1571 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1576 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1578 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1579 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1580 tcg_temp_free_i32(m3
);
1581 gen_set_cc_nz_f32(s
, o
->in2
);
1585 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1587 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1588 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1589 tcg_temp_free_i32(m3
);
1590 gen_set_cc_nz_f64(s
, o
->in2
);
1594 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1596 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1597 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1598 tcg_temp_free_i32(m3
);
1599 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1603 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1605 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1606 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1607 tcg_temp_free_i32(m3
);
1608 gen_set_cc_nz_f32(s
, o
->in2
);
1612 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1614 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1615 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1616 tcg_temp_free_i32(m3
);
1617 gen_set_cc_nz_f64(s
, o
->in2
);
1621 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1623 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1624 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1625 tcg_temp_free_i32(m3
);
1626 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1630 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1632 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1633 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1634 tcg_temp_free_i32(m3
);
1635 gen_set_cc_nz_f32(s
, o
->in2
);
1639 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1641 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1642 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1643 tcg_temp_free_i32(m3
);
1644 gen_set_cc_nz_f64(s
, o
->in2
);
1648 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1650 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1651 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1652 tcg_temp_free_i32(m3
);
1653 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1657 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1659 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1660 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1661 tcg_temp_free_i32(m3
);
1662 gen_set_cc_nz_f32(s
, o
->in2
);
1666 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1668 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1669 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1670 tcg_temp_free_i32(m3
);
1671 gen_set_cc_nz_f64(s
, o
->in2
);
1675 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1677 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1678 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1679 tcg_temp_free_i32(m3
);
1680 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1684 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1686 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1687 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1688 tcg_temp_free_i32(m3
);
1692 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1694 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1695 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1696 tcg_temp_free_i32(m3
);
1700 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1702 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1703 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1704 tcg_temp_free_i32(m3
);
1705 return_low128(o
->out2
);
1709 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1711 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1712 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1713 tcg_temp_free_i32(m3
);
1717 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1719 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1720 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1721 tcg_temp_free_i32(m3
);
1725 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1727 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1728 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1729 tcg_temp_free_i32(m3
);
1730 return_low128(o
->out2
);
1734 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1736 int r2
= get_field(s
->fields
, r2
);
1737 TCGv_i64 len
= tcg_temp_new_i64();
1739 potential_page_fault(s
);
1740 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1742 return_low128(o
->out
);
1744 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1745 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1746 tcg_temp_free_i64(len
);
1751 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1753 int l
= get_field(s
->fields
, l1
);
1758 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1759 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1762 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1763 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1766 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1767 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1770 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1771 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1774 potential_page_fault(s
);
1775 vl
= tcg_const_i32(l
);
1776 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1777 tcg_temp_free_i32(vl
);
1781 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1785 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1787 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1788 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1789 potential_page_fault(s
);
1790 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1791 tcg_temp_free_i32(r1
);
1792 tcg_temp_free_i32(r3
);
1797 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1799 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1800 TCGv_i32 t1
= tcg_temp_new_i32();
1801 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1802 potential_page_fault(s
);
1803 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1805 tcg_temp_free_i32(t1
);
1806 tcg_temp_free_i32(m3
);
1810 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1812 potential_page_fault(s
);
1813 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1815 return_low128(o
->in2
);
1819 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1821 TCGv_i64 t
= tcg_temp_new_i64();
1822 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1823 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1824 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1825 tcg_temp_free_i64(t
);
1829 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1831 int r3
= get_field(s
->fields
, r3
);
1832 potential_page_fault(s
);
1833 gen_helper_cs(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1838 static ExitStatus
op_csg(DisasContext
*s
, DisasOps
*o
)
1840 int r3
= get_field(s
->fields
, r3
);
1841 potential_page_fault(s
);
1842 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, regs
[r3
]);
1847 #ifndef CONFIG_USER_ONLY
1848 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1850 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1851 check_privileged(s
);
1852 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1853 tcg_temp_free_i32(r1
);
1859 static ExitStatus
op_cds(DisasContext
*s
, DisasOps
*o
)
1861 int r3
= get_field(s
->fields
, r3
);
1862 TCGv_i64 in3
= tcg_temp_new_i64();
1863 tcg_gen_deposit_i64(in3
, regs
[r3
+ 1], regs
[r3
], 32, 32);
1864 potential_page_fault(s
);
1865 gen_helper_csg(o
->out
, cpu_env
, o
->in1
, o
->in2
, in3
);
1866 tcg_temp_free_i64(in3
);
1871 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1873 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1874 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1875 potential_page_fault(s
);
1876 /* XXX rewrite in tcg */
1877 gen_helper_cdsg(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1882 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1884 TCGv_i64 t1
= tcg_temp_new_i64();
1885 TCGv_i32 t2
= tcg_temp_new_i32();
1886 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1887 gen_helper_cvd(t1
, t2
);
1888 tcg_temp_free_i32(t2
);
1889 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1890 tcg_temp_free_i64(t1
);
1894 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1896 int m3
= get_field(s
->fields
, m3
);
1897 int lab
= gen_new_label();
1901 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1902 if (s
->insn
->data
) {
1903 c
= tcg_unsigned_cond(c
);
1905 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1907 /* Set DXC to 0xff. */
1908 t
= tcg_temp_new_i32();
1909 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1910 tcg_gen_ori_i32(t
, t
, 0xff00);
1911 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1912 tcg_temp_free_i32(t
);
1915 gen_program_exception(s
, PGM_DATA
);
1921 #ifndef CONFIG_USER_ONLY
1922 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1926 check_privileged(s
);
1927 potential_page_fault(s
);
1929 /* We pretend the format is RX_a so that D2 is the field we want. */
1930 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
1931 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
1932 tcg_temp_free_i32(tmp
);
1937 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
1939 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1940 return_low128(o
->out
);
1944 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
1946 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1947 return_low128(o
->out
);
1951 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
1953 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
1954 return_low128(o
->out
);
1958 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
1960 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
1961 return_low128(o
->out
);
1965 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
1967 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1971 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
1973 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1977 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
1979 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1980 return_low128(o
->out2
);
1984 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
1986 int r2
= get_field(s
->fields
, r2
);
1987 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
1991 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
1993 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1997 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
1999 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2000 tb->flags, (ab)use the tb->cs_base field as the address of
2001 the template in memory, and grab 8 bits of tb->flags/cflags for
2002 the contents of the register. We would then recognize all this
2003 in gen_intermediate_code_internal, generating code for exactly
2004 one instruction. This new TB then gets executed normally.
2006 On the other hand, this seems to be mostly used for modifying
2007 MVC inside of memcpy, which needs a helper call anyway. So
2008 perhaps this doesn't bear thinking about any further. */
2015 tmp
= tcg_const_i64(s
->next_pc
);
2016 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2017 tcg_temp_free_i64(tmp
);
2023 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2025 /* We'll use the original input for cc computation, since we get to
2026 compare that against 0, which ought to be better than comparing
2027 the real output against 64. It also lets cc_dst be a convenient
2028 temporary during our computation. */
2029 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2031 /* R1 = IN ? CLZ(IN) : 64. */
2032 gen_helper_clz(o
->out
, o
->in2
);
2034 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2035 value by 64, which is undefined. But since the shift is 64 iff the
2036 input is zero, we still get the correct result after and'ing. */
2037 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2038 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2039 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2043 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2045 int m3
= get_field(s
->fields
, m3
);
2046 int pos
, len
, base
= s
->insn
->data
;
2047 TCGv_i64 tmp
= tcg_temp_new_i64();
2052 /* Effectively a 32-bit load. */
2053 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2060 /* Effectively a 16-bit load. */
2061 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2069 /* Effectively an 8-bit load. */
2070 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2075 pos
= base
+ ctz32(m3
) * 8;
2076 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2077 ccm
= ((1ull << len
) - 1) << pos
;
2081 /* This is going to be a sequence of loads and inserts. */
2082 pos
= base
+ 32 - 8;
2086 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2087 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2088 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2091 m3
= (m3
<< 1) & 0xf;
2097 tcg_gen_movi_i64(tmp
, ccm
);
2098 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2099 tcg_temp_free_i64(tmp
);
2103 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2105 int shift
= s
->insn
->data
& 0xff;
2106 int size
= s
->insn
->data
>> 8;
2107 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2111 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2116 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2118 t1
= tcg_temp_new_i64();
2119 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2120 tcg_gen_shri_i64(t1
, t1
, 36);
2121 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2123 tcg_gen_extu_i32_i64(t1
, cc_op
);
2124 tcg_gen_shli_i64(t1
, t1
, 28);
2125 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2126 tcg_temp_free_i64(t1
);
2130 #ifndef CONFIG_USER_ONLY
2131 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2133 check_privileged(s
);
2134 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2138 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2140 check_privileged(s
);
2141 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2146 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2148 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2152 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2154 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2158 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2160 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2164 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2166 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2170 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2172 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2173 return_low128(o
->out2
);
2177 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2179 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2180 return_low128(o
->out2
);
2184 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2186 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2190 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2192 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2196 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2198 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2202 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2204 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2208 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2210 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2214 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2216 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2220 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2222 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2226 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2228 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2232 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2236 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2239 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2243 TCGv_i32 t32
= tcg_temp_new_i32();
2246 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2249 t
= tcg_temp_new_i64();
2250 tcg_gen_extu_i32_i64(t
, t32
);
2251 tcg_temp_free_i32(t32
);
2253 z
= tcg_const_i64(0);
2254 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2255 tcg_temp_free_i64(t
);
2256 tcg_temp_free_i64(z
);
2262 #ifndef CONFIG_USER_ONLY
2263 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2265 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2266 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2267 check_privileged(s
);
2268 potential_page_fault(s
);
2269 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2270 tcg_temp_free_i32(r1
);
2271 tcg_temp_free_i32(r3
);
2275 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2277 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2278 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2279 check_privileged(s
);
2280 potential_page_fault(s
);
2281 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2282 tcg_temp_free_i32(r1
);
2283 tcg_temp_free_i32(r3
);
2286 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2288 check_privileged(s
);
2289 potential_page_fault(s
);
2290 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2295 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2299 check_privileged(s
);
2301 t1
= tcg_temp_new_i64();
2302 t2
= tcg_temp_new_i64();
2303 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2304 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2305 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2306 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2307 tcg_gen_shli_i64(t1
, t1
, 32);
2308 gen_helper_load_psw(cpu_env
, t1
, t2
);
2309 tcg_temp_free_i64(t1
);
2310 tcg_temp_free_i64(t2
);
2311 return EXIT_NORETURN
;
2314 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2318 check_privileged(s
);
2320 t1
= tcg_temp_new_i64();
2321 t2
= tcg_temp_new_i64();
2322 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2323 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2324 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2325 gen_helper_load_psw(cpu_env
, t1
, t2
);
2326 tcg_temp_free_i64(t1
);
2327 tcg_temp_free_i64(t2
);
2328 return EXIT_NORETURN
;
2332 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2334 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2335 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2336 potential_page_fault(s
);
2337 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2338 tcg_temp_free_i32(r1
);
2339 tcg_temp_free_i32(r3
);
2343 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2345 int r1
= get_field(s
->fields
, r1
);
2346 int r3
= get_field(s
->fields
, r3
);
2347 TCGv_i64 t
= tcg_temp_new_i64();
2348 TCGv_i64 t4
= tcg_const_i64(4);
2351 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2352 store_reg32_i64(r1
, t
);
2356 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2360 tcg_temp_free_i64(t
);
2361 tcg_temp_free_i64(t4
);
2365 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2367 int r1
= get_field(s
->fields
, r1
);
2368 int r3
= get_field(s
->fields
, r3
);
2369 TCGv_i64 t
= tcg_temp_new_i64();
2370 TCGv_i64 t4
= tcg_const_i64(4);
2373 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2374 store_reg32h_i64(r1
, t
);
2378 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2382 tcg_temp_free_i64(t
);
2383 tcg_temp_free_i64(t4
);
2387 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2389 int r1
= get_field(s
->fields
, r1
);
2390 int r3
= get_field(s
->fields
, r3
);
2391 TCGv_i64 t8
= tcg_const_i64(8);
2394 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2398 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2402 tcg_temp_free_i64(t8
);
2406 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2409 o
->g_out
= o
->g_in2
;
2410 TCGV_UNUSED_I64(o
->in2
);
2415 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2419 o
->g_out
= o
->g_in1
;
2420 o
->g_out2
= o
->g_in2
;
2421 TCGV_UNUSED_I64(o
->in1
);
2422 TCGV_UNUSED_I64(o
->in2
);
2423 o
->g_in1
= o
->g_in2
= false;
2427 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2429 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2430 potential_page_fault(s
);
2431 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2432 tcg_temp_free_i32(l
);
2436 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2438 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2439 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2440 potential_page_fault(s
);
2441 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2442 tcg_temp_free_i32(r1
);
2443 tcg_temp_free_i32(r2
);
2448 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2450 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2451 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2452 potential_page_fault(s
);
2453 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2454 tcg_temp_free_i32(r1
);
2455 tcg_temp_free_i32(r3
);
2460 #ifndef CONFIG_USER_ONLY
2461 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2463 int r1
= get_field(s
->fields
, l1
);
2464 check_privileged(s
);
2465 potential_page_fault(s
);
2466 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2471 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2473 int r1
= get_field(s
->fields
, l1
);
2474 check_privileged(s
);
2475 potential_page_fault(s
);
2476 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2482 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2484 potential_page_fault(s
);
2485 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2490 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2492 potential_page_fault(s
);
2493 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2495 return_low128(o
->in2
);
2499 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2501 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2505 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2507 gen_helper_mul128(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2508 return_low128(o
->out2
);
2512 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2514 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2518 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2520 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2524 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2526 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2530 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2532 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2533 return_low128(o
->out2
);
2537 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2539 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2540 return_low128(o
->out2
);
2544 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2546 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2547 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2548 tcg_temp_free_i64(r3
);
2552 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2554 int r3
= get_field(s
->fields
, r3
);
2555 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2559 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2561 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2562 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2563 tcg_temp_free_i64(r3
);
2567 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2569 int r3
= get_field(s
->fields
, r3
);
2570 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2574 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2576 gen_helper_nabs_i64(o
->out
, o
->in2
);
2580 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2582 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2586 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2588 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2592 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2594 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2595 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2599 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2601 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2602 potential_page_fault(s
);
2603 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2604 tcg_temp_free_i32(l
);
2609 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2611 tcg_gen_neg_i64(o
->out
, o
->in2
);
2615 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2617 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2621 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2623 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2627 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2629 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2630 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2634 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2636 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2637 potential_page_fault(s
);
2638 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2639 tcg_temp_free_i32(l
);
2644 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2646 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2650 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2652 int shift
= s
->insn
->data
& 0xff;
2653 int size
= s
->insn
->data
>> 8;
2654 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2657 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2658 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2660 /* Produce the CC from only the bits manipulated. */
2661 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2662 set_cc_nz_u64(s
, cc_dst
);
2666 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2668 gen_helper_popcnt(o
->out
, o
->in2
);
2672 #ifndef CONFIG_USER_ONLY
2673 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2675 check_privileged(s
);
2676 gen_helper_ptlb(cpu_env
);
2681 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2683 int i3
= get_field(s
->fields
, i3
);
2684 int i4
= get_field(s
->fields
, i4
);
2685 int i5
= get_field(s
->fields
, i5
);
2686 int do_zero
= i4
& 0x80;
2687 uint64_t mask
, imask
, pmask
;
2690 /* Adjust the arguments for the specific insn. */
2691 switch (s
->fields
->op2
) {
2692 case 0x55: /* risbg */
2697 case 0x5d: /* risbhg */
2700 pmask
= 0xffffffff00000000ull
;
2702 case 0x51: /* risblg */
2705 pmask
= 0x00000000ffffffffull
;
2711 /* MASK is the set of bits to be inserted from R2.
2712 Take care for I3/I4 wraparound. */
2715 mask
^= pmask
>> i4
>> 1;
2717 mask
|= ~(pmask
>> i4
>> 1);
2721 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2722 insns, we need to keep the other half of the register. */
2723 imask
= ~mask
| ~pmask
;
2725 if (s
->fields
->op2
== 0x55) {
2732 /* In some cases we can implement this with deposit, which can be more
2733 efficient on some hosts. */
2734 if (~mask
== imask
&& i3
<= i4
) {
2735 if (s
->fields
->op2
== 0x5d) {
2738 /* Note that we rotate the bits to be inserted to the lsb, not to
2739 the position as described in the PoO. */
2742 rot
= (i5
- pos
) & 63;
2748 /* Rotate the input as necessary. */
2749 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2751 /* Insert the selected bits into the output. */
2753 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2754 } else if (imask
== 0) {
2755 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2757 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2758 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2759 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2764 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2766 int i3
= get_field(s
->fields
, i3
);
2767 int i4
= get_field(s
->fields
, i4
);
2768 int i5
= get_field(s
->fields
, i5
);
2771 /* If this is a test-only form, arrange to discard the result. */
2773 o
->out
= tcg_temp_new_i64();
2781 /* MASK is the set of bits to be operated on from R2.
2782 Take care for I3/I4 wraparound. */
2785 mask
^= ~0ull >> i4
>> 1;
2787 mask
|= ~(~0ull >> i4
>> 1);
2790 /* Rotate the input as necessary. */
2791 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2794 switch (s
->fields
->op2
) {
2795 case 0x55: /* AND */
2796 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2797 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2800 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2801 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2803 case 0x57: /* XOR */
2804 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2805 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2812 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2813 set_cc_nz_u64(s
, cc_dst
);
2817 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2819 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2823 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2825 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2829 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2831 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2835 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2837 TCGv_i32 t1
= tcg_temp_new_i32();
2838 TCGv_i32 t2
= tcg_temp_new_i32();
2839 TCGv_i32 to
= tcg_temp_new_i32();
2840 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2841 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2842 tcg_gen_rotl_i32(to
, t1
, t2
);
2843 tcg_gen_extu_i32_i64(o
->out
, to
);
2844 tcg_temp_free_i32(t1
);
2845 tcg_temp_free_i32(t2
);
2846 tcg_temp_free_i32(to
);
2850 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2852 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2856 #ifndef CONFIG_USER_ONLY
2857 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2859 check_privileged(s
);
2860 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2865 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2867 check_privileged(s
);
2868 gen_helper_sacf(cpu_env
, o
->in2
);
2869 /* Addressing mode has changed, so end the block. */
2870 return EXIT_PC_STALE
;
2874 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
2876 int r1
= get_field(s
->fields
, r1
);
2877 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
2881 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
2883 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2887 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
2889 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2893 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
2895 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2896 return_low128(o
->out2
);
2900 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
2902 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
2906 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
2908 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
2912 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
2914 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2915 return_low128(o
->out2
);
2919 #ifndef CONFIG_USER_ONLY
2920 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
2922 check_privileged(s
);
2923 potential_page_fault(s
);
2924 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
2929 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
2931 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2932 check_privileged(s
);
2933 potential_page_fault(s
);
2934 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
2935 tcg_temp_free_i32(r1
);
2940 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
2946 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2948 lab
= gen_new_label();
2950 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
2952 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
2956 r1
= get_field(s
->fields
, r1
);
2957 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2958 if (s
->insn
->data
) {
2959 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
2961 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
2963 tcg_temp_free_i64(a
);
2969 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
2971 uint64_t sign
= 1ull << s
->insn
->data
;
2972 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
2973 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
2974 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2975 /* The arithmetic left shift is curious in that it does not affect
2976 the sign bit. Copy that over from the source unchanged. */
2977 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
2978 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
2979 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
2983 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
2985 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
2989 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
2991 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
2995 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
2997 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3001 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3003 gen_helper_sfpc(cpu_env
, o
->in2
);
3007 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3009 gen_helper_sfas(cpu_env
, o
->in2
);
3013 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3015 int b2
= get_field(s
->fields
, b2
);
3016 int d2
= get_field(s
->fields
, d2
);
3017 TCGv_i64 t1
= tcg_temp_new_i64();
3018 TCGv_i64 t2
= tcg_temp_new_i64();
3021 switch (s
->fields
->op2
) {
3022 case 0x99: /* SRNM */
3025 case 0xb8: /* SRNMB */
3028 case 0xb9: /* SRNMT */
3033 mask
= (1 << len
) - 1;
3035 /* Insert the value into the appropriate field of the FPC. */
3037 tcg_gen_movi_i64(t1
, d2
& mask
);
3039 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3040 tcg_gen_andi_i64(t1
, t1
, mask
);
3042 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3043 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3044 tcg_temp_free_i64(t1
);
3046 /* Then install the new FPC to set the rounding mode in fpu_status. */
3047 gen_helper_sfpc(cpu_env
, t2
);
3048 tcg_temp_free_i64(t2
);
3052 #ifndef CONFIG_USER_ONLY
3053 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3055 check_privileged(s
);
3056 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3057 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3061 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3063 check_privileged(s
);
3064 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3068 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3070 check_privileged(s
);
3071 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3075 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3077 check_privileged(s
);
3078 /* ??? Surely cpu address != cpu number. In any case the previous
3079 version of this stored more than the required half-word, so it
3080 is unlikely this has ever been tested. */
3081 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3085 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3087 gen_helper_stck(o
->out
, cpu_env
);
3088 /* ??? We don't implement clock states. */
3089 gen_op_movi_cc(s
, 0);
3093 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3095 TCGv_i64 c1
= tcg_temp_new_i64();
3096 TCGv_i64 c2
= tcg_temp_new_i64();
3097 gen_helper_stck(c1
, cpu_env
);
3098 /* Shift the 64-bit value into its place as a zero-extended
3099 104-bit value. Note that "bit positions 64-103 are always
3100 non-zero so that they compare differently to STCK"; we set
3101 the least significant bit to 1. */
3102 tcg_gen_shli_i64(c2
, c1
, 56);
3103 tcg_gen_shri_i64(c1
, c1
, 8);
3104 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3105 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3106 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3107 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3108 tcg_temp_free_i64(c1
);
3109 tcg_temp_free_i64(c2
);
3110 /* ??? We don't implement clock states. */
3111 gen_op_movi_cc(s
, 0);
3115 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3117 check_privileged(s
);
3118 gen_helper_sckc(cpu_env
, o
->in2
);
3122 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3124 check_privileged(s
);
3125 gen_helper_stckc(o
->out
, cpu_env
);
3129 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3131 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3132 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3133 check_privileged(s
);
3134 potential_page_fault(s
);
3135 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3136 tcg_temp_free_i32(r1
);
3137 tcg_temp_free_i32(r3
);
3141 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3143 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3144 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3145 check_privileged(s
);
3146 potential_page_fault(s
);
3147 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3148 tcg_temp_free_i32(r1
);
3149 tcg_temp_free_i32(r3
);
3153 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3155 check_privileged(s
);
3156 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3160 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3162 check_privileged(s
);
3163 gen_helper_spt(cpu_env
, o
->in2
);
3167 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3170 /* We really ought to have more complete indication of facilities
3171 that we implement. Address this when STFLE is implemented. */
3172 check_privileged(s
);
3173 f
= tcg_const_i64(0xc0000000);
3174 a
= tcg_const_i64(200);
3175 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3176 tcg_temp_free_i64(f
);
3177 tcg_temp_free_i64(a
);
3181 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3183 check_privileged(s
);
3184 gen_helper_stpt(o
->out
, cpu_env
);
3188 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3190 check_privileged(s
);
3191 potential_page_fault(s
);
3192 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3197 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3199 check_privileged(s
);
3200 gen_helper_spx(cpu_env
, o
->in2
);
3204 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3206 check_privileged(s
);
3207 /* Not operational. */
3208 gen_op_movi_cc(s
, 3);
3212 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3214 check_privileged(s
);
3215 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3216 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3220 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3222 uint64_t i2
= get_field(s
->fields
, i2
);
3225 check_privileged(s
);
3227 /* It is important to do what the instruction name says: STORE THEN.
3228 If we let the output hook perform the store then if we fault and
3229 restart, we'll have the wrong SYSTEM MASK in place. */
3230 t
= tcg_temp_new_i64();
3231 tcg_gen_shri_i64(t
, psw_mask
, 56);
3232 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3233 tcg_temp_free_i64(t
);
3235 if (s
->fields
->op
== 0xac) {
3236 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3237 (i2
<< 56) | 0x00ffffffffffffffull
);
3239 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3244 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3246 check_privileged(s
);
3247 potential_page_fault(s
);
3248 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3253 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3255 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3259 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3261 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3265 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3267 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3271 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3273 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3277 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3279 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3280 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3281 potential_page_fault(s
);
3282 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3283 tcg_temp_free_i32(r1
);
3284 tcg_temp_free_i32(r3
);
3288 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3290 int m3
= get_field(s
->fields
, m3
);
3291 int pos
, base
= s
->insn
->data
;
3292 TCGv_i64 tmp
= tcg_temp_new_i64();
3294 pos
= base
+ ctz32(m3
) * 8;
3297 /* Effectively a 32-bit store. */
3298 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3299 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3305 /* Effectively a 16-bit store. */
3306 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3307 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3314 /* Effectively an 8-bit store. */
3315 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3316 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3320 /* This is going to be a sequence of shifts and stores. */
3321 pos
= base
+ 32 - 8;
3324 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3325 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3326 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3328 m3
= (m3
<< 1) & 0xf;
3333 tcg_temp_free_i64(tmp
);
3337 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3339 int r1
= get_field(s
->fields
, r1
);
3340 int r3
= get_field(s
->fields
, r3
);
3341 int size
= s
->insn
->data
;
3342 TCGv_i64 tsize
= tcg_const_i64(size
);
3346 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3348 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3353 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3357 tcg_temp_free_i64(tsize
);
3361 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3363 int r1
= get_field(s
->fields
, r1
);
3364 int r3
= get_field(s
->fields
, r3
);
3365 TCGv_i64 t
= tcg_temp_new_i64();
3366 TCGv_i64 t4
= tcg_const_i64(4);
3367 TCGv_i64 t32
= tcg_const_i64(32);
3370 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3371 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3375 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3379 tcg_temp_free_i64(t
);
3380 tcg_temp_free_i64(t4
);
3381 tcg_temp_free_i64(t32
);
3385 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3387 potential_page_fault(s
);
3388 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3390 return_low128(o
->in2
);
3394 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3396 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3400 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3405 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3407 /* The !borrow flag is the msb of CC. Since we want the inverse of
3408 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3409 disas_jcc(s
, &cmp
, 8 | 4);
3410 borrow
= tcg_temp_new_i64();
3412 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3414 TCGv_i32 t
= tcg_temp_new_i32();
3415 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3416 tcg_gen_extu_i32_i64(borrow
, t
);
3417 tcg_temp_free_i32(t
);
3421 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3422 tcg_temp_free_i64(borrow
);
3426 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3433 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3434 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3435 tcg_temp_free_i32(t
);
3437 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3438 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3439 tcg_temp_free_i32(t
);
3441 gen_exception(EXCP_SVC
);
3442 return EXIT_NORETURN
;
3445 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3447 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3452 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3454 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3459 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3461 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3466 #ifndef CONFIG_USER_ONLY
3467 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3469 potential_page_fault(s
);
3470 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3476 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3478 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3479 potential_page_fault(s
);
3480 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3481 tcg_temp_free_i32(l
);
3486 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3488 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3489 potential_page_fault(s
);
3490 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3491 tcg_temp_free_i32(l
);
3495 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3497 int d1
= get_field(s
->fields
, d1
);
3498 int d2
= get_field(s
->fields
, d2
);
3499 int b1
= get_field(s
->fields
, b1
);
3500 int b2
= get_field(s
->fields
, b2
);
3501 int l
= get_field(s
->fields
, l1
);
3504 o
->addr1
= get_address(s
, 0, b1
, d1
);
3506 /* If the addresses are identical, this is a store/memset of zero. */
3507 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3508 o
->in2
= tcg_const_i64(0);
3512 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3515 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3519 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3522 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3526 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3529 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3533 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3535 gen_op_movi_cc(s
, 0);
3539 /* But in general we'll defer to a helper. */
3540 o
->in2
= get_address(s
, 0, b2
, d2
);
3541 t32
= tcg_const_i32(l
);
3542 potential_page_fault(s
);
3543 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3544 tcg_temp_free_i32(t32
);
3549 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3551 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3555 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3557 int shift
= s
->insn
->data
& 0xff;
3558 int size
= s
->insn
->data
>> 8;
3559 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3562 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3563 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3565 /* Produce the CC from only the bits manipulated. */
3566 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3567 set_cc_nz_u64(s
, cc_dst
);
3571 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3573 o
->out
= tcg_const_i64(0);
3577 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3579 o
->out
= tcg_const_i64(0);
3585 /* ====================================================================== */
3586 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3587 the original inputs), update the various cc data structures in order to
3588 be able to compute the new condition code. */
3590 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3592 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3595 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3597 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3600 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3602 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3605 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3607 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3610 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3612 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3615 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3617 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3620 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3622 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3625 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3627 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3630 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3632 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3635 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3637 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3640 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3642 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3645 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3647 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3650 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3652 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3655 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3657 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3660 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3662 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3665 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3667 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3670 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3672 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3675 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3677 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3680 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3682 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3685 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3687 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3688 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3691 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3693 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3696 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3698 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3701 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3703 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3706 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3708 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3711 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3713 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3716 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3718 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3721 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3723 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3726 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3728 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3731 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3733 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3736 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3738 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3741 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3743 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3746 /* ====================================================================== */
3747 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3748 with the TCG register to which we will write. Used in combination with
3749 the "wout" generators, in some cases we need a new temporary, and in
3750 some cases we can write to a TCG global. */
3752 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3754 o
->out
= tcg_temp_new_i64();
3756 #define SPEC_prep_new 0
3758 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3760 o
->out
= tcg_temp_new_i64();
3761 o
->out2
= tcg_temp_new_i64();
3763 #define SPEC_prep_new_P 0
3765 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3767 o
->out
= regs
[get_field(f
, r1
)];
3770 #define SPEC_prep_r1 0
3772 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3774 int r1
= get_field(f
, r1
);
3776 o
->out2
= regs
[r1
+ 1];
3777 o
->g_out
= o
->g_out2
= true;
3779 #define SPEC_prep_r1_P SPEC_r1_even
3781 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3783 o
->out
= fregs
[get_field(f
, r1
)];
3786 #define SPEC_prep_f1 0
3788 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3790 int r1
= get_field(f
, r1
);
3792 o
->out2
= fregs
[r1
+ 2];
3793 o
->g_out
= o
->g_out2
= true;
3795 #define SPEC_prep_x1 SPEC_r1_f128
3797 /* ====================================================================== */
3798 /* The "Write OUTput" generators. These generally perform some non-trivial
3799 copy of data to TCG globals, or to main memory. The trivial cases are
3800 generally handled by having a "prep" generator install the TCG global
3801 as the destination of the operation. */
3803 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3805 store_reg(get_field(f
, r1
), o
->out
);
3807 #define SPEC_wout_r1 0
3809 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3811 int r1
= get_field(f
, r1
);
3812 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3814 #define SPEC_wout_r1_8 0
3816 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3818 int r1
= get_field(f
, r1
);
3819 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3821 #define SPEC_wout_r1_16 0
3823 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3825 store_reg32_i64(get_field(f
, r1
), o
->out
);
3827 #define SPEC_wout_r1_32 0
3829 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3831 int r1
= get_field(f
, r1
);
3832 store_reg32_i64(r1
, o
->out
);
3833 store_reg32_i64(r1
+ 1, o
->out2
);
3835 #define SPEC_wout_r1_P32 SPEC_r1_even
3837 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3839 int r1
= get_field(f
, r1
);
3840 store_reg32_i64(r1
+ 1, o
->out
);
3841 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
3842 store_reg32_i64(r1
, o
->out
);
3844 #define SPEC_wout_r1_D32 SPEC_r1_even
3846 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3848 store_freg32_i64(get_field(f
, r1
), o
->out
);
3850 #define SPEC_wout_e1 0
3852 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3854 store_freg(get_field(f
, r1
), o
->out
);
3856 #define SPEC_wout_f1 0
3858 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3860 int f1
= get_field(s
->fields
, r1
);
3861 store_freg(f1
, o
->out
);
3862 store_freg(f1
+ 2, o
->out2
);
3864 #define SPEC_wout_x1 SPEC_r1_f128
3866 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3868 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3869 store_reg32_i64(get_field(f
, r1
), o
->out
);
3872 #define SPEC_wout_cond_r1r2_32 0
3874 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3876 if (get_field(f
, r1
) != get_field(f
, r2
)) {
3877 store_freg32_i64(get_field(f
, r1
), o
->out
);
3880 #define SPEC_wout_cond_e1e2 0
3882 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3884 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
3886 #define SPEC_wout_m1_8 0
3888 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3890 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
3892 #define SPEC_wout_m1_16 0
3894 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3896 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
3898 #define SPEC_wout_m1_32 0
3900 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3902 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
3904 #define SPEC_wout_m1_64 0
3906 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3908 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
3910 #define SPEC_wout_m2_32 0
3912 /* ====================================================================== */
3913 /* The "INput 1" generators. These load the first operand to an insn. */
3915 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3917 o
->in1
= load_reg(get_field(f
, r1
));
3919 #define SPEC_in1_r1 0
3921 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3923 o
->in1
= regs
[get_field(f
, r1
)];
3926 #define SPEC_in1_r1_o 0
3928 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3930 o
->in1
= tcg_temp_new_i64();
3931 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3933 #define SPEC_in1_r1_32s 0
3935 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3937 o
->in1
= tcg_temp_new_i64();
3938 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
3940 #define SPEC_in1_r1_32u 0
3942 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3944 o
->in1
= tcg_temp_new_i64();
3945 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
3947 #define SPEC_in1_r1_sr32 0
3949 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3951 o
->in1
= load_reg(get_field(f
, r1
) + 1);
3953 #define SPEC_in1_r1p1 SPEC_r1_even
3955 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3957 o
->in1
= tcg_temp_new_i64();
3958 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
3960 #define SPEC_in1_r1p1_32s SPEC_r1_even
3962 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3964 o
->in1
= tcg_temp_new_i64();
3965 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
3967 #define SPEC_in1_r1p1_32u SPEC_r1_even
3969 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3971 int r1
= get_field(f
, r1
);
3972 o
->in1
= tcg_temp_new_i64();
3973 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
3975 #define SPEC_in1_r1_D32 SPEC_r1_even
3977 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3979 o
->in1
= load_reg(get_field(f
, r2
));
3981 #define SPEC_in1_r2 0
3983 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3985 o
->in1
= load_reg(get_field(f
, r3
));
3987 #define SPEC_in1_r3 0
3989 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3991 o
->in1
= regs
[get_field(f
, r3
)];
3994 #define SPEC_in1_r3_o 0
3996 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3998 o
->in1
= tcg_temp_new_i64();
3999 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4001 #define SPEC_in1_r3_32s 0
4003 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4005 o
->in1
= tcg_temp_new_i64();
4006 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
4008 #define SPEC_in1_r3_32u 0
4010 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4012 o
->in1
= load_freg32_i64(get_field(f
, r1
));
4014 #define SPEC_in1_e1 0
4016 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4018 o
->in1
= fregs
[get_field(f
, r1
)];
4021 #define SPEC_in1_f1_o 0
4023 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4025 int r1
= get_field(f
, r1
);
4027 o
->out2
= fregs
[r1
+ 2];
4028 o
->g_out
= o
->g_out2
= true;
4030 #define SPEC_in1_x1_o SPEC_r1_f128
4032 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4034 o
->in1
= fregs
[get_field(f
, r3
)];
4037 #define SPEC_in1_f3_o 0
4039 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4041 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
4043 #define SPEC_in1_la1 0
4045 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4047 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
4048 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
4050 #define SPEC_in1_la2 0
4052 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4055 o
->in1
= tcg_temp_new_i64();
4056 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
4058 #define SPEC_in1_m1_8u 0
4060 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4063 o
->in1
= tcg_temp_new_i64();
4064 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
4066 #define SPEC_in1_m1_16s 0
4068 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4071 o
->in1
= tcg_temp_new_i64();
4072 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
4074 #define SPEC_in1_m1_16u 0
4076 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4079 o
->in1
= tcg_temp_new_i64();
4080 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
4082 #define SPEC_in1_m1_32s 0
4084 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4087 o
->in1
= tcg_temp_new_i64();
4088 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
4090 #define SPEC_in1_m1_32u 0
4092 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4095 o
->in1
= tcg_temp_new_i64();
4096 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
4098 #define SPEC_in1_m1_64 0
4100 /* ====================================================================== */
4101 /* The "INput 2" generators. These load the second operand to an insn. */
4103 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4105 o
->in2
= regs
[get_field(f
, r1
)];
4108 #define SPEC_in2_r1_o 0
4110 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4112 o
->in2
= tcg_temp_new_i64();
4113 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4115 #define SPEC_in2_r1_16u 0
4117 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4119 o
->in2
= tcg_temp_new_i64();
4120 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
4122 #define SPEC_in2_r1_32u 0
4124 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4126 o
->in2
= load_reg(get_field(f
, r2
));
4128 #define SPEC_in2_r2 0
4130 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4132 o
->in2
= regs
[get_field(f
, r2
)];
4135 #define SPEC_in2_r2_o 0
4137 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4139 int r2
= get_field(f
, r2
);
4141 o
->in2
= load_reg(r2
);
4144 #define SPEC_in2_r2_nz 0
4146 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4148 o
->in2
= tcg_temp_new_i64();
4149 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4151 #define SPEC_in2_r2_8s 0
4153 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4155 o
->in2
= tcg_temp_new_i64();
4156 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4158 #define SPEC_in2_r2_8u 0
4160 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4162 o
->in2
= tcg_temp_new_i64();
4163 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4165 #define SPEC_in2_r2_16s 0
4167 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4169 o
->in2
= tcg_temp_new_i64();
4170 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4172 #define SPEC_in2_r2_16u 0
4174 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4176 o
->in2
= load_reg(get_field(f
, r3
));
4178 #define SPEC_in2_r3 0
4180 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4182 o
->in2
= tcg_temp_new_i64();
4183 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4185 #define SPEC_in2_r2_32s 0
4187 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4189 o
->in2
= tcg_temp_new_i64();
4190 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
4192 #define SPEC_in2_r2_32u 0
4194 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4196 o
->in2
= load_freg32_i64(get_field(f
, r2
));
4198 #define SPEC_in2_e2 0
4200 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4202 o
->in2
= fregs
[get_field(f
, r2
)];
4205 #define SPEC_in2_f2_o 0
4207 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4209 int r2
= get_field(f
, r2
);
4211 o
->in2
= fregs
[r2
+ 2];
4212 o
->g_in1
= o
->g_in2
= true;
4214 #define SPEC_in2_x2_o SPEC_r2_f128
/* in2 <- address taken directly from GPR r2 (no index, no
   displacement), masked per the current addressing mode. */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
/* in2 <- effective address from the insn's x2 (optional index), b2
   (base) and d2 (displacement) fields. */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Formats without an index register simply omit the x2 field. */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0
/* in2 <- PC-relative address: current PC plus the signed i2 immediate
   scaled by 2 (i2 counts halfwords). */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
/* in2 <- shift amount for a 32-bit shift, via help_l2_shift with
   mask 31 (keeps the count in 0..31). */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0
/* in2 <- shift amount for a 64-bit shift, via help_l2_shift with
   mask 63 (keeps the count in 0..63). */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
4247 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4250 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
4252 #define SPEC_in2_m2_8u 0
4254 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4257 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
4259 #define SPEC_in2_m2_16s 0
4261 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4264 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4266 #define SPEC_in2_m2_16u 0
4268 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4271 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4273 #define SPEC_in2_m2_32s 0
4275 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4278 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4280 #define SPEC_in2_m2_32u 0
4282 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4285 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4287 #define SPEC_in2_m2_64 0
4289 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4292 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
4294 #define SPEC_in2_mri2_16u 0
4296 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4299 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
4301 #define SPEC_in2_mri2_32s 0
4303 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4306 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
4308 #define SPEC_in2_mri2_32u 0
4310 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4313 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
4315 #define SPEC_in2_mri2_64 0
/* in2 <- the i2 immediate as extracted (sign handling was applied by
   extract_field per the field's type). */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0
/* in2 <- low 8 bits of the i2 immediate, zero-extended. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0
/* in2 <- low 16 bits of the i2 immediate, zero-extended. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0
/* in2 <- low 32 bits of the i2 immediate, zero-extended. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
/* in2 <- 16-bit unsigned i2 immediate shifted left by the per-insn
   amount stored in the insn table's data field (e.g. for IILH/IIHH-
   style positioned immediates). */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Widen before shifting so the shift happens in 64 bits. */
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
/* in2 <- 32-bit unsigned i2 immediate shifted left by the per-insn
   amount stored in the insn table's data field. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Widen before shifting so the shift happens in 64 bits. */
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table. */

/* C() is D() with the extra per-insn data argument defaulted to 0. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* Expansion 1 of insn-data.def: one enumerator per instruction. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"

/* NOTE(review): this extraction dropped lines here (the enum
   terminator, an #undef D, and several initializer fields of the
   entry below) — confirm against the full file. */

/* Expansion 2: one DisasInsn table entry per instruction; the help_*
   members are the operand/implementation callbacks invoked in order
   by translate_one. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \

/* Allow 0 to be used for NULL in the table below. */
#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"

/* Expansion 3: switch cases mapping opcode -> table entry, used inside
   lookup_opc below. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];
4408 static const DisasInsn
*lookup_opc(uint16_t opc
)
4411 #include "insn-data.def"
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation. */
/* NOTE(review): this extraction dropped structural lines here (opening
   brace, local declarations, the switch header on the field type,
   break statements, closing braces) — confirm against the full file;
   the statements below are the visible remainder. */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);
    /* Sign-extend, or un-swap the field as necessary. */
    case 0: /* unsigned */
    case 1: /* signed */
        assert(f->size <= 32);
        /* m is the sign bit of the field at its natural width. */
        m = 1u << (f->size - 1);
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble the 20-bit displacement from its dl/dh halves. */
        r = ((int8_t)r << 12) | (r >> 8);
    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;
    o->c[f->indexC] = r;
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */
/* NOTE(review): this extraction dropped lines throughout this function
   (parameter-list continuation, braces, the ilen switch header, break
   statements, local declarations) — confirm against the full file. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
    uint64_t insn, pc = s->pc;
    const DisasInsn *info;

    /* The first halfword determines the major opcode and insn length. */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* 4-byte insn: the full word, left-aligned in the u64. */
        insn = ld_code4(env, pc) << 32;
    /* 6-byte insn: first halfword plus the following four bytes. */
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte. */
        op2 = (insn << 8) >> 56;
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte. */
        op2 = (insn << 12) >> 60;
    case 0xd0 ... 0xdf: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* Default: secondary opcode at bit 40. */
        op2 = (insn << 40) >> 56;

    memset(f, 0, sizeof(*f));

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
        DisasFormat fmt = info->fmt;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
/* Decode and translate one guest instruction at s->pc: look it up,
   raise a specification exception for malformed operands, run the
   per-insn operand/implementation/writeback callbacks, then free the
   temporaries they produced. */
/* NOTE(review): lines are elided throughout this function in this
   extraction (braces, the odd-register / f128-range conditions, some
   local declarations) — confirm against the full file. */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
        gen_illegal_opcode(s);
        return EXIT_NORETURN;

    /* Check for insn specification exceptions. */
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
                excp = PGM_SPECIFICATION;
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
                excp = PGM_SPECIFICATION;
            gen_program_exception(s, excp);
            return EXIT_NORETURN;

    /* Set up the structures we use to communicate with the helpers. */
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction: operand setup, prep, the operation
       itself, then writeback and condition-code update. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    if (insn->help_cout) {
        insn->help_cout(s, &o);

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias TCG globals and must not be freed. */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);

    /* Advance to the next instruction. */
/* Translate a block of guest code into TCG ops, stopping at a page
   boundary, opcode-buffer exhaustion, instruction-count limit, or
   single-step.  The search-pc variant also records per-op PC and
   cc_op state for restore_state_to_opc. */
/* NOTE(review): many lines are elided in this extraction (local
   declarations, the translation loop header, branches of the final
   status handling) — confirm against the full file. */
static inline void gen_intermediate_code_internal(CPUS390XState *env,
                                                  TranslationBlock *tb,
    target_ulong pc_start;
    uint64_t next_page_start;
    uint16_t *gen_opc_end;
    int num_insns, max_insns;

    if (!(tb->flags & FLAG_MASK_64)) {
        /* Outside 64-bit mode the PC is confined to the low 31 bits. */
        pc_start &= 0x7fffffff;

    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = env->singlestep_enabled;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;

            /* search-pc bookkeeping: map opcode-buffer slots to guest
               PC and cc_op so state can be reconstructed on fault. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
                tcg_ctx.gen_opc_instr_start[lj++] = 0;
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        /* Stop before a breakpoint so the debug exception fires at it. */
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || env->singlestep_enabled)) {
            status = EXIT_PC_STALE;
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        update_psw_addr(&dc);
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        /* Exit the TB, either by raising a debug exception or by return. */
            gen_exception(EXCP_DEBUG);
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry point: translate a TB without recording search-pc data. */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Public entry point: translate a TB while recording per-op PC data
   (used when restoring CPU state after a fault inside the TB). */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
4797 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
4800 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
4801 cc_op
= gen_opc_cc_op
[pc_pos
];
4802 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {