4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env
;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext
;
50 typedef struct DisasInsn DisasInsn
;
51 typedef struct DisasFields DisasFields
;
54 struct TranslationBlock
*tb
;
55 const DisasInsn
*insn
;
59 bool singlestep_enabled
;
62 /* Information carried about a condition to be evaluated. */
69 struct { TCGv_i64 a
, b
; } s64
;
70 struct { TCGv_i32 a
, b
; } s32
;
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit
[CC_OP_MAX
];
78 static uint64_t inline_branch_miss
[CC_OP_MAX
];
81 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
83 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
84 if (s
->tb
->flags
& FLAG_MASK_32
) {
85 return pc
| 0x80000000;
91 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
94 S390CPU
*cpu
= S390_CPU(cs
);
95 CPUS390XState
*env
= &cpu
->env
;
99 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
100 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
102 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
103 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
106 for (i
= 0; i
< 16; i
++) {
107 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
109 cpu_fprintf(f
, "\n");
115 for (i
= 0; i
< 16; i
++) {
116 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
118 cpu_fprintf(f
, "\n");
124 for (i
= 0; i
< 32; i
++) {
125 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
126 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
127 cpu_fprintf(f
, (i
% 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
131 for (i
= 0; i
< 16; i
++) {
132 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
134 cpu_fprintf(f
, "\n");
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i
= 0; i
< CC_OP_MAX
; i
++) {
143 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
144 inline_branch_miss
[i
], inline_branch_hit
[i
]);
148 cpu_fprintf(f
, "\n");
151 static TCGv_i64 psw_addr
;
152 static TCGv_i64 psw_mask
;
154 static TCGv_i32 cc_op
;
155 static TCGv_i64 cc_src
;
156 static TCGv_i64 cc_dst
;
157 static TCGv_i64 cc_vr
;
159 static char cpu_reg_names
[32][4];
160 static TCGv_i64 regs
[16];
161 static TCGv_i64 fregs
[16];
163 static uint8_t gen_opc_cc_op
[OPC_BUF_SIZE
];
165 void s390x_translate_init(void)
169 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
170 psw_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
171 offsetof(CPUS390XState
, psw
.addr
),
173 psw_mask
= tcg_global_mem_new_i64(TCG_AREG0
,
174 offsetof(CPUS390XState
, psw
.mask
),
177 cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUS390XState
, cc_op
),
179 cc_src
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_src
),
181 cc_dst
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_dst
),
183 cc_vr
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUS390XState
, cc_vr
),
186 for (i
= 0; i
< 16; i
++) {
187 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
188 regs
[i
] = tcg_global_mem_new(TCG_AREG0
,
189 offsetof(CPUS390XState
, regs
[i
]),
193 for (i
= 0; i
< 16; i
++) {
194 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
195 fregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
196 offsetof(CPUS390XState
, vregs
[i
][0].d
),
197 cpu_reg_names
[i
+ 16]);
201 static TCGv_i64
load_reg(int reg
)
203 TCGv_i64 r
= tcg_temp_new_i64();
204 tcg_gen_mov_i64(r
, regs
[reg
]);
208 static TCGv_i64
load_freg32_i64(int reg
)
210 TCGv_i64 r
= tcg_temp_new_i64();
211 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
215 static void store_reg(int reg
, TCGv_i64 v
)
217 tcg_gen_mov_i64(regs
[reg
], v
);
220 static void store_freg(int reg
, TCGv_i64 v
)
222 tcg_gen_mov_i64(fregs
[reg
], v
);
225 static void store_reg32_i64(int reg
, TCGv_i64 v
)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
231 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
233 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
236 static void store_freg32_i64(int reg
, TCGv_i64 v
)
238 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
241 static void return_low128(TCGv_i64 dest
)
243 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
246 static void update_psw_addr(DisasContext
*s
)
249 tcg_gen_movi_i64(psw_addr
, s
->pc
);
252 static void update_cc_op(DisasContext
*s
)
254 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
255 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
259 static void potential_page_fault(DisasContext
*s
)
265 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
267 return (uint64_t)cpu_lduw_code(env
, pc
);
270 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
275 static int get_mem_index(DisasContext
*s
)
277 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
278 case PSW_ASC_PRIMARY
>> 32:
280 case PSW_ASC_SECONDARY
>> 32:
282 case PSW_ASC_HOME
>> 32:
290 static void gen_exception(int excp
)
292 TCGv_i32 tmp
= tcg_const_i32(excp
);
293 gen_helper_exception(cpu_env
, tmp
);
294 tcg_temp_free_i32(tmp
);
297 static void gen_program_exception(DisasContext
*s
, int code
)
301 /* Remember what pgm exeption this was. */
302 tmp
= tcg_const_i32(code
);
303 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
304 tcg_temp_free_i32(tmp
);
306 tmp
= tcg_const_i32(s
->next_pc
- s
->pc
);
307 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
308 tcg_temp_free_i32(tmp
);
310 /* Advance past instruction. */
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM
);
321 static inline void gen_illegal_opcode(DisasContext
*s
)
323 gen_program_exception(s
, PGM_SPECIFICATION
);
326 #ifndef CONFIG_USER_ONLY
327 static void check_privileged(DisasContext
*s
)
329 if (s
->tb
->flags
& (PSW_MASK_PSTATE
>> 32)) {
330 gen_program_exception(s
, PGM_PRIVILEGED
);
335 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
337 TCGv_i64 tmp
= tcg_temp_new_i64();
338 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
340 /* Note that d2 is limited to 20 bits, signed. If we crop negative
341 displacements early we create larger immedate addends. */
343 /* Note that addi optimizes the imm==0 case. */
345 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
346 tcg_gen_addi_i64(tmp
, tmp
, d2
);
348 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
350 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
356 tcg_gen_movi_i64(tmp
, d2
);
359 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
365 static inline bool live_cc_data(DisasContext
*s
)
367 return (s
->cc_op
!= CC_OP_DYNAMIC
368 && s
->cc_op
!= CC_OP_STATIC
372 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
374 if (live_cc_data(s
)) {
375 tcg_gen_discard_i64(cc_src
);
376 tcg_gen_discard_i64(cc_dst
);
377 tcg_gen_discard_i64(cc_vr
);
379 s
->cc_op
= CC_OP_CONST0
+ val
;
382 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
384 if (live_cc_data(s
)) {
385 tcg_gen_discard_i64(cc_src
);
386 tcg_gen_discard_i64(cc_vr
);
388 tcg_gen_mov_i64(cc_dst
, dst
);
392 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
395 if (live_cc_data(s
)) {
396 tcg_gen_discard_i64(cc_vr
);
398 tcg_gen_mov_i64(cc_src
, src
);
399 tcg_gen_mov_i64(cc_dst
, dst
);
403 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
404 TCGv_i64 dst
, TCGv_i64 vr
)
406 tcg_gen_mov_i64(cc_src
, src
);
407 tcg_gen_mov_i64(cc_dst
, dst
);
408 tcg_gen_mov_i64(cc_vr
, vr
);
412 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
414 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
417 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
419 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
422 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
424 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
427 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
429 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
432 /* CC value is in env->cc_op */
433 static void set_cc_static(DisasContext
*s
)
435 if (live_cc_data(s
)) {
436 tcg_gen_discard_i64(cc_src
);
437 tcg_gen_discard_i64(cc_dst
);
438 tcg_gen_discard_i64(cc_vr
);
440 s
->cc_op
= CC_OP_STATIC
;
443 /* calculates cc into cc_op */
444 static void gen_op_calc_cc(DisasContext
*s
)
446 TCGv_i32 local_cc_op
;
449 TCGV_UNUSED_I32(local_cc_op
);
450 TCGV_UNUSED_I64(dummy
);
453 dummy
= tcg_const_i64(0);
467 local_cc_op
= tcg_const_i32(s
->cc_op
);
483 /* s->cc_op is the cc value */
484 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
487 /* env->cc_op already is the cc value */
502 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
507 case CC_OP_LTUGTU_32
:
508 case CC_OP_LTUGTU_64
:
515 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
530 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
533 /* unknown operation - assume 3 arguments and cc_op in env */
534 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
540 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
541 tcg_temp_free_i32(local_cc_op
);
543 if (!TCGV_IS_UNUSED_I64(dummy
)) {
544 tcg_temp_free_i64(dummy
);
547 /* We now have cc in cc_op as constant */
551 static int use_goto_tb(DisasContext
*s
, uint64_t dest
)
553 /* NOTE: we handle the case where the TB spans two pages here */
554 return (((dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
)
555 || (dest
& TARGET_PAGE_MASK
) == ((s
->pc
- 1) & TARGET_PAGE_MASK
))
556 && !s
->singlestep_enabled
557 && !(s
->tb
->cflags
& CF_LAST_IO
));
560 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
562 #ifdef DEBUG_INLINE_BRANCHES
563 inline_branch_miss
[cc_op
]++;
567 static void account_inline_branch(DisasContext
*s
, int cc_op
)
569 #ifdef DEBUG_INLINE_BRANCHES
570 inline_branch_hit
[cc_op
]++;
574 /* Table of mask values to comparison codes, given a comparison as input.
575 For such, CC=3 should not be possible. */
576 static const TCGCond ltgt_cond
[16] = {
577 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
578 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
579 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
580 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
581 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
582 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
583 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
584 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
587 /* Table of mask values to comparison codes, given a logic op as input.
588 For such, only CC=0 and CC=1 should be possible. */
589 static const TCGCond nz_cond
[16] = {
590 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
591 TCG_COND_NEVER
, TCG_COND_NEVER
,
592 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
593 TCG_COND_NE
, TCG_COND_NE
,
594 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
595 TCG_COND_EQ
, TCG_COND_EQ
,
596 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
597 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
600 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
601 details required to generate a TCG comparison. */
602 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
605 enum cc_op old_cc_op
= s
->cc_op
;
607 if (mask
== 15 || mask
== 0) {
608 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
611 c
->g1
= c
->g2
= true;
616 /* Find the TCG condition for the mask + cc op. */
622 cond
= ltgt_cond
[mask
];
623 if (cond
== TCG_COND_NEVER
) {
626 account_inline_branch(s
, old_cc_op
);
629 case CC_OP_LTUGTU_32
:
630 case CC_OP_LTUGTU_64
:
631 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
632 if (cond
== TCG_COND_NEVER
) {
635 account_inline_branch(s
, old_cc_op
);
639 cond
= nz_cond
[mask
];
640 if (cond
== TCG_COND_NEVER
) {
643 account_inline_branch(s
, old_cc_op
);
658 account_inline_branch(s
, old_cc_op
);
673 account_inline_branch(s
, old_cc_op
);
677 switch (mask
& 0xa) {
678 case 8: /* src == 0 -> no one bit found */
681 case 2: /* src != 0 -> one bit found */
687 account_inline_branch(s
, old_cc_op
);
693 case 8 | 2: /* vr == 0 */
696 case 4 | 1: /* vr != 0 */
699 case 8 | 4: /* no carry -> vr >= src */
702 case 2 | 1: /* carry -> vr < src */
708 account_inline_branch(s
, old_cc_op
);
713 /* Note that CC=0 is impossible; treat it as dont-care. */
715 case 2: /* zero -> op1 == op2 */
718 case 4 | 1: /* !zero -> op1 != op2 */
721 case 4: /* borrow (!carry) -> op1 < op2 */
724 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
730 account_inline_branch(s
, old_cc_op
);
735 /* Calculate cc value. */
740 /* Jump based on CC. We'll load up the real cond below;
741 the assignment here merely avoids a compiler warning. */
742 account_noninline_branch(s
, old_cc_op
);
743 old_cc_op
= CC_OP_STATIC
;
744 cond
= TCG_COND_NEVER
;
748 /* Load up the arguments of the comparison. */
750 c
->g1
= c
->g2
= false;
754 c
->u
.s32
.a
= tcg_temp_new_i32();
755 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_dst
);
756 c
->u
.s32
.b
= tcg_const_i32(0);
759 case CC_OP_LTUGTU_32
:
762 c
->u
.s32
.a
= tcg_temp_new_i32();
763 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_src
);
764 c
->u
.s32
.b
= tcg_temp_new_i32();
765 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_dst
);
772 c
->u
.s64
.b
= tcg_const_i64(0);
776 case CC_OP_LTUGTU_64
:
780 c
->g1
= c
->g2
= true;
786 c
->u
.s64
.a
= tcg_temp_new_i64();
787 c
->u
.s64
.b
= tcg_const_i64(0);
788 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
793 c
->u
.s32
.a
= tcg_temp_new_i32();
794 c
->u
.s32
.b
= tcg_temp_new_i32();
795 tcg_gen_trunc_i64_i32(c
->u
.s32
.a
, cc_vr
);
796 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
797 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
799 tcg_gen_trunc_i64_i32(c
->u
.s32
.b
, cc_src
);
806 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
807 c
->u
.s64
.b
= tcg_const_i64(0);
819 case 0x8 | 0x4 | 0x2: /* cc != 3 */
821 c
->u
.s32
.b
= tcg_const_i32(3);
823 case 0x8 | 0x4 | 0x1: /* cc != 2 */
825 c
->u
.s32
.b
= tcg_const_i32(2);
827 case 0x8 | 0x2 | 0x1: /* cc != 1 */
829 c
->u
.s32
.b
= tcg_const_i32(1);
831 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
834 c
->u
.s32
.a
= tcg_temp_new_i32();
835 c
->u
.s32
.b
= tcg_const_i32(0);
836 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
838 case 0x8 | 0x4: /* cc < 2 */
840 c
->u
.s32
.b
= tcg_const_i32(2);
842 case 0x8: /* cc == 0 */
844 c
->u
.s32
.b
= tcg_const_i32(0);
846 case 0x4 | 0x2 | 0x1: /* cc != 0 */
848 c
->u
.s32
.b
= tcg_const_i32(0);
850 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
853 c
->u
.s32
.a
= tcg_temp_new_i32();
854 c
->u
.s32
.b
= tcg_const_i32(0);
855 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
857 case 0x4: /* cc == 1 */
859 c
->u
.s32
.b
= tcg_const_i32(1);
861 case 0x2 | 0x1: /* cc > 1 */
863 c
->u
.s32
.b
= tcg_const_i32(1);
865 case 0x2: /* cc == 2 */
867 c
->u
.s32
.b
= tcg_const_i32(2);
869 case 0x1: /* cc == 3 */
871 c
->u
.s32
.b
= tcg_const_i32(3);
874 /* CC is masked by something else: (8 >> cc) & mask. */
877 c
->u
.s32
.a
= tcg_const_i32(8);
878 c
->u
.s32
.b
= tcg_const_i32(0);
879 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
880 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
891 static void free_compare(DisasCompare
*c
)
895 tcg_temp_free_i64(c
->u
.s64
.a
);
897 tcg_temp_free_i32(c
->u
.s32
.a
);
902 tcg_temp_free_i64(c
->u
.s64
.b
);
904 tcg_temp_free_i32(c
->u
.s32
.b
);
909 /* ====================================================================== */
910 /* Define the insn format enumeration. */
911 #define F0(N) FMT_##N,
912 #define F1(N, X1) F0(N)
913 #define F2(N, X1, X2) F0(N)
914 #define F3(N, X1, X2, X3) F0(N)
915 #define F4(N, X1, X2, X3, X4) F0(N)
916 #define F5(N, X1, X2, X3, X4, X5) F0(N)
919 #include "insn-format.def"
929 /* Define a structure to hold the decoded fields. We'll store each inside
930 an array indexed by an enum. In order to conserve memory, we'll arrange
931 for fields that do not exist at the same time to overlap, thus the "C"
932 for compact. For checking purposes there is an "O" for original index
933 as well that will be applied to availability bitmaps. */
935 enum DisasFieldIndexO
{
958 enum DisasFieldIndexC
{
992 unsigned presentC
:16;
993 unsigned int presentO
;
997 /* This is the way fields are to be accessed out of DisasFields. */
998 #define have_field(S, F) have_field1((S), FLD_O_##F)
999 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1001 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1003 return (f
->presentO
>> c
) & 1;
1006 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1007 enum DisasFieldIndexC c
)
1009 assert(have_field1(f
, o
));
1013 /* Describe the layout of each field in each format. */
1014 typedef struct DisasField
{
1016 unsigned int size
:8;
1017 unsigned int type
:2;
1018 unsigned int indexC
:6;
1019 enum DisasFieldIndexO indexO
:8;
1022 typedef struct DisasFormatInfo
{
1023 DisasField op
[NUM_C_FIELD
];
1026 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1027 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1028 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1029 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1030 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1031 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1032 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1033 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1035 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1038 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1039 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1041 #define F0(N) { { } },
1042 #define F1(N, X1) { { X1 } },
1043 #define F2(N, X1, X2) { { X1, X2 } },
1044 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1045 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1046 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1048 static const DisasFormatInfo format_info
[] = {
1049 #include "insn-format.def"
1067 /* Generally, we'll extract operands into this structures, operate upon
1068 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1069 of routines below for more details. */
1071 bool g_out
, g_out2
, g_in1
, g_in2
;
1072 TCGv_i64 out
, out2
, in1
, in2
;
1076 /* Instructions can place constraints on their operands, raising specification
1077 exceptions if they are violated. To make this easy to automate, each "in1",
1078 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1079 of the following, or 0. To make this easy to document, we'll put the
1080 SPEC_<name> defines next to <name>. */
1082 #define SPEC_r1_even 1
1083 #define SPEC_r2_even 2
1084 #define SPEC_r3_even 4
1085 #define SPEC_r1_f128 8
1086 #define SPEC_r2_f128 16
1088 /* Return values from translate_one, indicating the state of the TB. */
1090 /* Continue the TB. */
1092 /* We have emitted one or more goto_tb. No fixup required. */
1094 /* We are not using a goto_tb (for whatever reason), but have updated
1095 the PC (for whatever reason), so there's no need to do it again on
1098 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1099 updated the PC for the next instruction to be executed. */
1101 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1102 No following code will be executed. */
1106 typedef enum DisasFacility
{
1107 FAC_Z
, /* zarch (default) */
1108 FAC_CASS
, /* compare and swap and store */
1109 FAC_CASS2
, /* compare and swap and store 2*/
1110 FAC_DFP
, /* decimal floating point */
1111 FAC_DFPR
, /* decimal floating point rounding */
1112 FAC_DO
, /* distinct operands */
1113 FAC_EE
, /* execute extensions */
1114 FAC_EI
, /* extended immediate */
1115 FAC_FPE
, /* floating point extension */
1116 FAC_FPSSH
, /* floating point support sign handling */
1117 FAC_FPRGR
, /* FPR-GR transfer */
1118 FAC_GIE
, /* general instructions extension */
1119 FAC_HFP_MA
, /* HFP multiply-and-add/subtract */
1120 FAC_HW
, /* high-word */
1121 FAC_IEEEE_SIM
, /* IEEE exception sumilation */
1122 FAC_LOC
, /* load/store on condition */
1123 FAC_LD
, /* long displacement */
1124 FAC_PC
, /* population count */
1125 FAC_SCF
, /* store clock fast */
1126 FAC_SFLE
, /* store facility list extended */
1127 FAC_ILA
, /* interlocked access facility 1 */
1133 DisasFacility fac
:8;
1138 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1139 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1140 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1141 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1142 void (*help_cout
)(DisasContext
*, DisasOps
*);
1143 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1148 /* ====================================================================== */
1149 /* Miscellaneous helpers, used by several operations. */
1151 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1152 DisasOps
*o
, int mask
)
1154 int b2
= get_field(f
, b2
);
1155 int d2
= get_field(f
, d2
);
1158 o
->in2
= tcg_const_i64(d2
& mask
);
1160 o
->in2
= get_address(s
, 0, b2
, d2
);
1161 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1165 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1167 if (dest
== s
->next_pc
) {
1170 if (use_goto_tb(s
, dest
)) {
1173 tcg_gen_movi_i64(psw_addr
, dest
);
1174 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1175 return EXIT_GOTO_TB
;
1177 tcg_gen_movi_i64(psw_addr
, dest
);
1178 return EXIT_PC_UPDATED
;
1182 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1183 bool is_imm
, int imm
, TCGv_i64 cdest
)
1186 uint64_t dest
= s
->pc
+ 2 * imm
;
1189 /* Take care of the special cases first. */
1190 if (c
->cond
== TCG_COND_NEVER
) {
1195 if (dest
== s
->next_pc
) {
1196 /* Branch to next. */
1200 if (c
->cond
== TCG_COND_ALWAYS
) {
1201 ret
= help_goto_direct(s
, dest
);
1205 if (TCGV_IS_UNUSED_I64(cdest
)) {
1206 /* E.g. bcr %r0 -> no branch. */
1210 if (c
->cond
== TCG_COND_ALWAYS
) {
1211 tcg_gen_mov_i64(psw_addr
, cdest
);
1212 ret
= EXIT_PC_UPDATED
;
1217 if (use_goto_tb(s
, s
->next_pc
)) {
1218 if (is_imm
&& use_goto_tb(s
, dest
)) {
1219 /* Both exits can use goto_tb. */
1222 lab
= gen_new_label();
1224 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1226 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1229 /* Branch not taken. */
1231 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1232 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1237 tcg_gen_movi_i64(psw_addr
, dest
);
1238 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1242 /* Fallthru can use goto_tb, but taken branch cannot. */
1243 /* Store taken branch destination before the brcond. This
1244 avoids having to allocate a new local temp to hold it.
1245 We'll overwrite this in the not taken case anyway. */
1247 tcg_gen_mov_i64(psw_addr
, cdest
);
1250 lab
= gen_new_label();
1252 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1254 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1257 /* Branch not taken. */
1260 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1261 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1265 tcg_gen_movi_i64(psw_addr
, dest
);
1267 ret
= EXIT_PC_UPDATED
;
1270 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1271 Most commonly we're single-stepping or some other condition that
1272 disables all use of goto_tb. Just update the PC and exit. */
1274 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1276 cdest
= tcg_const_i64(dest
);
1280 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1283 TCGv_i32 t0
= tcg_temp_new_i32();
1284 TCGv_i64 t1
= tcg_temp_new_i64();
1285 TCGv_i64 z
= tcg_const_i64(0);
1286 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1287 tcg_gen_extu_i32_i64(t1
, t0
);
1288 tcg_temp_free_i32(t0
);
1289 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1290 tcg_temp_free_i64(t1
);
1291 tcg_temp_free_i64(z
);
1295 tcg_temp_free_i64(cdest
);
1297 tcg_temp_free_i64(next
);
1299 ret
= EXIT_PC_UPDATED
;
1307 /* ====================================================================== */
1308 /* The operations. These perform the bulk of the work for any insn,
1309 usually after the operands have been loaded and output initialized. */
1311 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1314 z
= tcg_const_i64(0);
1315 n
= tcg_temp_new_i64();
1316 tcg_gen_neg_i64(n
, o
->in2
);
1317 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1318 tcg_temp_free_i64(n
);
1319 tcg_temp_free_i64(z
);
1323 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1325 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
1329 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1331 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1335 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1337 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1338 tcg_gen_mov_i64(o
->out2
, o
->in2
);
1342 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1344 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1348 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1353 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1355 /* The carry flag is the msb of CC, therefore the branch mask that would
1356 create that comparison is 3. Feeding the generated comparison to
1357 setcond produces the carry flag that we desire. */
1358 disas_jcc(s
, &cmp
, 3);
1359 carry
= tcg_temp_new_i64();
1361 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1363 TCGv_i32 t
= tcg_temp_new_i32();
1364 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1365 tcg_gen_extu_i32_i64(carry
, t
);
1366 tcg_temp_free_i32(t
);
1370 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1371 tcg_temp_free_i64(carry
);
1375 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1377 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1381 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1383 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
1387 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1389 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1390 return_low128(o
->out2
);
1394 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1396 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1400 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1402 int shift
= s
->insn
->data
& 0xff;
1403 int size
= s
->insn
->data
>> 8;
1404 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1407 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1408 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1409 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1411 /* Produce the CC from only the bits manipulated. */
1412 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1413 set_cc_nz_u64(s
, cc_dst
);
1417 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1419 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1420 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1421 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1422 return EXIT_PC_UPDATED
;
1428 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1430 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1431 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1434 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1436 int m1
= get_field(s
->fields
, m1
);
1437 bool is_imm
= have_field(s
->fields
, i2
);
1438 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1441 disas_jcc(s
, &c
, m1
);
1442 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1445 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1447 int r1
= get_field(s
->fields
, r1
);
1448 bool is_imm
= have_field(s
->fields
, i2
);
1449 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1453 c
.cond
= TCG_COND_NE
;
1458 t
= tcg_temp_new_i64();
1459 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1460 store_reg32_i64(r1
, t
);
1461 c
.u
.s32
.a
= tcg_temp_new_i32();
1462 c
.u
.s32
.b
= tcg_const_i32(0);
1463 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1464 tcg_temp_free_i64(t
);
1466 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1469 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1471 int r1
= get_field(s
->fields
, r1
);
1472 bool is_imm
= have_field(s
->fields
, i2
);
1473 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1476 c
.cond
= TCG_COND_NE
;
1481 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1482 c
.u
.s64
.a
= regs
[r1
];
1483 c
.u
.s64
.b
= tcg_const_i64(0);
1485 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1488 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1490 int r1
= get_field(s
->fields
, r1
);
1491 int r3
= get_field(s
->fields
, r3
);
1492 bool is_imm
= have_field(s
->fields
, i2
);
1493 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1497 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1502 t
= tcg_temp_new_i64();
1503 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1504 c
.u
.s32
.a
= tcg_temp_new_i32();
1505 c
.u
.s32
.b
= tcg_temp_new_i32();
1506 tcg_gen_trunc_i64_i32(c
.u
.s32
.a
, t
);
1507 tcg_gen_trunc_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1508 store_reg32_i64(r1
, t
);
1509 tcg_temp_free_i64(t
);
1511 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1514 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1516 int r1
= get_field(s
->fields
, r1
);
1517 int r3
= get_field(s
->fields
, r3
);
1518 bool is_imm
= have_field(s
->fields
, i2
);
1519 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1522 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1525 if (r1
== (r3
| 1)) {
1526 c
.u
.s64
.b
= load_reg(r3
| 1);
1529 c
.u
.s64
.b
= regs
[r3
| 1];
1533 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1534 c
.u
.s64
.a
= regs
[r1
];
1537 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1540 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1542 int imm
, m3
= get_field(s
->fields
, m3
);
1546 c
.cond
= ltgt_cond
[m3
];
1547 if (s
->insn
->data
) {
1548 c
.cond
= tcg_unsigned_cond(c
.cond
);
1550 c
.is_64
= c
.g1
= c
.g2
= true;
1554 is_imm
= have_field(s
->fields
, i4
);
1556 imm
= get_field(s
->fields
, i4
);
1559 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1560 get_field(s
->fields
, d4
));
1563 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1566 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1568 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1573 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1575 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1580 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1582 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1587 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1589 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1590 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1591 tcg_temp_free_i32(m3
);
1592 gen_set_cc_nz_f32(s
, o
->in2
);
1596 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1598 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1599 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1600 tcg_temp_free_i32(m3
);
1601 gen_set_cc_nz_f64(s
, o
->in2
);
1605 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1607 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1608 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1609 tcg_temp_free_i32(m3
);
1610 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1614 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1616 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1617 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1618 tcg_temp_free_i32(m3
);
1619 gen_set_cc_nz_f32(s
, o
->in2
);
1623 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1625 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1626 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1627 tcg_temp_free_i32(m3
);
1628 gen_set_cc_nz_f64(s
, o
->in2
);
1632 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1634 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1635 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1636 tcg_temp_free_i32(m3
);
1637 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1641 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1643 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1644 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1645 tcg_temp_free_i32(m3
);
1646 gen_set_cc_nz_f32(s
, o
->in2
);
1650 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1652 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1653 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1654 tcg_temp_free_i32(m3
);
1655 gen_set_cc_nz_f64(s
, o
->in2
);
1659 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1661 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1662 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1663 tcg_temp_free_i32(m3
);
1664 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1668 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1670 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1671 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1672 tcg_temp_free_i32(m3
);
1673 gen_set_cc_nz_f32(s
, o
->in2
);
1677 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1679 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1680 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1681 tcg_temp_free_i32(m3
);
1682 gen_set_cc_nz_f64(s
, o
->in2
);
1686 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1688 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1689 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1690 tcg_temp_free_i32(m3
);
1691 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1695 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1697 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1698 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1699 tcg_temp_free_i32(m3
);
1703 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1705 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1706 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1707 tcg_temp_free_i32(m3
);
1711 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1716 return_low128(o
->out2
);
1720 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1722 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1723 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1724 tcg_temp_free_i32(m3
);
1728 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1730 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1731 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1732 tcg_temp_free_i32(m3
);
1736 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1738 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1739 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1740 tcg_temp_free_i32(m3
);
1741 return_low128(o
->out2
);
1745 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1747 int r2
= get_field(s
->fields
, r2
);
1748 TCGv_i64 len
= tcg_temp_new_i64();
1750 potential_page_fault(s
);
1751 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1753 return_low128(o
->out
);
1755 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1756 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1757 tcg_temp_free_i64(len
);
1762 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1764 int l
= get_field(s
->fields
, l1
);
1769 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1770 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1773 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1774 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1777 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1778 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1781 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1782 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1785 potential_page_fault(s
);
1786 vl
= tcg_const_i32(l
);
1787 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1788 tcg_temp_free_i32(vl
);
1792 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1796 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1798 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1799 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
1800 potential_page_fault(s
);
1801 gen_helper_clcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
1802 tcg_temp_free_i32(r1
);
1803 tcg_temp_free_i32(r3
);
1808 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1810 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1811 TCGv_i32 t1
= tcg_temp_new_i32();
1812 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
1813 potential_page_fault(s
);
1814 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1816 tcg_temp_free_i32(t1
);
1817 tcg_temp_free_i32(m3
);
1821 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1823 potential_page_fault(s
);
1824 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1826 return_low128(o
->in2
);
1830 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1832 TCGv_i64 t
= tcg_temp_new_i64();
1833 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1834 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1835 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1836 tcg_temp_free_i64(t
);
1840 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1842 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1843 int d2
= get_field(s
->fields
, d2
);
1844 int b2
= get_field(s
->fields
, b2
);
1845 int is_64
= s
->insn
->data
;
1846 TCGv_i64 addr
, mem
, cc
, z
;
1848 /* Note that in1 = R3 (new value) and
1849 in2 = (zero-extended) R1 (expected value). */
1851 /* Load the memory into the (temporary) output. While the PoO only talks
1852 about moving the memory to R1 on inequality, if we include equality it
1853 means that R1 is equal to the memory in all conditions. */
1854 addr
= get_address(s
, 0, b2
, d2
);
1856 tcg_gen_qemu_ld64(o
->out
, addr
, get_mem_index(s
));
1858 tcg_gen_qemu_ld32u(o
->out
, addr
, get_mem_index(s
));
1861 /* Are the memory and expected values (un)equal? Note that this setcond
1862 produces the output CC value, thus the NE sense of the test. */
1863 cc
= tcg_temp_new_i64();
1864 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1866 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1867 Recall that we are allowed to unconditionally issue the store (and
1868 thus any possible write trap), so (re-)store the original contents
1869 of MEM in case of inequality. */
1870 z
= tcg_const_i64(0);
1871 mem
= tcg_temp_new_i64();
1872 tcg_gen_movcond_i64(TCG_COND_EQ
, mem
, cc
, z
, o
->in1
, o
->out
);
1874 tcg_gen_qemu_st64(mem
, addr
, get_mem_index(s
));
1876 tcg_gen_qemu_st32(mem
, addr
, get_mem_index(s
));
1878 tcg_temp_free_i64(z
);
1879 tcg_temp_free_i64(mem
);
1880 tcg_temp_free_i64(addr
);
1882 /* Store CC back to cc_op. Wait until after the store so that any
1883 exception gets the old cc_op value. */
1884 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1885 tcg_temp_free_i64(cc
);
1890 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1892 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1893 int r1
= get_field(s
->fields
, r1
);
1894 int r3
= get_field(s
->fields
, r3
);
1895 int d2
= get_field(s
->fields
, d2
);
1896 int b2
= get_field(s
->fields
, b2
);
1897 TCGv_i64 addrh
, addrl
, memh
, meml
, outh
, outl
, cc
, z
;
1899 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1901 addrh
= get_address(s
, 0, b2
, d2
);
1902 addrl
= get_address(s
, 0, b2
, d2
+ 8);
1903 outh
= tcg_temp_new_i64();
1904 outl
= tcg_temp_new_i64();
1906 tcg_gen_qemu_ld64(outh
, addrh
, get_mem_index(s
));
1907 tcg_gen_qemu_ld64(outl
, addrl
, get_mem_index(s
));
1909 /* Fold the double-word compare with arithmetic. */
1910 cc
= tcg_temp_new_i64();
1911 z
= tcg_temp_new_i64();
1912 tcg_gen_xor_i64(cc
, outh
, regs
[r1
]);
1913 tcg_gen_xor_i64(z
, outl
, regs
[r1
+ 1]);
1914 tcg_gen_or_i64(cc
, cc
, z
);
1915 tcg_gen_movi_i64(z
, 0);
1916 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, cc
, z
);
1918 memh
= tcg_temp_new_i64();
1919 meml
= tcg_temp_new_i64();
1920 tcg_gen_movcond_i64(TCG_COND_EQ
, memh
, cc
, z
, regs
[r3
], outh
);
1921 tcg_gen_movcond_i64(TCG_COND_EQ
, meml
, cc
, z
, regs
[r3
+ 1], outl
);
1922 tcg_temp_free_i64(z
);
1924 tcg_gen_qemu_st64(memh
, addrh
, get_mem_index(s
));
1925 tcg_gen_qemu_st64(meml
, addrl
, get_mem_index(s
));
1926 tcg_temp_free_i64(memh
);
1927 tcg_temp_free_i64(meml
);
1928 tcg_temp_free_i64(addrh
);
1929 tcg_temp_free_i64(addrl
);
1931 /* Save back state now that we've passed all exceptions. */
1932 tcg_gen_mov_i64(regs
[r1
], outh
);
1933 tcg_gen_mov_i64(regs
[r1
+ 1], outl
);
1934 tcg_gen_trunc_i64_i32(cc_op
, cc
);
1935 tcg_temp_free_i64(outh
);
1936 tcg_temp_free_i64(outl
);
1937 tcg_temp_free_i64(cc
);
1942 #ifndef CONFIG_USER_ONLY
1943 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1945 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
1946 check_privileged(s
);
1947 gen_helper_csp(cc_op
, cpu_env
, r1
, o
->in2
);
1948 tcg_temp_free_i32(r1
);
1954 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
1956 TCGv_i64 t1
= tcg_temp_new_i64();
1957 TCGv_i32 t2
= tcg_temp_new_i32();
1958 tcg_gen_trunc_i64_i32(t2
, o
->in1
);
1959 gen_helper_cvd(t1
, t2
);
1960 tcg_temp_free_i32(t2
);
1961 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
1962 tcg_temp_free_i64(t1
);
1966 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
1968 int m3
= get_field(s
->fields
, m3
);
1969 TCGLabel
*lab
= gen_new_label();
1973 c
= tcg_invert_cond(ltgt_cond
[m3
]);
1974 if (s
->insn
->data
) {
1975 c
= tcg_unsigned_cond(c
);
1977 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
1979 /* Set DXC to 0xff. */
1980 t
= tcg_temp_new_i32();
1981 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1982 tcg_gen_ori_i32(t
, t
, 0xff00);
1983 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
1984 tcg_temp_free_i32(t
);
1987 gen_program_exception(s
, PGM_DATA
);
1993 #ifndef CONFIG_USER_ONLY
1994 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
1998 check_privileged(s
);
1999 potential_page_fault(s
);
2001 /* We pretend the format is RX_a so that D2 is the field we want. */
2002 tmp
= tcg_const_i32(get_field(s
->fields
, d2
) & 0xfff);
2003 gen_helper_diag(regs
[2], cpu_env
, tmp
, regs
[2], regs
[1]);
2004 tcg_temp_free_i32(tmp
);
2009 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2011 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2012 return_low128(o
->out
);
2016 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2018 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2019 return_low128(o
->out
);
2023 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2025 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2026 return_low128(o
->out
);
2030 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2032 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2033 return_low128(o
->out
);
2037 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2039 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2043 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2045 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2049 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2051 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2052 return_low128(o
->out2
);
2056 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2058 int r2
= get_field(s
->fields
, r2
);
2059 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2063 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2065 /* No cache information provided. */
2066 tcg_gen_movi_i64(o
->out
, -1);
2070 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2072 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2076 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2078 int r1
= get_field(s
->fields
, r1
);
2079 int r2
= get_field(s
->fields
, r2
);
2080 TCGv_i64 t
= tcg_temp_new_i64();
2082 /* Note the "subsequently" in the PoO, which implies a defined result
2083 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2084 tcg_gen_shri_i64(t
, psw_mask
, 32);
2085 store_reg32_i64(r1
, t
);
2087 store_reg32_i64(r2
, psw_mask
);
2090 tcg_temp_free_i64(t
);
2094 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2096 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2097 tb->flags, (ab)use the tb->cs_base field as the address of
2098 the template in memory, and grab 8 bits of tb->flags/cflags for
2099 the contents of the register. We would then recognize all this
2100 in gen_intermediate_code_internal, generating code for exactly
2101 one instruction. This new TB then gets executed normally.
2103 On the other hand, this seems to be mostly used for modifying
2104 MVC inside of memcpy, which needs a helper call anyway. So
2105 perhaps this doesn't bear thinking about any further. */
2112 tmp
= tcg_const_i64(s
->next_pc
);
2113 gen_helper_ex(cc_op
, cpu_env
, cc_op
, o
->in1
, o
->in2
, tmp
);
2114 tcg_temp_free_i64(tmp
);
2119 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2121 /* We'll use the original input for cc computation, since we get to
2122 compare that against 0, which ought to be better than comparing
2123 the real output against 64. It also lets cc_dst be a convenient
2124 temporary during our computation. */
2125 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2127 /* R1 = IN ? CLZ(IN) : 64. */
2128 gen_helper_clz(o
->out
, o
->in2
);
2130 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2131 value by 64, which is undefined. But since the shift is 64 iff the
2132 input is zero, we still get the correct result after and'ing. */
2133 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2134 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2135 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2139 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2141 int m3
= get_field(s
->fields
, m3
);
2142 int pos
, len
, base
= s
->insn
->data
;
2143 TCGv_i64 tmp
= tcg_temp_new_i64();
2148 /* Effectively a 32-bit load. */
2149 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2156 /* Effectively a 16-bit load. */
2157 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2165 /* Effectively an 8-bit load. */
2166 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2171 pos
= base
+ ctz32(m3
) * 8;
2172 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2173 ccm
= ((1ull << len
) - 1) << pos
;
2177 /* This is going to be a sequence of loads and inserts. */
2178 pos
= base
+ 32 - 8;
2182 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2183 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2184 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2187 m3
= (m3
<< 1) & 0xf;
2193 tcg_gen_movi_i64(tmp
, ccm
);
2194 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2195 tcg_temp_free_i64(tmp
);
2199 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2201 int shift
= s
->insn
->data
& 0xff;
2202 int size
= s
->insn
->data
>> 8;
2203 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2207 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2212 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2214 t1
= tcg_temp_new_i64();
2215 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2216 tcg_gen_shri_i64(t1
, t1
, 36);
2217 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2219 tcg_gen_extu_i32_i64(t1
, cc_op
);
2220 tcg_gen_shli_i64(t1
, t1
, 28);
2221 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2222 tcg_temp_free_i64(t1
);
2226 #ifndef CONFIG_USER_ONLY
2227 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2229 check_privileged(s
);
2230 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
);
2234 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2236 check_privileged(s
);
2237 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2242 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2244 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2248 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2250 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2254 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2256 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2260 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2262 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2266 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2268 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2269 return_low128(o
->out2
);
2273 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2275 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2276 return_low128(o
->out2
);
2280 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2282 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2286 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2288 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2292 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2294 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2298 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2300 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2304 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2306 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2310 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2312 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2316 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2318 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2322 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2324 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2328 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2332 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2335 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2339 TCGv_i32 t32
= tcg_temp_new_i32();
2342 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2345 t
= tcg_temp_new_i64();
2346 tcg_gen_extu_i32_i64(t
, t32
);
2347 tcg_temp_free_i32(t32
);
2349 z
= tcg_const_i64(0);
2350 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2351 tcg_temp_free_i64(t
);
2352 tcg_temp_free_i64(z
);
2358 #ifndef CONFIG_USER_ONLY
2359 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2361 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2362 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2363 check_privileged(s
);
2364 potential_page_fault(s
);
2365 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2366 tcg_temp_free_i32(r1
);
2367 tcg_temp_free_i32(r3
);
2371 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2373 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2374 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2375 check_privileged(s
);
2376 potential_page_fault(s
);
2377 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2378 tcg_temp_free_i32(r1
);
2379 tcg_temp_free_i32(r3
);
2382 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2384 check_privileged(s
);
2385 potential_page_fault(s
);
2386 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2391 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2395 check_privileged(s
);
2397 t1
= tcg_temp_new_i64();
2398 t2
= tcg_temp_new_i64();
2399 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2400 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2401 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2402 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2403 tcg_gen_shli_i64(t1
, t1
, 32);
2404 gen_helper_load_psw(cpu_env
, t1
, t2
);
2405 tcg_temp_free_i64(t1
);
2406 tcg_temp_free_i64(t2
);
2407 return EXIT_NORETURN
;
2410 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2414 check_privileged(s
);
2416 t1
= tcg_temp_new_i64();
2417 t2
= tcg_temp_new_i64();
2418 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2419 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2420 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2421 gen_helper_load_psw(cpu_env
, t1
, t2
);
2422 tcg_temp_free_i64(t1
);
2423 tcg_temp_free_i64(t2
);
2424 return EXIT_NORETURN
;
2428 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2430 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2431 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2432 potential_page_fault(s
);
2433 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2434 tcg_temp_free_i32(r1
);
2435 tcg_temp_free_i32(r3
);
2439 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2441 int r1
= get_field(s
->fields
, r1
);
2442 int r3
= get_field(s
->fields
, r3
);
2443 TCGv_i64 t
= tcg_temp_new_i64();
2444 TCGv_i64 t4
= tcg_const_i64(4);
2447 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2448 store_reg32_i64(r1
, t
);
2452 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2456 tcg_temp_free_i64(t
);
2457 tcg_temp_free_i64(t4
);
2461 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2463 int r1
= get_field(s
->fields
, r1
);
2464 int r3
= get_field(s
->fields
, r3
);
2465 TCGv_i64 t
= tcg_temp_new_i64();
2466 TCGv_i64 t4
= tcg_const_i64(4);
2469 tcg_gen_qemu_ld32u(t
, o
->in2
, get_mem_index(s
));
2470 store_reg32h_i64(r1
, t
);
2474 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
2478 tcg_temp_free_i64(t
);
2479 tcg_temp_free_i64(t4
);
2483 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2485 int r1
= get_field(s
->fields
, r1
);
2486 int r3
= get_field(s
->fields
, r3
);
2487 TCGv_i64 t8
= tcg_const_i64(8);
2490 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2494 tcg_gen_add_i64(o
->in2
, o
->in2
, t8
);
2498 tcg_temp_free_i64(t8
);
2502 #ifndef CONFIG_USER_ONLY
2503 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2505 check_privileged(s
);
2506 potential_page_fault(s
);
2507 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2511 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2513 check_privileged(s
);
2514 potential_page_fault(s
);
2515 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2520 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2523 o
->g_out
= o
->g_in2
;
2524 TCGV_UNUSED_I64(o
->in2
);
2529 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2533 o
->g_out
= o
->g_in1
;
2534 o
->g_out2
= o
->g_in2
;
2535 TCGV_UNUSED_I64(o
->in1
);
2536 TCGV_UNUSED_I64(o
->in2
);
2537 o
->g_in1
= o
->g_in2
= false;
2541 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2543 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2544 potential_page_fault(s
);
2545 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2546 tcg_temp_free_i32(l
);
2550 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
2552 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2553 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
2554 potential_page_fault(s
);
2555 gen_helper_mvcl(cc_op
, cpu_env
, r1
, r2
);
2556 tcg_temp_free_i32(r1
);
2557 tcg_temp_free_i32(r2
);
2562 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
2564 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2565 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2566 potential_page_fault(s
);
2567 gen_helper_mvcle(cc_op
, cpu_env
, r1
, o
->in2
, r3
);
2568 tcg_temp_free_i32(r1
);
2569 tcg_temp_free_i32(r3
);
2574 #ifndef CONFIG_USER_ONLY
2575 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
2577 int r1
= get_field(s
->fields
, l1
);
2578 check_privileged(s
);
2579 potential_page_fault(s
);
2580 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2585 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
2587 int r1
= get_field(s
->fields
, l1
);
2588 check_privileged(s
);
2589 potential_page_fault(s
);
2590 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
2596 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
2598 potential_page_fault(s
);
2599 gen_helper_mvpg(cpu_env
, regs
[0], o
->in1
, o
->in2
);
2604 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
2606 potential_page_fault(s
);
2607 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
2609 return_low128(o
->in2
);
2613 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
2615 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
2619 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
2621 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
2625 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
2627 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2631 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
2633 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2637 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
2639 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2643 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
2645 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2646 return_low128(o
->out2
);
2650 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
2652 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2653 return_low128(o
->out2
);
2657 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
2659 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2660 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2661 tcg_temp_free_i64(r3
);
2665 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
2667 int r3
= get_field(s
->fields
, r3
);
2668 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2672 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
2674 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
2675 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
2676 tcg_temp_free_i64(r3
);
2680 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
2682 int r3
= get_field(s
->fields
, r3
);
2683 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
2687 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
2690 z
= tcg_const_i64(0);
2691 n
= tcg_temp_new_i64();
2692 tcg_gen_neg_i64(n
, o
->in2
);
2693 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
2694 tcg_temp_free_i64(n
);
2695 tcg_temp_free_i64(z
);
2699 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
2701 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2705 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
2707 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2711 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
2713 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2714 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2718 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
2720 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2721 potential_page_fault(s
);
2722 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2723 tcg_temp_free_i32(l
);
2728 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
2730 tcg_gen_neg_i64(o
->out
, o
->in2
);
2734 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
2736 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
2740 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
2742 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
2746 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
2748 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
2749 tcg_gen_mov_i64(o
->out2
, o
->in2
);
2753 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
2755 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2756 potential_page_fault(s
);
2757 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
2758 tcg_temp_free_i32(l
);
2763 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
2765 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2769 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
2771 int shift
= s
->insn
->data
& 0xff;
2772 int size
= s
->insn
->data
>> 8;
2773 uint64_t mask
= ((1ull << size
) - 1) << shift
;
2776 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
2777 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2779 /* Produce the CC from only the bits manipulated. */
2780 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2781 set_cc_nz_u64(s
, cc_dst
);
2785 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
2787 gen_helper_popcnt(o
->out
, o
->in2
);
2791 #ifndef CONFIG_USER_ONLY
2792 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
2794 check_privileged(s
);
2795 gen_helper_ptlb(cpu_env
);
2800 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
2802 int i3
= get_field(s
->fields
, i3
);
2803 int i4
= get_field(s
->fields
, i4
);
2804 int i5
= get_field(s
->fields
, i5
);
2805 int do_zero
= i4
& 0x80;
2806 uint64_t mask
, imask
, pmask
;
2809 /* Adjust the arguments for the specific insn. */
2810 switch (s
->fields
->op2
) {
2811 case 0x55: /* risbg */
2816 case 0x5d: /* risbhg */
2819 pmask
= 0xffffffff00000000ull
;
2821 case 0x51: /* risblg */
2824 pmask
= 0x00000000ffffffffull
;
2830 /* MASK is the set of bits to be inserted from R2.
2831 Take care for I3/I4 wraparound. */
2834 mask
^= pmask
>> i4
>> 1;
2836 mask
|= ~(pmask
>> i4
>> 1);
2840 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2841 insns, we need to keep the other half of the register. */
2842 imask
= ~mask
| ~pmask
;
2844 if (s
->fields
->op2
== 0x55) {
2851 /* In some cases we can implement this with deposit, which can be more
2852 efficient on some hosts. */
2853 if (~mask
== imask
&& i3
<= i4
) {
2854 if (s
->fields
->op2
== 0x5d) {
2857 /* Note that we rotate the bits to be inserted to the lsb, not to
2858 the position as described in the PoO. */
2861 rot
= (i5
- pos
) & 63;
2867 /* Rotate the input as necessary. */
2868 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
2870 /* Insert the selected bits into the output. */
2872 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
2873 } else if (imask
== 0) {
2874 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
2876 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2877 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
2878 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2883 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
2885 int i3
= get_field(s
->fields
, i3
);
2886 int i4
= get_field(s
->fields
, i4
);
2887 int i5
= get_field(s
->fields
, i5
);
2890 /* If this is a test-only form, arrange to discard the result. */
2892 o
->out
= tcg_temp_new_i64();
2900 /* MASK is the set of bits to be operated on from R2.
2901 Take care for I3/I4 wraparound. */
2904 mask
^= ~0ull >> i4
>> 1;
2906 mask
|= ~(~0ull >> i4
>> 1);
2909 /* Rotate the input as necessary. */
2910 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
2913 switch (s
->fields
->op2
) {
2914 case 0x55: /* AND */
2915 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
2916 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
2919 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2920 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
2922 case 0x57: /* XOR */
2923 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
2924 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
2931 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
2932 set_cc_nz_u64(s
, cc_dst
);
2936 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
2938 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
2942 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
2944 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
2948 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
2950 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
2954 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
2956 TCGv_i32 t1
= tcg_temp_new_i32();
2957 TCGv_i32 t2
= tcg_temp_new_i32();
2958 TCGv_i32 to
= tcg_temp_new_i32();
2959 tcg_gen_trunc_i64_i32(t1
, o
->in1
);
2960 tcg_gen_trunc_i64_i32(t2
, o
->in2
);
2961 tcg_gen_rotl_i32(to
, t1
, t2
);
2962 tcg_gen_extu_i32_i64(o
->out
, to
);
2963 tcg_temp_free_i32(t1
);
2964 tcg_temp_free_i32(t2
);
2965 tcg_temp_free_i32(to
);
2969 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
2971 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
2975 #ifndef CONFIG_USER_ONLY
2976 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
2978 check_privileged(s
);
2979 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
2984 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
2986 check_privileged(s
);
2987 gen_helper_sacf(cpu_env
, o
->in2
);
2988 /* Addressing mode has changed, so end the block. */
2989 return EXIT_PC_STALE
;
2993 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
2995 int sam
= s
->insn
->data
;
3011 /* Bizarre but true, we check the address of the current insn for the
3012 specification exception, not the next to be executed. Thus the PoO
3013 documents that Bad Things Happen two bytes before the end. */
3014 if (s
->pc
& ~mask
) {
3015 gen_program_exception(s
, PGM_SPECIFICATION
);
3016 return EXIT_NORETURN
;
3020 tsam
= tcg_const_i64(sam
);
3021 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3022 tcg_temp_free_i64(tsam
);
3024 /* Always exit the TB, since we (may have) changed execution mode. */
3025 return EXIT_PC_STALE
;
3028 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3030 int r1
= get_field(s
->fields
, r1
);
3031 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3035 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3037 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3041 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3043 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3047 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3049 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3050 return_low128(o
->out2
);
3054 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3056 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3060 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3062 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3066 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3068 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3069 return_low128(o
->out2
);
3073 #ifndef CONFIG_USER_ONLY
3074 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3076 check_privileged(s
);
3077 potential_page_fault(s
);
3078 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3083 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3085 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3086 check_privileged(s
);
3087 potential_page_fault(s
);
3088 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3089 tcg_temp_free_i32(r1
);
3094 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3101 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3103 /* We want to store when the condition is fulfilled, so branch
3104 out when it's not */
3105 c
.cond
= tcg_invert_cond(c
.cond
);
3107 lab
= gen_new_label();
3109 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3111 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3115 r1
= get_field(s
->fields
, r1
);
3116 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3117 if (s
->insn
->data
) {
3118 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3120 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3122 tcg_temp_free_i64(a
);
3128 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3130 uint64_t sign
= 1ull << s
->insn
->data
;
3131 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3132 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3133 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3134 /* The arithmetic left shift is curious in that it does not affect
3135 the sign bit. Copy that over from the source unchanged. */
3136 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3137 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3138 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3142 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3144 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3148 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3150 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3154 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3156 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3160 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3162 gen_helper_sfpc(cpu_env
, o
->in2
);
3166 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3168 gen_helper_sfas(cpu_env
, o
->in2
);
3172 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3174 int b2
= get_field(s
->fields
, b2
);
3175 int d2
= get_field(s
->fields
, d2
);
3176 TCGv_i64 t1
= tcg_temp_new_i64();
3177 TCGv_i64 t2
= tcg_temp_new_i64();
3180 switch (s
->fields
->op2
) {
3181 case 0x99: /* SRNM */
3184 case 0xb8: /* SRNMB */
3187 case 0xb9: /* SRNMT */
3193 mask
= (1 << len
) - 1;
3195 /* Insert the value into the appropriate field of the FPC. */
3197 tcg_gen_movi_i64(t1
, d2
& mask
);
3199 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3200 tcg_gen_andi_i64(t1
, t1
, mask
);
3202 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3203 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3204 tcg_temp_free_i64(t1
);
3206 /* Then install the new FPC to set the rounding mode in fpu_status. */
3207 gen_helper_sfpc(cpu_env
, t2
);
3208 tcg_temp_free_i64(t2
);
3212 #ifndef CONFIG_USER_ONLY
3213 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3215 check_privileged(s
);
3216 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3217 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
- 4, 4);
3221 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3223 check_privileged(s
);
3224 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3228 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3230 check_privileged(s
);
3231 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3235 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3237 check_privileged(s
);
3238 /* ??? Surely cpu address != cpu number. In any case the previous
3239 version of this stored more than the required half-word, so it
3240 is unlikely this has ever been tested. */
3241 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3245 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3247 gen_helper_stck(o
->out
, cpu_env
);
3248 /* ??? We don't implement clock states. */
3249 gen_op_movi_cc(s
, 0);
3253 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3255 TCGv_i64 c1
= tcg_temp_new_i64();
3256 TCGv_i64 c2
= tcg_temp_new_i64();
3257 gen_helper_stck(c1
, cpu_env
);
3258 /* Shift the 64-bit value into its place as a zero-extended
3259 104-bit value. Note that "bit positions 64-103 are always
3260 non-zero so that they compare differently to STCK"; we set
3261 the least significant bit to 1. */
3262 tcg_gen_shli_i64(c2
, c1
, 56);
3263 tcg_gen_shri_i64(c1
, c1
, 8);
3264 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3265 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3266 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3267 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3268 tcg_temp_free_i64(c1
);
3269 tcg_temp_free_i64(c2
);
3270 /* ??? We don't implement clock states. */
3271 gen_op_movi_cc(s
, 0);
3275 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3277 check_privileged(s
);
3278 gen_helper_sckc(cpu_env
, o
->in2
);
3282 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3284 check_privileged(s
);
3285 gen_helper_stckc(o
->out
, cpu_env
);
3289 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3291 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3292 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3293 check_privileged(s
);
3294 potential_page_fault(s
);
3295 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3296 tcg_temp_free_i32(r1
);
3297 tcg_temp_free_i32(r3
);
3301 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3303 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3304 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3305 check_privileged(s
);
3306 potential_page_fault(s
);
3307 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3308 tcg_temp_free_i32(r1
);
3309 tcg_temp_free_i32(r3
);
3313 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3315 TCGv_i64 t1
= tcg_temp_new_i64();
3317 check_privileged(s
);
3318 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3319 tcg_gen_ld32u_i64(t1
, cpu_env
, offsetof(CPUS390XState
, machine_type
));
3320 tcg_gen_deposit_i64(o
->out
, o
->out
, t1
, 32, 32);
3321 tcg_temp_free_i64(t1
);
3326 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3328 check_privileged(s
);
3329 gen_helper_spt(cpu_env
, o
->in2
);
3333 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3336 /* We really ought to have more complete indication of facilities
3337 that we implement. Address this when STFLE is implemented. */
3338 check_privileged(s
);
3339 f
= tcg_const_i64(0xc0000000);
3340 a
= tcg_const_i64(200);
3341 tcg_gen_qemu_st32(f
, a
, get_mem_index(s
));
3342 tcg_temp_free_i64(f
);
3343 tcg_temp_free_i64(a
);
3347 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3349 check_privileged(s
);
3350 gen_helper_stpt(o
->out
, cpu_env
);
3354 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3356 check_privileged(s
);
3357 potential_page_fault(s
);
3358 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3363 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3365 check_privileged(s
);
3366 gen_helper_spx(cpu_env
, o
->in2
);
3370 static ExitStatus
op_subchannel(DisasContext
*s
, DisasOps
*o
)
3372 check_privileged(s
);
3373 /* Not operational. */
3374 gen_op_movi_cc(s
, 3);
3378 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
3380 check_privileged(s
);
3381 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
3382 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
3386 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
3388 uint64_t i2
= get_field(s
->fields
, i2
);
3391 check_privileged(s
);
3393 /* It is important to do what the instruction name says: STORE THEN.
3394 If we let the output hook perform the store then if we fault and
3395 restart, we'll have the wrong SYSTEM MASK in place. */
3396 t
= tcg_temp_new_i64();
3397 tcg_gen_shri_i64(t
, psw_mask
, 56);
3398 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
3399 tcg_temp_free_i64(t
);
3401 if (s
->fields
->op
== 0xac) {
3402 tcg_gen_andi_i64(psw_mask
, psw_mask
,
3403 (i2
<< 56) | 0x00ffffffffffffffull
);
3405 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
3410 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
3412 check_privileged(s
);
3413 potential_page_fault(s
);
3414 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
3418 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
3420 check_privileged(s
);
3421 potential_page_fault(s
);
3422 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
3427 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
3429 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
3433 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
3435 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
3439 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
3441 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
3445 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
3447 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
3451 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
3453 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3454 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3455 potential_page_fault(s
);
3456 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
3457 tcg_temp_free_i32(r1
);
3458 tcg_temp_free_i32(r3
);
3462 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
3464 int m3
= get_field(s
->fields
, m3
);
3465 int pos
, base
= s
->insn
->data
;
3466 TCGv_i64 tmp
= tcg_temp_new_i64();
3468 pos
= base
+ ctz32(m3
) * 8;
3471 /* Effectively a 32-bit store. */
3472 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3473 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
3479 /* Effectively a 16-bit store. */
3480 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3481 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
3488 /* Effectively an 8-bit store. */
3489 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3490 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3494 /* This is going to be a sequence of shifts and stores. */
3495 pos
= base
+ 32 - 8;
3498 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
3499 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
3500 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
3502 m3
= (m3
<< 1) & 0xf;
3507 tcg_temp_free_i64(tmp
);
3511 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
3513 int r1
= get_field(s
->fields
, r1
);
3514 int r3
= get_field(s
->fields
, r3
);
3515 int size
= s
->insn
->data
;
3516 TCGv_i64 tsize
= tcg_const_i64(size
);
3520 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
3522 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
3527 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
3531 tcg_temp_free_i64(tsize
);
3535 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
3537 int r1
= get_field(s
->fields
, r1
);
3538 int r3
= get_field(s
->fields
, r3
);
3539 TCGv_i64 t
= tcg_temp_new_i64();
3540 TCGv_i64 t4
= tcg_const_i64(4);
3541 TCGv_i64 t32
= tcg_const_i64(32);
3544 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
3545 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
3549 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
3553 tcg_temp_free_i64(t
);
3554 tcg_temp_free_i64(t4
);
3555 tcg_temp_free_i64(t32
);
3559 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
3561 potential_page_fault(s
);
3562 gen_helper_srst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3564 return_low128(o
->in2
);
3568 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
3570 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3574 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
3579 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
3581 /* The !borrow flag is the msb of CC. Since we want the inverse of
3582 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3583 disas_jcc(s
, &cmp
, 8 | 4);
3584 borrow
= tcg_temp_new_i64();
3586 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
3588 TCGv_i32 t
= tcg_temp_new_i32();
3589 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
3590 tcg_gen_extu_i32_i64(borrow
, t
);
3591 tcg_temp_free_i32(t
);
3595 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
3596 tcg_temp_free_i64(borrow
);
3600 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
3607 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
3608 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
3609 tcg_temp_free_i32(t
);
3611 t
= tcg_const_i32(s
->next_pc
- s
->pc
);
3612 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
3613 tcg_temp_free_i32(t
);
3615 gen_exception(EXCP_SVC
);
3616 return EXIT_NORETURN
;
3619 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
3621 gen_helper_tceb(cc_op
, o
->in1
, o
->in2
);
3626 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
3628 gen_helper_tcdb(cc_op
, o
->in1
, o
->in2
);
3633 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
3635 gen_helper_tcxb(cc_op
, o
->out
, o
->out2
, o
->in2
);
3640 #ifndef CONFIG_USER_ONLY
3641 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
3643 potential_page_fault(s
);
3644 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
3650 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
3652 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3653 potential_page_fault(s
);
3654 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
3655 tcg_temp_free_i32(l
);
3660 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
3662 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3663 potential_page_fault(s
);
3664 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
3665 tcg_temp_free_i32(l
);
3669 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
3671 int d1
= get_field(s
->fields
, d1
);
3672 int d2
= get_field(s
->fields
, d2
);
3673 int b1
= get_field(s
->fields
, b1
);
3674 int b2
= get_field(s
->fields
, b2
);
3675 int l
= get_field(s
->fields
, l1
);
3678 o
->addr1
= get_address(s
, 0, b1
, d1
);
3680 /* If the addresses are identical, this is a store/memset of zero. */
3681 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
3682 o
->in2
= tcg_const_i64(0);
3686 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
3689 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
3693 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
3696 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
3700 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
3703 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
3707 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
3709 gen_op_movi_cc(s
, 0);
3713 /* But in general we'll defer to a helper. */
3714 o
->in2
= get_address(s
, 0, b2
, d2
);
3715 t32
= tcg_const_i32(l
);
3716 potential_page_fault(s
);
3717 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
3718 tcg_temp_free_i32(t32
);
3723 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
3725 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3729 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
3731 int shift
= s
->insn
->data
& 0xff;
3732 int size
= s
->insn
->data
>> 8;
3733 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3736 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3737 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
3739 /* Produce the CC from only the bits manipulated. */
3740 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3741 set_cc_nz_u64(s
, cc_dst
);
3745 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
3747 o
->out
= tcg_const_i64(0);
3751 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
3753 o
->out
= tcg_const_i64(0);
3759 /* ====================================================================== */
3760 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3761 the original inputs), update the various cc data structures in order to
3762 be able to compute the new condition code. */
3764 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
3766 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
3769 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
3771 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
3774 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
3776 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
3779 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
3781 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
3784 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
3786 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
3789 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
3791 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
3794 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
3796 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
3799 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
3801 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
3804 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
3806 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
3809 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
3811 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
3814 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
3816 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
3819 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
3821 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
3824 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
3826 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
3829 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
3831 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
3834 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
3836 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
3839 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
3841 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
3844 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
3846 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
3849 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
3851 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
3854 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
3856 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
3859 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
3861 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
3862 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
3865 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
3867 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
3870 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
3872 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
3875 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
3877 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
3880 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
3882 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
3885 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
3887 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
3890 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
3892 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
3895 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
3897 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
3900 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
3902 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
3905 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
3907 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
3910 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
3912 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
3915 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
3917 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
3920 /* ====================================================================== */
3921 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3922 with the TCG register to which we will write. Used in combination with
3923 the "wout" generators, in some cases we need a new temporary, and in
3924 some cases we can write to a TCG global. */
3926 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3928 o
->out
= tcg_temp_new_i64();
3930 #define SPEC_prep_new 0
3932 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3934 o
->out
= tcg_temp_new_i64();
3935 o
->out2
= tcg_temp_new_i64();
3937 #define SPEC_prep_new_P 0
3939 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3941 o
->out
= regs
[get_field(f
, r1
)];
3944 #define SPEC_prep_r1 0
3946 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3948 int r1
= get_field(f
, r1
);
3950 o
->out2
= regs
[r1
+ 1];
3951 o
->g_out
= o
->g_out2
= true;
3953 #define SPEC_prep_r1_P SPEC_r1_even
3955 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3957 o
->out
= fregs
[get_field(f
, r1
)];
3960 #define SPEC_prep_f1 0
3962 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3964 int r1
= get_field(f
, r1
);
3966 o
->out2
= fregs
[r1
+ 2];
3967 o
->g_out
= o
->g_out2
= true;
3969 #define SPEC_prep_x1 SPEC_r1_f128
3971 /* ====================================================================== */
3972 /* The "Write OUTput" generators. These generally perform some non-trivial
3973 copy of data to TCG globals, or to main memory. The trivial cases are
3974 generally handled by having a "prep" generator install the TCG global
3975 as the destination of the operation. */
3977 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3979 store_reg(get_field(f
, r1
), o
->out
);
3981 #define SPEC_wout_r1 0
3983 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3985 int r1
= get_field(f
, r1
);
3986 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
3988 #define SPEC_wout_r1_8 0
3990 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3992 int r1
= get_field(f
, r1
);
3993 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
3995 #define SPEC_wout_r1_16 0
3997 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
3999 store_reg32_i64(get_field(f
, r1
), o
->out
);
4001 #define SPEC_wout_r1_32 0
4003 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4005 int r1
= get_field(f
, r1
);
4006 store_reg32_i64(r1
, o
->out
);
4007 store_reg32_i64(r1
+ 1, o
->out2
);
4009 #define SPEC_wout_r1_P32 SPEC_r1_even
4011 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4013 int r1
= get_field(f
, r1
);
4014 store_reg32_i64(r1
+ 1, o
->out
);
4015 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4016 store_reg32_i64(r1
, o
->out
);
4018 #define SPEC_wout_r1_D32 SPEC_r1_even
4020 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4022 store_freg32_i64(get_field(f
, r1
), o
->out
);
4024 #define SPEC_wout_e1 0
4026 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4028 store_freg(get_field(f
, r1
), o
->out
);
4030 #define SPEC_wout_f1 0
4032 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4034 int f1
= get_field(s
->fields
, r1
);
4035 store_freg(f1
, o
->out
);
4036 store_freg(f1
+ 2, o
->out2
);
4038 #define SPEC_wout_x1 SPEC_r1_f128
4040 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4042 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4043 store_reg32_i64(get_field(f
, r1
), o
->out
);
4046 #define SPEC_wout_cond_r1r2_32 0
4048 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4050 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4051 store_freg32_i64(get_field(f
, r1
), o
->out
);
4054 #define SPEC_wout_cond_e1e2 0
4056 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4058 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4060 #define SPEC_wout_m1_8 0
4062 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4064 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4066 #define SPEC_wout_m1_16 0
4068 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4070 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4072 #define SPEC_wout_m1_32 0
4074 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4076 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4078 #define SPEC_wout_m1_64 0
4080 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4082 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4084 #define SPEC_wout_m2_32 0
4086 static void wout_m2_32_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4088 /* XXX release reservation */
4089 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4090 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4092 #define SPEC_wout_m2_32_r1_atomic 0
4094 static void wout_m2_64_r1_atomic(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4096 /* XXX release reservation */
4097 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4098 store_reg(get_field(f
, r1
), o
->in2
);
4100 #define SPEC_wout_m2_64_r1_atomic 0
4102 /* ====================================================================== */
4103 /* The "INput 1" generators. These load the first operand to an insn. */
4105 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4107 o
->in1
= load_reg(get_field(f
, r1
));
4109 #define SPEC_in1_r1 0
4111 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4113 o
->in1
= regs
[get_field(f
, r1
)];
4116 #define SPEC_in1_r1_o 0
4118 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4120 o
->in1
= tcg_temp_new_i64();
4121 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4123 #define SPEC_in1_r1_32s 0
4125 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4127 o
->in1
= tcg_temp_new_i64();
4128 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4130 #define SPEC_in1_r1_32u 0
4132 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4134 o
->in1
= tcg_temp_new_i64();
4135 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4137 #define SPEC_in1_r1_sr32 0
4139 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4141 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4143 #define SPEC_in1_r1p1 SPEC_r1_even
4145 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4147 o
->in1
= tcg_temp_new_i64();
4148 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4150 #define SPEC_in1_r1p1_32s SPEC_r1_even
4152 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4154 o
->in1
= tcg_temp_new_i64();
4155 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4157 #define SPEC_in1_r1p1_32u SPEC_r1_even
4159 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4161 int r1
= get_field(f
, r1
);
4162 o
->in1
= tcg_temp_new_i64();
4163 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4165 #define SPEC_in1_r1_D32 SPEC_r1_even
4167 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4169 o
->in1
= load_reg(get_field(f
, r2
));
4171 #define SPEC_in1_r2 0
4173 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4175 o
->in1
= load_reg(get_field(f
, r3
));
4177 #define SPEC_in1_r3 0
/* in1 := register r3 used in place ("_o"); g_in1 marks it as a global
   TCG value so translate_one will not free it as a temp.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* in1 := low 32 bits of r3, sign-extended (fresh temp).  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* in1 := low 32 bits of r3, zero-extended (fresh temp).  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* in1 := doubleword from the r3 even/odd pair: r3 high, r3+1 low.  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even
/* in1 := the 32-bit (short) float in f1, widened to an i64 container.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* in1 := float register f1 used in place; flagged global (not freed).  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit float input: the f1/f1+2 register pair is placed directly in
   out/out2 (extended-format ops update it in place); both flagged global.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

/* in1 := float register f3 used in place; flagged global (not freed).  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
/* addr1 := effective address from the b1/d1 fields (no index register).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* addr1 := effective address from x2/b2/d2; x2 is optional in some
   formats, so treat a missing field as index register 0.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0
/* Memory-operand loaders: compute addr1 from b1/d1 via in1_la1, then
   load in1 from memory with the indicated width and extension.  */

/* in1 := zero-extended byte at addr1.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

/* in1 := sign-extended halfword at addr1.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

/* in1 := zero-extended halfword at addr1.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

/* in1 := sign-extended word at addr1.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

/* in1 := zero-extended word at addr1.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

/* in1 := doubleword at addr1.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4298 /* ====================================================================== */
4299 /* The "INput 2" generators. These load the second operand to an insn. */
/* in2 := register r1 used in place; flagged global (not freed).  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* in2 := low 16 bits of r1, zero-extended (fresh temp).  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* in2 := low 32 bits of r1, zero-extended (fresh temp).  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* in2 := doubleword from the r1 even/odd pair: r1 high, r1+1 low.  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
/* in2 := a fresh copy of register r2.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* in2 := register r2 used in place; flagged global (not freed).  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* in2 := copy of r2, but only when r2 is non-zero; r2 == 0 leaves in2
   unset, for insns where register 0 means "no operand".  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0
/* in2 := low 8 bits of r2, sign-extended (fresh temp).  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* in2 := low 8 bits of r2, zero-extended (fresh temp).  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* in2 := low 16 bits of r2, sign-extended (fresh temp).  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* in2 := low 16 bits of r2, zero-extended (fresh temp).  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0
/* in2 := a fresh copy of register r3.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* in2 := low 32 bits of r2, sign-extended (fresh temp).  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* in2 := low 32 bits of r2, zero-extended (fresh temp).  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0
/* in2 := the 32-bit (short) float in f2, widened to an i64 container.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* in2 := float register f2 used in place; flagged global (not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit float input: the f2/f2+2 register pair goes into in1/in2;
   both are globals, so flag them to prevent freeing.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128
/* in2 := address computed from register r2 alone (base r2, no index,
   displacement 0), with the usual addressing-mode masking.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* in2 := effective address from x2/b2/d2; a missing x2 field is treated
   as index register 0.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0
/* in2 := PC-relative target: current insn address plus the signed i2
   immediate counted in halfwords (hence * 2).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* in2 := shift count for 32-bit shifts, masked to 0..31 by
   help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* in2 := shift count for 64-bit shifts, masked to 0..63.  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
/* Memory-operand loaders for in2: in2_a2 leaves the effective address
   in o->in2 (a fresh temp), which is then overwritten in place by the
   loaded value of the indicated width/extension.  */

/* in2 := zero-extended byte at the x2/b2/d2 address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* in2 := sign-extended halfword at the x2/b2/d2 address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* in2 := zero-extended halfword at the x2/b2/d2 address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* in2 := sign-extended word at the x2/b2/d2 address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* in2 := zero-extended word at the x2/b2/d2 address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

/* in2 := doubleword at the x2/b2/d2 address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0
/* PC-relative memory loaders: in2_ri2 puts the PC-relative address in
   o->in2, which is then overwritten in place by the loaded value.  */

/* in2 := zero-extended halfword at the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* in2 := sign-extended word at the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* in2 := zero-extended word at the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* in2 := doubleword at the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
/* "Atomic" variants for compare-and-swap style insns: keep the address
   in addr1 (needed again by the store side) and load in2 from it.  */

static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_32s_atomic 0

static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* XXX should reserve the address */
    in1_la2(s, f, o);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
}
#define SPEC_in2_m2_64_atomic 0
/* in2 := the i2 immediate as a signed 64-bit constant.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* in2 := i2 truncated to 8 bits, unsigned.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* in2 := i2 truncated to 16 bits, unsigned.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* in2 := i2 truncated to 32 bits, unsigned.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* in2 := unsigned 16-bit i2 shifted left by the per-insn data field
   (used by the insert/and/or-immediate-high/low families).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* in2 := unsigned 32-bit i2 shifted left by the per-insn data field.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
4579 /* ====================================================================== */
4581 /* Find opc within the table of insns. This is formulated as a switch
4582 statement so that (1) we get compile-time notice of cut-paste errors
4583 for duplicated opcodes, and (2) the compiler generates the binary
4584 search tree, rather than us having to post-process the table. */
4586 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4587 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4589 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4591 enum DisasInsnEnum
{
4592 #include "insn-data.def"
4596 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4600 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4602 .help_in1 = in1_##I1, \
4603 .help_in2 = in2_##I2, \
4604 .help_prep = prep_##P, \
4605 .help_wout = wout_##W, \
4606 .help_cout = cout_##CC, \
4607 .help_op = op_##OP, \
4611 /* Allow 0 to be used for NULL in the table below. */
4619 #define SPEC_in1_0 0
4620 #define SPEC_in2_0 0
4621 #define SPEC_prep_0 0
4622 #define SPEC_wout_0 0
4624 static const DisasInsn insn_info
[] = {
4625 #include "insn-data.def"
4629 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4630 case OPC: return &insn_info[insn_ ## NM];
4632 static const DisasInsn
*lookup_opc(uint16_t opc
)
4635 #include "insn-data.def"
4644 /* Extract a field from the insn. The INSN should be left-aligned in
4645 the uint64_t so that we can more easily utilize the big-bit-endian
4646 definitions we extract from the Principals of Operation. */
/* Extract one operand field described by F from INSN (left-aligned in
   the uint64_t) and store it in the compressed field array O->c.
   Handles unsigned, signed, and split (dl+dh) encodings.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means "not present in this format".  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;        /* sign-extend via xor/subtract trick */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
4684 /* Lookup the insn at the current PC, extracting the operands into O and
4685 returning the info struct for the insn. Returns NULL for invalid insn. */
4687 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
4690 uint64_t insn
, pc
= s
->pc
;
4692 const DisasInsn
*info
;
4694 insn
= ld_code2(env
, pc
);
4695 op
= (insn
>> 8) & 0xff;
4696 ilen
= get_ilen(op
);
4697 s
->next_pc
= s
->pc
+ ilen
;
4704 insn
= ld_code4(env
, pc
) << 32;
4707 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
4713 /* We can't actually determine the insn format until we've looked up
4714 the full insn opcode. Which we can't do without locating the
4715 secondary opcode. Assume by default that OP2 is at bit 40; for
4716 those smaller insns that don't actually have a secondary opcode
4717 this will correctly result in OP2 = 0. */
4723 case 0xb2: /* S, RRF, RRE */
4724 case 0xb3: /* RRE, RRD, RRF */
4725 case 0xb9: /* RRE, RRF */
4726 case 0xe5: /* SSE, SIL */
4727 op2
= (insn
<< 8) >> 56;
4731 case 0xc0: /* RIL */
4732 case 0xc2: /* RIL */
4733 case 0xc4: /* RIL */
4734 case 0xc6: /* RIL */
4735 case 0xc8: /* SSF */
4736 case 0xcc: /* RIL */
4737 op2
= (insn
<< 12) >> 60;
4739 case 0xd0 ... 0xdf: /* SS */
4745 case 0xee ... 0xf3: /* SS */
4746 case 0xf8 ... 0xfd: /* SS */
4750 op2
= (insn
<< 40) >> 56;
4754 memset(f
, 0, sizeof(*f
));
4758 /* Lookup the instruction. */
4759 info
= lookup_opc(op
<< 8 | op2
);
4761 /* If we found it, extract the operands. */
4763 DisasFormat fmt
= info
->fmt
;
4766 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
4767 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
4773 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
4775 const DisasInsn
*insn
;
4776 ExitStatus ret
= NO_EXIT
;
4780 /* Search for the insn in the table. */
4781 insn
= extract_insn(env
, s
, &f
);
4783 /* Not found means unimplemented/illegal opcode. */
4785 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
4787 gen_illegal_opcode(s
);
4788 return EXIT_NORETURN
;
4791 /* Check for insn specification exceptions. */
4793 int spec
= insn
->spec
, excp
= 0, r
;
4795 if (spec
& SPEC_r1_even
) {
4796 r
= get_field(&f
, r1
);
4798 excp
= PGM_SPECIFICATION
;
4801 if (spec
& SPEC_r2_even
) {
4802 r
= get_field(&f
, r2
);
4804 excp
= PGM_SPECIFICATION
;
4807 if (spec
& SPEC_r3_even
) {
4808 r
= get_field(&f
, r3
);
4810 excp
= PGM_SPECIFICATION
;
4813 if (spec
& SPEC_r1_f128
) {
4814 r
= get_field(&f
, r1
);
4816 excp
= PGM_SPECIFICATION
;
4819 if (spec
& SPEC_r2_f128
) {
4820 r
= get_field(&f
, r2
);
4822 excp
= PGM_SPECIFICATION
;
4826 gen_program_exception(s
, excp
);
4827 return EXIT_NORETURN
;
4831 /* Set up the strutures we use to communicate with the helpers. */
4834 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
4835 TCGV_UNUSED_I64(o
.out
);
4836 TCGV_UNUSED_I64(o
.out2
);
4837 TCGV_UNUSED_I64(o
.in1
);
4838 TCGV_UNUSED_I64(o
.in2
);
4839 TCGV_UNUSED_I64(o
.addr1
);
4841 /* Implement the instruction. */
4842 if (insn
->help_in1
) {
4843 insn
->help_in1(s
, &f
, &o
);
4845 if (insn
->help_in2
) {
4846 insn
->help_in2(s
, &f
, &o
);
4848 if (insn
->help_prep
) {
4849 insn
->help_prep(s
, &f
, &o
);
4851 if (insn
->help_op
) {
4852 ret
= insn
->help_op(s
, &o
);
4854 if (insn
->help_wout
) {
4855 insn
->help_wout(s
, &f
, &o
);
4857 if (insn
->help_cout
) {
4858 insn
->help_cout(s
, &o
);
4861 /* Free any temporaries created by the helpers. */
4862 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
4863 tcg_temp_free_i64(o
.out
);
4865 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
4866 tcg_temp_free_i64(o
.out2
);
4868 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
4869 tcg_temp_free_i64(o
.in1
);
4871 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
4872 tcg_temp_free_i64(o
.in2
);
4874 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
4875 tcg_temp_free_i64(o
.addr1
);
4878 /* Advance to the next instruction. */
4883 static inline void gen_intermediate_code_internal(S390CPU
*cpu
,
4884 TranslationBlock
*tb
,
4887 CPUState
*cs
= CPU(cpu
);
4888 CPUS390XState
*env
= &cpu
->env
;
4890 target_ulong pc_start
;
4891 uint64_t next_page_start
;
4893 int num_insns
, max_insns
;
4901 if (!(tb
->flags
& FLAG_MASK_64
)) {
4902 pc_start
&= 0x7fffffff;
4907 dc
.cc_op
= CC_OP_DYNAMIC
;
4908 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
4910 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4913 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
4914 if (max_insns
== 0) {
4915 max_insns
= CF_COUNT_MASK
;
4922 j
= tcg_op_buf_count();
4926 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
4929 tcg_ctx
.gen_opc_pc
[lj
] = dc
.pc
;
4930 gen_opc_cc_op
[lj
] = dc
.cc_op
;
4931 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
4932 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
4934 if (++num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
4938 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
4939 tcg_gen_debug_insn_start(dc
.pc
);
4943 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
4944 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
4945 if (bp
->pc
== dc
.pc
) {
4946 status
= EXIT_PC_STALE
;
4952 if (status
== NO_EXIT
) {
4953 status
= translate_one(env
, &dc
);
4956 /* If we reach a page boundary, are single stepping,
4957 or exhaust instruction count, stop generation. */
4958 if (status
== NO_EXIT
4959 && (dc
.pc
>= next_page_start
4960 || tcg_op_buf_full()
4961 || num_insns
>= max_insns
4963 || cs
->singlestep_enabled
)) {
4964 status
= EXIT_PC_STALE
;
4966 } while (status
== NO_EXIT
);
4968 if (tb
->cflags
& CF_LAST_IO
) {
4977 update_psw_addr(&dc
);
4979 case EXIT_PC_UPDATED
:
4980 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4981 cc op type is in env */
4983 /* Exit the TB, either by raising a debug exception or by return. */
4985 gen_exception(EXCP_DEBUG
);
4994 gen_tb_end(tb
, num_insns
);
4997 j
= tcg_op_buf_count();
5000 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
5003 tb
->size
= dc
.pc
- pc_start
;
5004 tb
->icount
= num_insns
;
5007 #if defined(S390X_DEBUG_DISAS)
5008 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5009 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5010 log_target_disas(env
, pc_start
, dc
.pc
- pc_start
, 1);
/* Translate a TB without recording PC/opc mapping info.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}

/* Translate a TB, recording PC/opc mapping info for state restore.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
5026 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
, int pc_pos
)
5029 env
->psw
.addr
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5030 cc_op
= gen_opc_cc_op
[pc_pos
];
5031 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {