2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 /* ??? The translation blocks produced by TCG are generally small enough to
28 be entirely reachable with a 16-bit displacement. Leaving the option for
29 a 32-bit displacement here Just In Case. */
30 #define USE_LONG_BRANCHES 0
32 #define TCG_CT_CONST_32 0x0100
33 #define TCG_CT_CONST_NEG 0x0200
34 #define TCG_CT_CONST_ADDI 0x0400
35 #define TCG_CT_CONST_MULI 0x0800
36 #define TCG_CT_CONST_ANDI 0x1000
37 #define TCG_CT_CONST_ORI 0x2000
38 #define TCG_CT_CONST_XORI 0x4000
39 #define TCG_CT_CONST_CMPI 0x8000
41 /* Several places within the instruction set 0 means "no register"
42 rather than TCG_REG_R0. */
43 #define TCG_REG_NONE 0
45 /* A scratch register that may be be used throughout the backend. */
46 #define TCG_TMP0 TCG_REG_R14
#ifdef CONFIG_USE_GUEST_BASE
/* R13 holds the guest base address when guest-base support is enabled.  */
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
/* Without guest-base support, use R0 ("no register") as a harmless default.  */
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
59 /* All of the following instructions are prefixed with their instruction
60 format, and are defined as 8- or 16-bit quantities, even when the two
61 halves of the 16-bit quantity may appear 32 bits apart in the insn.
62 This makes it easy to copy the values from the tables in Appendix B. */
63 typedef enum S390Opcode
{
207 #define LD_SIGNED 0x04
208 #define LD_UINT8 0x00
209 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
210 #define LD_UINT16 0x01
211 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
212 #define LD_UINT32 0x02
213 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
214 #define LD_UINT64 0x03
215 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
218 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
219 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
220 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
224 /* Since R6 is a potential argument register, choose it last of the
225 call-saved registers. Likewise prefer the call-clobbered registers
226 in reverse order to maximize the chance of avoiding the arguments. */
227 static const int tcg_target_reg_alloc_order
[] = {
245 static const int tcg_target_call_iarg_regs
[] = {
253 static const int tcg_target_call_oarg_regs
[] = {
255 #if TCG_TARGET_REG_BITS == 32
264 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
265 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
266 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
267 #define S390_CC_NEVER 0
268 #define S390_CC_ALWAYS 15
270 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
271 static const uint8_t tcg_cond_to_s390_cond
[10] = {
272 [TCG_COND_EQ
] = S390_CC_EQ
,
273 [TCG_COND_NE
] = S390_CC_NE
,
274 [TCG_COND_LT
] = S390_CC_LT
,
275 [TCG_COND_LE
] = S390_CC_LE
,
276 [TCG_COND_GT
] = S390_CC_GT
,
277 [TCG_COND_GE
] = S390_CC_GE
,
278 [TCG_COND_LTU
] = S390_CC_LT
,
279 [TCG_COND_LEU
] = S390_CC_LE
,
280 [TCG_COND_GTU
] = S390_CC_GT
,
281 [TCG_COND_GEU
] = S390_CC_GE
,
284 /* Condition codes that result from a LOAD AND TEST. Here, we have no
285 unsigned instruction variation, however since the test is vs zero we
286 can re-map the outcomes appropriately. */
287 static const uint8_t tcg_cond_to_ltr_cond
[10] = {
288 [TCG_COND_EQ
] = S390_CC_EQ
,
289 [TCG_COND_NE
] = S390_CC_NE
,
290 [TCG_COND_LT
] = S390_CC_LT
,
291 [TCG_COND_LE
] = S390_CC_LE
,
292 [TCG_COND_GT
] = S390_CC_GT
,
293 [TCG_COND_GE
] = S390_CC_GE
,
294 [TCG_COND_LTU
] = S390_CC_NEVER
,
295 [TCG_COND_LEU
] = S390_CC_EQ
,
296 [TCG_COND_GTU
] = S390_CC_NE
,
297 [TCG_COND_GEU
] = S390_CC_ALWAYS
,
300 #ifdef CONFIG_SOFTMMU
302 #include "../../softmmu_defs.h"
304 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
306 static const void * const qemu_ld_helpers
[4] = {
313 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
314 uintxx_t val, int mmu_idx) */
315 static const void * const qemu_st_helpers
[4] = {
323 static uint8_t *tb_ret_addr
;
325 /* A list of relevant facilities used by this translator. Some of these
326 are required for proper operation, and these are checked at startup. */
328 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
329 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
330 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
331 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
333 static uint64_t facilities
;
335 static void patch_reloc(uint8_t *code_ptr
, int type
,
336 tcg_target_long value
, tcg_target_long addend
)
338 tcg_target_long code_ptr_tl
= (tcg_target_long
)code_ptr
;
339 tcg_target_long pcrel2
;
341 /* ??? Not the usual definition of "addend". */
342 pcrel2
= (value
- (code_ptr_tl
+ addend
)) >> 1;
346 assert(pcrel2
== (int16_t)pcrel2
);
347 *(int16_t *)code_ptr
= pcrel2
;
350 assert(pcrel2
== (int32_t)pcrel2
);
351 *(int32_t *)code_ptr
= pcrel2
;
359 static int tcg_target_get_call_iarg_regs_count(int flags
)
361 return sizeof(tcg_target_call_iarg_regs
) / sizeof(int);
364 /* parse target specific constraints */
365 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
367 const char *ct_str
= *pct_str
;
370 case 'r': /* all registers */
371 ct
->ct
|= TCG_CT_REG
;
372 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
374 case 'R': /* not R0 */
375 ct
->ct
|= TCG_CT_REG
;
376 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
377 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R0
);
379 case 'L': /* qemu_ld/st constraint */
380 ct
->ct
|= TCG_CT_REG
;
381 tcg_regset_set32(ct
->u
.regs
, 0, 0xffff);
382 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R2
);
383 tcg_regset_reset_reg (ct
->u
.regs
, TCG_REG_R3
);
385 case 'a': /* force R2 for division */
386 ct
->ct
|= TCG_CT_REG
;
387 tcg_regset_clear(ct
->u
.regs
);
388 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R2
);
390 case 'b': /* force R3 for division */
391 ct
->ct
|= TCG_CT_REG
;
392 tcg_regset_clear(ct
->u
.regs
);
393 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R3
);
395 case 'N': /* force immediate negate */
396 ct
->ct
|= TCG_CT_CONST_NEG
;
398 case 'W': /* force 32-bit ("word") immediate */
399 ct
->ct
|= TCG_CT_CONST_32
;
402 ct
->ct
|= TCG_CT_CONST_ADDI
;
405 ct
->ct
|= TCG_CT_CONST_MULI
;
408 ct
->ct
|= TCG_CT_CONST_ANDI
;
411 ct
->ct
|= TCG_CT_CONST_ORI
;
414 ct
->ct
|= TCG_CT_CONST_XORI
;
417 ct
->ct
|= TCG_CT_CONST_CMPI
;
428 /* Immediates to be used with logical AND. This is an optimization only,
429 since a full 64-bit immediate AND can always be performed with 4 sequential
430 NI[LH][LH] instructions. What we're looking for is immediates that we
431 can load efficiently, and the immediate load plus the reg-reg AND is
432 smaller than the sequential NI's. */
434 static int tcg_match_andi(int ct
, tcg_target_ulong val
)
438 if (facilities
& FACILITY_EXT_IMM
) {
439 if (ct
& TCG_CT_CONST_32
) {
440 /* All 32-bit ANDs can be performed with 1 48-bit insn. */
444 /* Zero-extensions. */
445 if (val
== 0xff || val
== 0xffff || val
== 0xffffffff) {
449 if (ct
& TCG_CT_CONST_32
) {
451 } else if (val
== 0xffffffff) {
456 /* Try all 32-bit insns that can perform it in one go. */
457 for (i
= 0; i
< 4; i
++) {
458 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
459 if ((val
& mask
) == mask
) {
464 /* Look for 16-bit values performing the mask. These are better
465 to load with LLI[LH][LH]. */
466 for (i
= 0; i
< 4; i
++) {
467 tcg_target_ulong mask
= 0xffffull
<< i
*16;
468 if ((val
& mask
) == val
) {
473 /* Look for 32-bit values performing the 64-bit mask. These
474 are better to load with LLI[LH]F, or if extended immediates
475 not available, with a pair of LLI insns. */
476 if ((ct
& TCG_CT_CONST_32
) == 0) {
477 if (val
<= 0xffffffff || (val
& 0xffffffff) == 0) {
485 /* Immediates to be used with logical OR. This is an optimization only,
486 since a full 64-bit immediate OR can always be performed with 4 sequential
487 OI[LH][LH] instructions. What we're looking for is immediates that we
488 can load efficiently, and the immediate load plus the reg-reg OR is
489 smaller than the sequential OI's. */
491 static int tcg_match_ori(int ct
, tcg_target_long val
)
493 if (facilities
& FACILITY_EXT_IMM
) {
494 if (ct
& TCG_CT_CONST_32
) {
495 /* All 32-bit ORs can be performed with 1 48-bit insn. */
500 /* Look for negative values. These are best to load with LGHI. */
502 if (val
== (int16_t)val
) {
505 if (facilities
& FACILITY_EXT_IMM
) {
506 if (val
== (int32_t)val
) {
515 /* Immediates to be used with logical XOR. This is almost, but not quite,
516 only an optimization. XOR with immediate is only supported with the
517 extended-immediate facility. That said, there are a few patterns for
518 which it is better to load the value into a register first. */
520 static int tcg_match_xori(int ct
, tcg_target_long val
)
522 if ((facilities
& FACILITY_EXT_IMM
) == 0) {
526 if (ct
& TCG_CT_CONST_32
) {
527 /* All 32-bit XORs can be performed with 1 48-bit insn. */
531 /* Look for negative values. These are best to load with LGHI. */
532 if (val
< 0 && val
== (int32_t)val
) {
539 /* Imediates to be used with comparisons. */
541 static int tcg_match_cmpi(int ct
, tcg_target_long val
)
543 if (facilities
& FACILITY_EXT_IMM
) {
544 /* The COMPARE IMMEDIATE instruction is available. */
545 if (ct
& TCG_CT_CONST_32
) {
546 /* We have a 32-bit immediate and can compare against anything. */
549 /* ??? We have no insight here into whether the comparison is
550 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
551 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
552 a 32-bit unsigned immediate. If we were to use the (semi)
553 obvious "val == (int32_t)val" we would be enabling unsigned
554 comparisons vs very large numbers. The only solution is to
555 take the intersection of the ranges. */
556 /* ??? Another possible solution is to simply lie and allow all
557 constants here and force the out-of-range values into a temp
558 register in tgen_cmp when we have knowledge of the actual
559 comparison code in use. */
560 return val
>= 0 && val
<= 0x7fffffff;
563 /* Only the LOAD AND TEST instruction is available. */
568 /* Test if a constant matches the constraint. */
569 static int tcg_target_const_match(tcg_target_long val
,
570 const TCGArgConstraint
*arg_ct
)
574 if (ct
& TCG_CT_CONST
) {
578 /* Handle the modifiers. */
579 if (ct
& TCG_CT_CONST_NEG
) {
582 if (ct
& TCG_CT_CONST_32
) {
586 /* The following are mutually exclusive. */
587 if (ct
& TCG_CT_CONST_ADDI
) {
588 /* Immediates that may be used with add. If we have the
589 extended-immediates facility then we have ADD IMMEDIATE
590 with signed and unsigned 32-bit, otherwise we have only
591 ADD HALFWORD IMMEDIATE with a signed 16-bit. */
592 if (facilities
& FACILITY_EXT_IMM
) {
593 return val
== (int32_t)val
|| val
== (uint32_t)val
;
595 return val
== (int16_t)val
;
597 } else if (ct
& TCG_CT_CONST_MULI
) {
598 /* Immediates that may be used with multiply. If we have the
599 general-instruction-extensions, then we have MULTIPLY SINGLE
600 IMMEDIATE with a signed 32-bit, otherwise we have only
601 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
602 if (facilities
& FACILITY_GEN_INST_EXT
) {
603 return val
== (int32_t)val
;
605 return val
== (int16_t)val
;
607 } else if (ct
& TCG_CT_CONST_ANDI
) {
608 return tcg_match_andi(ct
, val
);
609 } else if (ct
& TCG_CT_CONST_ORI
) {
610 return tcg_match_ori(ct
, val
);
611 } else if (ct
& TCG_CT_CONST_XORI
) {
612 return tcg_match_xori(ct
, val
);
613 } else if (ct
& TCG_CT_CONST_CMPI
) {
614 return tcg_match_cmpi(ct
, val
);
620 /* Emit instructions according to the given instruction format. */
622 static void tcg_out_insn_RR(TCGContext
*s
, S390Opcode op
, TCGReg r1
, TCGReg r2
)
624 tcg_out16(s
, (op
<< 8) | (r1
<< 4) | r2
);
627 static void tcg_out_insn_RRE(TCGContext
*s
, S390Opcode op
,
628 TCGReg r1
, TCGReg r2
)
630 tcg_out32(s
, (op
<< 16) | (r1
<< 4) | r2
);
633 static void tcg_out_insn_RI(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
635 tcg_out32(s
, (op
<< 16) | (r1
<< 20) | (i2
& 0xffff));
638 static void tcg_out_insn_RIL(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
640 tcg_out16(s
, op
| (r1
<< 4));
644 static void tcg_out_insn_RS(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
645 TCGReg b2
, TCGReg r3
, int disp
)
647 tcg_out32(s
, (op
<< 24) | (r1
<< 20) | (r3
<< 16) | (b2
<< 12)
651 static void tcg_out_insn_RSY(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
652 TCGReg b2
, TCGReg r3
, int disp
)
654 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | r3
);
655 tcg_out32(s
, (op
& 0xff) | (b2
<< 28)
656 | ((disp
& 0xfff) << 16) | ((disp
& 0xff000) >> 4));
659 #define tcg_out_insn_RX tcg_out_insn_RS
660 #define tcg_out_insn_RXY tcg_out_insn_RSY
662 /* Emit an opcode with "type-checking" of the format. */
663 #define tcg_out_insn(S, FMT, OP, ...) \
664 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
667 /* emit 64-bit shifts */
668 static void tcg_out_sh64(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
669 TCGReg src
, TCGReg sh_reg
, int sh_imm
)
671 tcg_out_insn_RSY(s
, op
, dest
, sh_reg
, src
, sh_imm
);
674 /* emit 32-bit shifts */
675 static void tcg_out_sh32(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
676 TCGReg sh_reg
, int sh_imm
)
678 tcg_out_insn_RS(s
, op
, dest
, sh_reg
, 0, sh_imm
);
681 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg dst
, TCGReg src
)
684 if (type
== TCG_TYPE_I32
) {
685 tcg_out_insn(s
, RR
, LR
, dst
, src
);
687 tcg_out_insn(s
, RRE
, LGR
, dst
, src
);
692 /* load a register with an immediate value */
693 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
694 TCGReg ret
, tcg_target_long sval
)
696 static const S390Opcode lli_insns
[4] = {
697 RI_LLILL
, RI_LLILH
, RI_LLIHL
, RI_LLIHH
700 tcg_target_ulong uval
= sval
;
703 if (type
== TCG_TYPE_I32
) {
704 uval
= (uint32_t)sval
;
705 sval
= (int32_t)sval
;
708 /* Try all 32-bit insns that can load it in one go. */
709 if (sval
>= -0x8000 && sval
< 0x8000) {
710 tcg_out_insn(s
, RI
, LGHI
, ret
, sval
);
714 for (i
= 0; i
< 4; i
++) {
715 tcg_target_long mask
= 0xffffull
<< i
*16;
716 if ((uval
& mask
) == uval
) {
717 tcg_out_insn_RI(s
, lli_insns
[i
], ret
, uval
>> i
*16);
722 /* Try all 48-bit insns that can load it in one go. */
723 if (facilities
& FACILITY_EXT_IMM
) {
724 if (sval
== (int32_t)sval
) {
725 tcg_out_insn(s
, RIL
, LGFI
, ret
, sval
);
728 if (uval
<= 0xffffffff) {
729 tcg_out_insn(s
, RIL
, LLILF
, ret
, uval
);
732 if ((uval
& 0xffffffff) == 0) {
733 tcg_out_insn(s
, RIL
, LLIHF
, ret
, uval
>> 31 >> 1);
738 /* Try for PC-relative address load. */
739 if ((sval
& 1) == 0) {
740 intptr_t off
= (sval
- (intptr_t)s
->code_ptr
) >> 1;
741 if (off
== (int32_t)off
) {
742 tcg_out_insn(s
, RIL
, LARL
, ret
, off
);
747 /* If extended immediates are not present, then we may have to issue
748 several instructions to load the low 32 bits. */
749 if (!(facilities
& FACILITY_EXT_IMM
)) {
750 /* A 32-bit unsigned value can be loaded in 2 insns. And given
751 that the lli_insns loop above did not succeed, we know that
752 both insns are required. */
753 if (uval
<= 0xffffffff) {
754 tcg_out_insn(s
, RI
, LLILL
, ret
, uval
);
755 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
759 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
760 We first want to make sure that all the high bits get set. With
761 luck the low 16-bits can be considered negative to perform that for
762 free, otherwise we load an explicit -1. */
763 if (sval
>> 31 >> 1 == -1) {
765 tcg_out_insn(s
, RI
, LGHI
, ret
, uval
);
767 tcg_out_insn(s
, RI
, LGHI
, ret
, -1);
768 tcg_out_insn(s
, RI
, IILL
, ret
, uval
);
770 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
775 /* If we get here, both the high and low parts have non-zero bits. */
777 /* Recurse to load the lower 32-bits. */
778 tcg_out_movi(s
, TCG_TYPE_I32
, ret
, sval
);
780 /* Insert data into the high 32-bits. */
781 uval
= uval
>> 31 >> 1;
782 if (facilities
& FACILITY_EXT_IMM
) {
783 if (uval
< 0x10000) {
784 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
785 } else if ((uval
& 0xffff) == 0) {
786 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
788 tcg_out_insn(s
, RIL
, IIHF
, ret
, uval
);
792 tcg_out_insn(s
, RI
, IIHL
, ret
, uval
);
794 if (uval
& 0xffff0000) {
795 tcg_out_insn(s
, RI
, IIHH
, ret
, uval
>> 16);
801 /* Emit a load/store type instruction. Inputs are:
802 DATA: The register to be loaded or stored.
803 BASE+OFS: The effective address.
804 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
805 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
807 static void tcg_out_mem(TCGContext
*s
, S390Opcode opc_rx
, S390Opcode opc_rxy
,
808 TCGReg data
, TCGReg base
, TCGReg index
,
811 if (ofs
< -0x80000 || ofs
>= 0x80000) {
812 /* Combine the low 16 bits of the offset with the actual load insn;
813 the high 48 bits must come from an immediate load. */
814 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, ofs
& ~0xffff);
817 /* If we were already given an index register, add it in. */
818 if (index
!= TCG_REG_NONE
) {
819 tcg_out_insn(s
, RRE
, AGR
, TCG_TMP0
, index
);
824 if (opc_rx
&& ofs
>= 0 && ofs
< 0x1000) {
825 tcg_out_insn_RX(s
, opc_rx
, data
, base
, index
, ofs
);
827 tcg_out_insn_RXY(s
, opc_rxy
, data
, base
, index
, ofs
);
832 /* load data without address translation or endianness conversion */
833 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg data
,
834 TCGReg base
, tcg_target_long ofs
)
836 if (type
== TCG_TYPE_I32
) {
837 tcg_out_mem(s
, RX_L
, RXY_LY
, data
, base
, TCG_REG_NONE
, ofs
);
839 tcg_out_mem(s
, 0, RXY_LG
, data
, base
, TCG_REG_NONE
, ofs
);
843 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg data
,
844 TCGReg base
, tcg_target_long ofs
)
846 if (type
== TCG_TYPE_I32
) {
847 tcg_out_mem(s
, RX_ST
, RXY_STY
, data
, base
, TCG_REG_NONE
, ofs
);
849 tcg_out_mem(s
, 0, RXY_STG
, data
, base
, TCG_REG_NONE
, ofs
);
853 /* load data from an absolute host address */
854 static void tcg_out_ld_abs(TCGContext
*s
, TCGType type
, TCGReg dest
, void *abs
)
856 tcg_target_long addr
= (tcg_target_long
)abs
;
858 if (facilities
& FACILITY_GEN_INST_EXT
) {
859 tcg_target_long disp
= (addr
- (tcg_target_long
)s
->code_ptr
) >> 1;
860 if (disp
== (int32_t)disp
) {
861 if (type
== TCG_TYPE_I32
) {
862 tcg_out_insn(s
, RIL
, LRL
, dest
, disp
);
864 tcg_out_insn(s
, RIL
, LGRL
, dest
, disp
);
870 tcg_out_movi(s
, TCG_TYPE_PTR
, dest
, addr
& ~0xffff);
871 tcg_out_ld(s
, type
, dest
, dest
, addr
& 0xffff);
874 static void tgen_ext8s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
876 if (facilities
& FACILITY_EXT_IMM
) {
877 tcg_out_insn(s
, RRE
, LGBR
, dest
, src
);
881 if (type
== TCG_TYPE_I32
) {
883 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 24);
885 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 24);
887 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 24);
889 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 56);
890 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 56);
894 static void tgen_ext8u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
896 if (facilities
& FACILITY_EXT_IMM
) {
897 tcg_out_insn(s
, RRE
, LLGCR
, dest
, src
);
902 tcg_out_movi(s
, type
, TCG_TMP0
, 0xff);
905 tcg_out_movi(s
, type
, dest
, 0xff);
907 if (type
== TCG_TYPE_I32
) {
908 tcg_out_insn(s
, RR
, NR
, dest
, src
);
910 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
914 static void tgen_ext16s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
916 if (facilities
& FACILITY_EXT_IMM
) {
917 tcg_out_insn(s
, RRE
, LGHR
, dest
, src
);
921 if (type
== TCG_TYPE_I32
) {
923 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 16);
925 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 16);
927 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 16);
929 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 48);
930 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 48);
934 static void tgen_ext16u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
936 if (facilities
& FACILITY_EXT_IMM
) {
937 tcg_out_insn(s
, RRE
, LLGHR
, dest
, src
);
942 tcg_out_movi(s
, type
, TCG_TMP0
, 0xffff);
945 tcg_out_movi(s
, type
, dest
, 0xffff);
947 if (type
== TCG_TYPE_I32
) {
948 tcg_out_insn(s
, RR
, NR
, dest
, src
);
950 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
954 static inline void tgen_ext32s(TCGContext
*s
, TCGReg dest
, TCGReg src
)
956 tcg_out_insn(s
, RRE
, LGFR
, dest
, src
);
959 static inline void tgen_ext32u(TCGContext
*s
, TCGReg dest
, TCGReg src
)
961 tcg_out_insn(s
, RRE
, LLGFR
, dest
, src
);
964 static inline void tgen32_addi(TCGContext
*s
, TCGReg dest
, int32_t val
)
966 if (val
== (int16_t)val
) {
967 tcg_out_insn(s
, RI
, AHI
, dest
, val
);
969 tcg_out_insn(s
, RIL
, AFI
, dest
, val
);
973 static inline void tgen64_addi(TCGContext
*s
, TCGReg dest
, int64_t val
)
975 if (val
== (int16_t)val
) {
976 tcg_out_insn(s
, RI
, AGHI
, dest
, val
);
977 } else if (val
== (int32_t)val
) {
978 tcg_out_insn(s
, RIL
, AGFI
, dest
, val
);
979 } else if (val
== (uint32_t)val
) {
980 tcg_out_insn(s
, RIL
, ALGFI
, dest
, val
);
987 static void tgen64_andi(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
989 static const S390Opcode ni_insns
[4] = {
990 RI_NILL
, RI_NILH
, RI_NIHL
, RI_NIHH
992 static const S390Opcode nif_insns
[2] = {
998 /* Look for no-op. */
1003 /* Look for the zero-extensions. */
1004 if (val
== 0xffffffff) {
1005 tgen_ext32u(s
, dest
, dest
);
1009 if (facilities
& FACILITY_EXT_IMM
) {
1011 tgen_ext8u(s
, TCG_TYPE_I64
, dest
, dest
);
1014 if (val
== 0xffff) {
1015 tgen_ext16u(s
, TCG_TYPE_I64
, dest
, dest
);
1019 /* Try all 32-bit insns that can perform it in one go. */
1020 for (i
= 0; i
< 4; i
++) {
1021 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
1022 if ((val
& mask
) == mask
) {
1023 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
1028 /* Try all 48-bit insns that can perform it in one go. */
1029 if (facilities
& FACILITY_EXT_IMM
) {
1030 for (i
= 0; i
< 2; i
++) {
1031 tcg_target_ulong mask
= ~(0xffffffffull
<< i
*32);
1032 if ((val
& mask
) == mask
) {
1033 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
1039 /* Perform the AND via sequential modifications to the high and low
1040 parts. Do this via recursion to handle 16-bit vs 32-bit masks in
1042 tgen64_andi(s
, dest
, val
| 0xffffffff00000000ull
);
1043 tgen64_andi(s
, dest
, val
| 0x00000000ffffffffull
);
1045 /* With no extended-immediate facility, just emit the sequence. */
1046 for (i
= 0; i
< 4; i
++) {
1047 tcg_target_ulong mask
= 0xffffull
<< i
*16;
1048 if ((val
& mask
) != mask
) {
1049 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
1055 static void tgen64_ori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1057 static const S390Opcode oi_insns
[4] = {
1058 RI_OILL
, RI_OILH
, RI_OIHL
, RI_OIHH
1060 static const S390Opcode nif_insns
[2] = {
1066 /* Look for no-op. */
1071 if (facilities
& FACILITY_EXT_IMM
) {
1072 /* Try all 32-bit insns that can perform it in one go. */
1073 for (i
= 0; i
< 4; i
++) {
1074 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1075 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1076 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1081 /* Try all 48-bit insns that can perform it in one go. */
1082 for (i
= 0; i
< 2; i
++) {
1083 tcg_target_ulong mask
= (0xffffffffull
<< i
*32);
1084 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1085 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
1090 /* Perform the OR via sequential modifications to the high and
1091 low parts. Do this via recursion to handle 16-bit vs 32-bit
1092 masks in each half. */
1093 tgen64_ori(s
, dest
, val
& 0x00000000ffffffffull
);
1094 tgen64_ori(s
, dest
, val
& 0xffffffff00000000ull
);
1096 /* With no extended-immediate facility, we don't need to be so
1097 clever. Just iterate over the insns and mask in the constant. */
1098 for (i
= 0; i
< 4; i
++) {
1099 tcg_target_ulong mask
= (0xffffull
<< i
*16);
1100 if ((val
& mask
) != 0) {
1101 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1107 static void tgen64_xori(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1109 /* Perform the xor by parts. */
1110 if (val
& 0xffffffff) {
1111 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1113 if (val
> 0xffffffff) {
1114 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 31 >> 1);
1118 static int tgen_cmp(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg r1
,
1119 TCGArg c2
, int c2const
)
1121 bool is_unsigned
= (c
> TCG_COND_GT
);
1124 if (type
== TCG_TYPE_I32
) {
1125 tcg_out_insn(s
, RR
, LTR
, r1
, r1
);
1127 tcg_out_insn(s
, RRE
, LTGR
, r1
, r1
);
1129 return tcg_cond_to_ltr_cond
[c
];
1132 if (type
== TCG_TYPE_I32
) {
1133 tcg_out_insn(s
, RIL
, CLFI
, r1
, c2
);
1135 tcg_out_insn(s
, RIL
, CLGFI
, r1
, c2
);
1138 if (type
== TCG_TYPE_I32
) {
1139 tcg_out_insn(s
, RIL
, CFI
, r1
, c2
);
1141 tcg_out_insn(s
, RIL
, CGFI
, r1
, c2
);
1147 if (type
== TCG_TYPE_I32
) {
1148 tcg_out_insn(s
, RR
, CLR
, r1
, c2
);
1150 tcg_out_insn(s
, RRE
, CLGR
, r1
, c2
);
1153 if (type
== TCG_TYPE_I32
) {
1154 tcg_out_insn(s
, RR
, CR
, r1
, c2
);
1156 tcg_out_insn(s
, RRE
, CGR
, r1
, c2
);
1160 return tcg_cond_to_s390_cond
[c
];
1163 static void tgen_setcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1164 TCGReg dest
, TCGReg r1
, TCGArg c2
, int c2const
)
1166 int cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
);
1168 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1169 tcg_out_movi(s
, type
, dest
, 1);
1170 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1171 tcg_out_movi(s
, type
, dest
, 0);
1174 static void tgen_gotoi(TCGContext
*s
, int cc
, tcg_target_long dest
)
1176 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1177 if (off
> -0x8000 && off
< 0x7fff) {
1178 tcg_out_insn(s
, RI
, BRC
, cc
, off
);
1179 } else if (off
== (int32_t)off
) {
1180 tcg_out_insn(s
, RIL
, BRCL
, cc
, off
);
1182 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1183 tcg_out_insn(s
, RR
, BCR
, cc
, TCG_TMP0
);
1187 static void tgen_branch(TCGContext
*s
, int cc
, int labelno
)
1189 TCGLabel
* l
= &s
->labels
[labelno
];
1191 tgen_gotoi(s
, cc
, l
->u
.value
);
1192 } else if (USE_LONG_BRANCHES
) {
1193 tcg_out16(s
, RIL_BRCL
| (cc
<< 4));
1194 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC32DBL
, labelno
, -2);
1197 tcg_out16(s
, RI_BRC
| (cc
<< 4));
1198 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC16DBL
, labelno
, -2);
1203 static void tgen_compare_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1204 TCGReg r1
, TCGReg r2
, int labelno
)
1206 TCGLabel
* l
= &s
->labels
[labelno
];
1207 tcg_target_long off
;
1210 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1212 /* We need to keep the offset unchanged for retranslation. */
1213 off
= ((int16_t *)s
->code_ptr
)[1];
1214 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1217 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | r2
);
1219 tcg_out16(s
, cc
<< 12 | (opc
& 0xff));
1222 static void tgen_compare_imm_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1223 TCGReg r1
, int i2
, int labelno
)
1225 TCGLabel
* l
= &s
->labels
[labelno
];
1226 tcg_target_long off
;
1229 off
= (l
->u
.value
- (tcg_target_long
)s
->code_ptr
) >> 1;
1231 /* We need to keep the offset unchanged for retranslation. */
1232 off
= ((int16_t *)s
->code_ptr
)[1];
1233 tcg_out_reloc(s
, s
->code_ptr
+ 2, R_390_PC16DBL
, labelno
, -2);
1236 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | cc
);
1238 tcg_out16(s
, (i2
<< 8) | (opc
& 0xff));
1241 static void tgen_brcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1242 TCGReg r1
, TCGArg c2
, int c2const
, int labelno
)
1246 if (facilities
& FACILITY_GEN_INST_EXT
) {
1247 bool is_unsigned
= (c
> TCG_COND_GT
);
1251 cc
= tcg_cond_to_s390_cond
[c
];
1254 opc
= (type
== TCG_TYPE_I32
1255 ? (is_unsigned
? RIE_CLRJ
: RIE_CRJ
)
1256 : (is_unsigned
? RIE_CLGRJ
: RIE_CGRJ
));
1257 tgen_compare_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1261 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1262 If the immediate we've been given does not fit that range, we'll
1263 fall back to separate compare and branch instructions using the
1264 larger comparison range afforded by COMPARE IMMEDIATE. */
1265 if (type
== TCG_TYPE_I32
) {
1268 in_range
= (uint32_t)c2
== (uint8_t)c2
;
1271 in_range
= (int32_t)c2
== (int8_t)c2
;
1276 in_range
= (uint64_t)c2
== (uint8_t)c2
;
1279 in_range
= (int64_t)c2
== (int8_t)c2
;
1283 tgen_compare_imm_branch(s
, opc
, cc
, r1
, c2
, labelno
);
1288 cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
);
1289 tgen_branch(s
, cc
, labelno
);
1292 static void tgen_calli(TCGContext
*s
, tcg_target_long dest
)
1294 tcg_target_long off
= (dest
- (tcg_target_long
)s
->code_ptr
) >> 1;
1295 if (off
== (int32_t)off
) {
1296 tcg_out_insn(s
, RIL
, BRASL
, TCG_REG_R14
, off
);
1298 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, dest
);
1299 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, TCG_TMP0
);
1303 static void tcg_out_qemu_ld_direct(TCGContext
*s
, int opc
, TCGReg data
,
1304 TCGReg base
, TCGReg index
, int disp
)
1306 #ifdef TARGET_WORDS_BIGENDIAN
1307 const int bswap
= 0;
1309 const int bswap
= 1;
1313 tcg_out_insn(s
, RXY
, LLGC
, data
, base
, index
, disp
);
1316 tcg_out_insn(s
, RXY
, LGB
, data
, base
, index
, disp
);
1320 /* swapped unsigned halfword load with upper bits zeroed */
1321 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1322 tgen_ext16u(s
, TCG_TYPE_I64
, data
, data
);
1324 tcg_out_insn(s
, RXY
, LLGH
, data
, base
, index
, disp
);
1329 /* swapped sign-extended halfword load */
1330 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1331 tgen_ext16s(s
, TCG_TYPE_I64
, data
, data
);
1333 tcg_out_insn(s
, RXY
, LGH
, data
, base
, index
, disp
);
1338 /* swapped unsigned int load with upper bits zeroed */
1339 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1340 tgen_ext32u(s
, data
, data
);
1342 tcg_out_insn(s
, RXY
, LLGF
, data
, base
, index
, disp
);
1347 /* swapped sign-extended int load */
1348 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1349 tgen_ext32s(s
, data
, data
);
1351 tcg_out_insn(s
, RXY
, LGF
, data
, base
, index
, disp
);
1356 tcg_out_insn(s
, RXY
, LRVG
, data
, base
, index
, disp
);
1358 tcg_out_insn(s
, RXY
, LG
, data
, base
, index
, disp
);
1366 static void tcg_out_qemu_st_direct(TCGContext
*s
, int opc
, TCGReg data
,
1367 TCGReg base
, TCGReg index
, int disp
)
1369 #ifdef TARGET_WORDS_BIGENDIAN
1370 const int bswap
= 0;
1372 const int bswap
= 1;
1376 if (disp
>= 0 && disp
< 0x1000) {
1377 tcg_out_insn(s
, RX
, STC
, data
, base
, index
, disp
);
1379 tcg_out_insn(s
, RXY
, STCY
, data
, base
, index
, disp
);
1384 tcg_out_insn(s
, RXY
, STRVH
, data
, base
, index
, disp
);
1385 } else if (disp
>= 0 && disp
< 0x1000) {
1386 tcg_out_insn(s
, RX
, STH
, data
, base
, index
, disp
);
1388 tcg_out_insn(s
, RXY
, STHY
, data
, base
, index
, disp
);
1393 tcg_out_insn(s
, RXY
, STRV
, data
, base
, index
, disp
);
1394 } else if (disp
>= 0 && disp
< 0x1000) {
1395 tcg_out_insn(s
, RX
, ST
, data
, base
, index
, disp
);
1397 tcg_out_insn(s
, RXY
, STY
, data
, base
, index
, disp
);
1402 tcg_out_insn(s
, RXY
, STRVG
, data
, base
, index
, disp
);
1404 tcg_out_insn(s
, RXY
, STG
, data
, base
, index
, disp
);
1412 #if defined(CONFIG_SOFTMMU)
1413 static void tgen64_andi_tmp(TCGContext
*s
, TCGReg dest
, tcg_target_ulong val
)
1415 if (tcg_match_andi(0, val
)) {
1416 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_TMP0
, val
);
1417 tcg_out_insn(s
, RRE
, NGR
, dest
, TCG_TMP0
);
1419 tgen64_andi(s
, dest
, val
);
1423 static void tcg_prepare_qemu_ldst(TCGContext
* s
, TCGReg data_reg
,
1424 TCGReg addr_reg
, int mem_index
, int opc
,
1425 uint16_t **label2_ptr_p
, int is_store
)
1427 const TCGReg arg0
= TCG_REG_R2
;
1428 const TCGReg arg1
= TCG_REG_R3
;
1429 int s_bits
= opc
& 3;
1430 uint16_t *label1_ptr
;
1431 tcg_target_long ofs
;
1433 if (TARGET_LONG_BITS
== 32) {
1434 tgen_ext32u(s
, arg0
, addr_reg
);
1436 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, addr_reg
);
1439 tcg_out_sh64(s
, RSY_SRLG
, arg1
, addr_reg
, TCG_REG_NONE
,
1440 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1442 tgen64_andi_tmp(s
, arg0
, TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
1443 tgen64_andi_tmp(s
, arg1
, (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
1446 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
);
1448 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
);
1450 assert(ofs
< 0x80000);
1452 if (TARGET_LONG_BITS
== 32) {
1453 tcg_out_mem(s
, RX_C
, RXY_CY
, arg0
, arg1
, TCG_AREG0
, ofs
);
1455 tcg_out_mem(s
, 0, RXY_CG
, arg0
, arg1
, TCG_AREG0
, ofs
);
1458 if (TARGET_LONG_BITS
== 32) {
1459 tgen_ext32u(s
, arg0
, addr_reg
);
1461 tcg_out_mov(s
, TCG_TYPE_I64
, arg0
, addr_reg
);
1464 label1_ptr
= (uint16_t*)s
->code_ptr
;
1466 /* je label1 (offset will be patched in later) */
1467 tcg_out_insn(s
, RI
, BRC
, S390_CC_EQ
, 0);
1469 /* call load/store helper */
1471 /* Make sure to zero-extend the value to the full register
1472 for the calling convention. */
1475 tgen_ext8u(s
, TCG_TYPE_I64
, arg1
, data_reg
);
1478 tgen_ext16u(s
, TCG_TYPE_I64
, arg1
, data_reg
);
1481 tgen_ext32u(s
, arg1
, data_reg
);
1484 tcg_out_mov(s
, TCG_TYPE_I64
, arg1
, data_reg
);
1489 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R4
, mem_index
);
1490 /* XXX/FIXME: suboptimal */
1491 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[3],
1492 tcg_target_call_iarg_regs
[2]);
1493 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[2],
1494 tcg_target_call_iarg_regs
[1]);
1495 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[1],
1496 tcg_target_call_iarg_regs
[0]);
1497 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[0],
1499 tgen_calli(s
, (tcg_target_ulong
)qemu_st_helpers
[s_bits
]);
1501 tcg_out_movi(s
, TCG_TYPE_I32
, arg1
, mem_index
);
1502 /* XXX/FIXME: suboptimal */
1503 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[2],
1504 tcg_target_call_iarg_regs
[1]);
1505 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[1],
1506 tcg_target_call_iarg_regs
[0]);
1507 tcg_out_mov(s
, TCG_TYPE_I64
, tcg_target_call_iarg_regs
[0],
1509 tgen_calli(s
, (tcg_target_ulong
)qemu_ld_helpers
[s_bits
]);
1511 /* sign extension */
1514 tgen_ext8s(s
, TCG_TYPE_I64
, data_reg
, arg0
);
1517 tgen_ext16s(s
, TCG_TYPE_I64
, data_reg
, arg0
);
1520 tgen_ext32s(s
, data_reg
, arg0
);
1523 /* unsigned -> just copy */
1524 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, arg0
);
1529 /* jump to label2 (end) */
1530 *label2_ptr_p
= (uint16_t*)s
->code_ptr
;
1532 tcg_out_insn(s
, RI
, BRC
, S390_CC_ALWAYS
, 0);
1534 /* this is label1, patch branch */
1535 *(label1_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1536 (unsigned long)label1_ptr
) >> 1;
1538 ofs
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
1539 assert(ofs
< 0x80000);
1541 tcg_out_mem(s
, 0, RXY_AG
, arg0
, arg1
, TCG_AREG0
, ofs
);
1544 static void tcg_finish_qemu_ldst(TCGContext
* s
, uint16_t *label2_ptr
)
1547 *(label2_ptr
+ 1) = ((unsigned long)s
->code_ptr
-
1548 (unsigned long)label2_ptr
) >> 1;
1551 static void tcg_prepare_user_ldst(TCGContext
*s
, TCGReg
*addr_reg
,
1552 TCGReg
*index_reg
, tcg_target_long
*disp
)
1554 if (TARGET_LONG_BITS
== 32) {
1555 tgen_ext32u(s
, TCG_TMP0
, *addr_reg
);
1556 *addr_reg
= TCG_TMP0
;
1558 if (GUEST_BASE
< 0x80000) {
1559 *index_reg
= TCG_REG_NONE
;
1562 *index_reg
= TCG_GUEST_BASE_REG
;
1566 #endif /* CONFIG_SOFTMMU */
1568 /* load data with address translation (if applicable)
1569 and endianness conversion */
1570 static void tcg_out_qemu_ld(TCGContext
* s
, const TCGArg
* args
, int opc
)
1572 TCGReg addr_reg
, data_reg
;
1573 #if defined(CONFIG_SOFTMMU)
1575 uint16_t *label2_ptr
;
1578 tcg_target_long disp
;
1584 #if defined(CONFIG_SOFTMMU)
1587 tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1588 opc
, &label2_ptr
, 0);
1590 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, TCG_REG_R2
, TCG_REG_NONE
, 0);
1592 tcg_finish_qemu_ldst(s
, label2_ptr
);
1594 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1595 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1599 static void tcg_out_qemu_st(TCGContext
* s
, const TCGArg
* args
, int opc
)
1601 TCGReg addr_reg
, data_reg
;
1602 #if defined(CONFIG_SOFTMMU)
1604 uint16_t *label2_ptr
;
1607 tcg_target_long disp
;
1613 #if defined(CONFIG_SOFTMMU)
1616 tcg_prepare_qemu_ldst(s
, data_reg
, addr_reg
, mem_index
,
1617 opc
, &label2_ptr
, 1);
1619 tcg_out_qemu_st_direct(s
, opc
, data_reg
, TCG_REG_R2
, TCG_REG_NONE
, 0);
1621 tcg_finish_qemu_ldst(s
, label2_ptr
);
1623 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1624 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
/* Expand to case labels for both widths of an opcode (64-bit hosts),
   or just the 32-bit variant otherwise.  */
#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32)
#endif
1637 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1638 const TCGArg
*args
, const int *const_args
)
1643 case INDEX_op_exit_tb
:
1645 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, args
[0]);
1646 tgen_gotoi(s
, S390_CC_ALWAYS
, (unsigned long)tb_ret_addr
);
1649 case INDEX_op_goto_tb
:
1650 if (s
->tb_jmp_offset
) {
1653 /* load address stored at s->tb_next + args[0] */
1654 tcg_out_ld_abs(s
, TCG_TYPE_PTR
, TCG_TMP0
, s
->tb_next
+ args
[0]);
1656 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_TMP0
);
1658 s
->tb_next_offset
[args
[0]] = s
->code_ptr
- s
->code_buf
;
1662 if (const_args
[0]) {
1663 tgen_calli(s
, args
[0]);
1665 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, args
[0]);
1669 case INDEX_op_mov_i32
:
1670 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1672 case INDEX_op_movi_i32
:
1673 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1677 /* ??? LLC (RXY format) is only present with the extended-immediate
1678 facility, whereas LLGC is always present. */
1679 tcg_out_mem(s
, 0, RXY_LLGC
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1683 /* ??? LB is no smaller than LGB, so no point to using it. */
1684 tcg_out_mem(s
, 0, RXY_LGB
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1688 /* ??? LLH (RXY format) is only present with the extended-immediate
1689 facility, whereas LLGH is always present. */
1690 tcg_out_mem(s
, 0, RXY_LLGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1693 case INDEX_op_ld16s_i32
:
1694 tcg_out_mem(s
, RX_LH
, RXY_LHY
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1697 case INDEX_op_ld_i32
:
1698 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1702 tcg_out_mem(s
, RX_STC
, RXY_STCY
, args
[0], args
[1],
1703 TCG_REG_NONE
, args
[2]);
1707 tcg_out_mem(s
, RX_STH
, RXY_STHY
, args
[0], args
[1],
1708 TCG_REG_NONE
, args
[2]);
1711 case INDEX_op_st_i32
:
1712 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1715 case INDEX_op_add_i32
:
1716 if (const_args
[2]) {
1717 tgen32_addi(s
, args
[0], args
[2]);
1719 tcg_out_insn(s
, RR
, AR
, args
[0], args
[2]);
1722 case INDEX_op_sub_i32
:
1723 if (const_args
[2]) {
1724 tgen32_addi(s
, args
[0], -args
[2]);
1726 tcg_out_insn(s
, RR
, SR
, args
[0], args
[2]);
1730 case INDEX_op_and_i32
:
1731 if (const_args
[2]) {
1732 tgen64_andi(s
, args
[0], args
[2] | 0xffffffff00000000ull
);
1734 tcg_out_insn(s
, RR
, NR
, args
[0], args
[2]);
1737 case INDEX_op_or_i32
:
1738 if (const_args
[2]) {
1739 tgen64_ori(s
, args
[0], args
[2] & 0xffffffff);
1741 tcg_out_insn(s
, RR
, OR
, args
[0], args
[2]);
1744 case INDEX_op_xor_i32
:
1745 if (const_args
[2]) {
1746 tgen64_xori(s
, args
[0], args
[2] & 0xffffffff);
1748 tcg_out_insn(s
, RR
, XR
, args
[0], args
[2]);
1752 case INDEX_op_neg_i32
:
1753 tcg_out_insn(s
, RR
, LCR
, args
[0], args
[1]);
1756 case INDEX_op_mul_i32
:
1757 if (const_args
[2]) {
1758 if ((int32_t)args
[2] == (int16_t)args
[2]) {
1759 tcg_out_insn(s
, RI
, MHI
, args
[0], args
[2]);
1761 tcg_out_insn(s
, RIL
, MSFI
, args
[0], args
[2]);
1764 tcg_out_insn(s
, RRE
, MSR
, args
[0], args
[2]);
1768 case INDEX_op_div2_i32
:
1769 tcg_out_insn(s
, RR
, DR
, TCG_REG_R2
, args
[4]);
1771 case INDEX_op_divu2_i32
:
1772 tcg_out_insn(s
, RRE
, DLR
, TCG_REG_R2
, args
[4]);
1775 case INDEX_op_shl_i32
:
1778 if (const_args
[2]) {
1779 tcg_out_sh32(s
, op
, args
[0], TCG_REG_NONE
, args
[2]);
1781 tcg_out_sh32(s
, op
, args
[0], args
[2], 0);
1784 case INDEX_op_shr_i32
:
1787 case INDEX_op_sar_i32
:
1791 case INDEX_op_rotl_i32
:
1792 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1793 if (const_args
[2]) {
1794 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1796 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], args
[2], 0);
1799 case INDEX_op_rotr_i32
:
1800 if (const_args
[2]) {
1801 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1],
1802 TCG_REG_NONE
, (32 - args
[2]) & 31);
1804 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
1805 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_TMP0
, 0);
1809 case INDEX_op_ext8s_i32
:
1810 tgen_ext8s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1812 case INDEX_op_ext16s_i32
:
1813 tgen_ext16s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1815 case INDEX_op_ext8u_i32
:
1816 tgen_ext8u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1818 case INDEX_op_ext16u_i32
:
1819 tgen_ext16u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1823 /* The TCG bswap definition requires bits 0-47 already be zero.
1824 Thus we don't need the G-type insns to implement bswap16_i64. */
1825 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1826 tcg_out_sh32(s
, RS_SRL
, args
[0], TCG_REG_NONE
, 16);
1829 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
1833 tgen_branch(s
, S390_CC_ALWAYS
, args
[0]);
1836 case INDEX_op_brcond_i32
:
1837 tgen_brcond(s
, TCG_TYPE_I32
, args
[2], args
[0],
1838 args
[1], const_args
[1], args
[3]);
1840 case INDEX_op_setcond_i32
:
1841 tgen_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1],
1842 args
[2], const_args
[2]);
1845 case INDEX_op_qemu_ld8u
:
1846 tcg_out_qemu_ld(s
, args
, LD_UINT8
);
1848 case INDEX_op_qemu_ld8s
:
1849 tcg_out_qemu_ld(s
, args
, LD_INT8
);
1851 case INDEX_op_qemu_ld16u
:
1852 tcg_out_qemu_ld(s
, args
, LD_UINT16
);
1854 case INDEX_op_qemu_ld16s
:
1855 tcg_out_qemu_ld(s
, args
, LD_INT16
);
1857 case INDEX_op_qemu_ld32
:
1858 /* ??? Technically we can use a non-extending instruction. */
1859 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
1861 case INDEX_op_qemu_ld64
:
1862 tcg_out_qemu_ld(s
, args
, LD_UINT64
);
1865 case INDEX_op_qemu_st8
:
1866 tcg_out_qemu_st(s
, args
, LD_UINT8
);
1868 case INDEX_op_qemu_st16
:
1869 tcg_out_qemu_st(s
, args
, LD_UINT16
);
1871 case INDEX_op_qemu_st32
:
1872 tcg_out_qemu_st(s
, args
, LD_UINT32
);
1874 case INDEX_op_qemu_st64
:
1875 tcg_out_qemu_st(s
, args
, LD_UINT64
);
1878 #if TCG_TARGET_REG_BITS == 64
1879 case INDEX_op_mov_i64
:
1880 tcg_out_mov(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1882 case INDEX_op_movi_i64
:
1883 tcg_out_movi(s
, TCG_TYPE_I64
, args
[0], args
[1]);
1886 case INDEX_op_ld16s_i64
:
1887 tcg_out_mem(s
, 0, RXY_LGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1889 case INDEX_op_ld32u_i64
:
1890 tcg_out_mem(s
, 0, RXY_LLGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1892 case INDEX_op_ld32s_i64
:
1893 tcg_out_mem(s
, 0, RXY_LGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1895 case INDEX_op_ld_i64
:
1896 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1899 case INDEX_op_st32_i64
:
1900 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1902 case INDEX_op_st_i64
:
1903 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
1906 case INDEX_op_add_i64
:
1907 if (const_args
[2]) {
1908 tgen64_addi(s
, args
[0], args
[2]);
1910 tcg_out_insn(s
, RRE
, AGR
, args
[0], args
[2]);
1913 case INDEX_op_sub_i64
:
1914 if (const_args
[2]) {
1915 tgen64_addi(s
, args
[0], -args
[2]);
1917 tcg_out_insn(s
, RRE
, SGR
, args
[0], args
[2]);
1921 case INDEX_op_and_i64
:
1922 if (const_args
[2]) {
1923 tgen64_andi(s
, args
[0], args
[2]);
1925 tcg_out_insn(s
, RRE
, NGR
, args
[0], args
[2]);
1928 case INDEX_op_or_i64
:
1929 if (const_args
[2]) {
1930 tgen64_ori(s
, args
[0], args
[2]);
1932 tcg_out_insn(s
, RRE
, OGR
, args
[0], args
[2]);
1935 case INDEX_op_xor_i64
:
1936 if (const_args
[2]) {
1937 tgen64_xori(s
, args
[0], args
[2]);
1939 tcg_out_insn(s
, RRE
, XGR
, args
[0], args
[2]);
1943 case INDEX_op_neg_i64
:
1944 tcg_out_insn(s
, RRE
, LCGR
, args
[0], args
[1]);
1946 case INDEX_op_bswap64_i64
:
1947 tcg_out_insn(s
, RRE
, LRVGR
, args
[0], args
[1]);
1950 case INDEX_op_mul_i64
:
1951 if (const_args
[2]) {
1952 if (args
[2] == (int16_t)args
[2]) {
1953 tcg_out_insn(s
, RI
, MGHI
, args
[0], args
[2]);
1955 tcg_out_insn(s
, RIL
, MSGFI
, args
[0], args
[2]);
1958 tcg_out_insn(s
, RRE
, MSGR
, args
[0], args
[2]);
1962 case INDEX_op_div2_i64
:
1963 /* ??? We get an unnecessary sign-extension of the dividend
1964 into R3 with this definition, but as we do in fact always
1965 produce both quotient and remainder using INDEX_op_div_i64
1966 instead requires jumping through even more hoops. */
1967 tcg_out_insn(s
, RRE
, DSGR
, TCG_REG_R2
, args
[4]);
1969 case INDEX_op_divu2_i64
:
1970 tcg_out_insn(s
, RRE
, DLGR
, TCG_REG_R2
, args
[4]);
1973 case INDEX_op_shl_i64
:
1976 if (const_args
[2]) {
1977 tcg_out_sh64(s
, op
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1979 tcg_out_sh64(s
, op
, args
[0], args
[1], args
[2], 0);
1982 case INDEX_op_shr_i64
:
1985 case INDEX_op_sar_i64
:
1989 case INDEX_op_rotl_i64
:
1990 if (const_args
[2]) {
1991 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
1992 TCG_REG_NONE
, args
[2]);
1994 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], args
[2], 0);
1997 case INDEX_op_rotr_i64
:
1998 if (const_args
[2]) {
1999 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2000 TCG_REG_NONE
, (64 - args
[2]) & 63);
2002 /* We can use the smaller 32-bit negate because only the
2003 low 6 bits are examined for the rotate. */
2004 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
2005 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], TCG_TMP0
, 0);
2009 case INDEX_op_ext8s_i64
:
2010 tgen_ext8s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2012 case INDEX_op_ext16s_i64
:
2013 tgen_ext16s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2015 case INDEX_op_ext32s_i64
:
2016 tgen_ext32s(s
, args
[0], args
[1]);
2018 case INDEX_op_ext8u_i64
:
2019 tgen_ext8u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2021 case INDEX_op_ext16u_i64
:
2022 tgen_ext16u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2024 case INDEX_op_ext32u_i64
:
2025 tgen_ext32u(s
, args
[0], args
[1]);
2028 case INDEX_op_brcond_i64
:
2029 tgen_brcond(s
, TCG_TYPE_I64
, args
[2], args
[0],
2030 args
[1], const_args
[1], args
[3]);
2032 case INDEX_op_setcond_i64
:
2033 tgen_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1],
2034 args
[2], const_args
[2]);
2037 case INDEX_op_qemu_ld32u
:
2038 tcg_out_qemu_ld(s
, args
, LD_UINT32
);
2040 case INDEX_op_qemu_ld32s
:
2041 tcg_out_qemu_ld(s
, args
, LD_INT32
);
2043 #endif /* TCG_TARGET_REG_BITS == 64 */
2046 /* This one is obsolete and never emitted. */
2051 fprintf(stderr
,"unimplemented opc 0x%x\n",opc
);
2056 static const TCGTargetOpDef s390_op_defs
[] = {
2057 { INDEX_op_exit_tb
, { } },
2058 { INDEX_op_goto_tb
, { } },
2059 { INDEX_op_call
, { "ri" } },
2060 { INDEX_op_jmp
, { "ri" } },
2061 { INDEX_op_br
, { } },
2063 { INDEX_op_mov_i32
, { "r", "r" } },
2064 { INDEX_op_movi_i32
, { "r" } },
2066 { INDEX_op_ld8u_i32
, { "r", "r" } },
2067 { INDEX_op_ld8s_i32
, { "r", "r" } },
2068 { INDEX_op_ld16u_i32
, { "r", "r" } },
2069 { INDEX_op_ld16s_i32
, { "r", "r" } },
2070 { INDEX_op_ld_i32
, { "r", "r" } },
2071 { INDEX_op_st8_i32
, { "r", "r" } },
2072 { INDEX_op_st16_i32
, { "r", "r" } },
2073 { INDEX_op_st_i32
, { "r", "r" } },
2075 { INDEX_op_add_i32
, { "r", "0", "rWI" } },
2076 { INDEX_op_sub_i32
, { "r", "0", "rWNI" } },
2077 { INDEX_op_mul_i32
, { "r", "0", "rK" } },
2079 { INDEX_op_div2_i32
, { "b", "a", "0", "1", "r" } },
2080 { INDEX_op_divu2_i32
, { "b", "a", "0", "1", "r" } },
2082 { INDEX_op_and_i32
, { "r", "0", "rWA" } },
2083 { INDEX_op_or_i32
, { "r", "0", "rWO" } },
2084 { INDEX_op_xor_i32
, { "r", "0", "rWX" } },
2086 { INDEX_op_neg_i32
, { "r", "r" } },
2088 { INDEX_op_shl_i32
, { "r", "0", "Ri" } },
2089 { INDEX_op_shr_i32
, { "r", "0", "Ri" } },
2090 { INDEX_op_sar_i32
, { "r", "0", "Ri" } },
2092 { INDEX_op_rotl_i32
, { "r", "r", "Ri" } },
2093 { INDEX_op_rotr_i32
, { "r", "r", "Ri" } },
2095 { INDEX_op_ext8s_i32
, { "r", "r" } },
2096 { INDEX_op_ext8u_i32
, { "r", "r" } },
2097 { INDEX_op_ext16s_i32
, { "r", "r" } },
2098 { INDEX_op_ext16u_i32
, { "r", "r" } },
2100 { INDEX_op_bswap16_i32
, { "r", "r" } },
2101 { INDEX_op_bswap32_i32
, { "r", "r" } },
2103 { INDEX_op_brcond_i32
, { "r", "rWC" } },
2104 { INDEX_op_setcond_i32
, { "r", "r", "rWC" } },
2106 { INDEX_op_qemu_ld8u
, { "r", "L" } },
2107 { INDEX_op_qemu_ld8s
, { "r", "L" } },
2108 { INDEX_op_qemu_ld16u
, { "r", "L" } },
2109 { INDEX_op_qemu_ld16s
, { "r", "L" } },
2110 { INDEX_op_qemu_ld32
, { "r", "L" } },
2111 { INDEX_op_qemu_ld64
, { "r", "L" } },
2113 { INDEX_op_qemu_st8
, { "L", "L" } },
2114 { INDEX_op_qemu_st16
, { "L", "L" } },
2115 { INDEX_op_qemu_st32
, { "L", "L" } },
2116 { INDEX_op_qemu_st64
, { "L", "L" } },
2118 #if defined(__s390x__)
2119 { INDEX_op_mov_i64
, { "r", "r" } },
2120 { INDEX_op_movi_i64
, { "r" } },
2122 { INDEX_op_ld8u_i64
, { "r", "r" } },
2123 { INDEX_op_ld8s_i64
, { "r", "r" } },
2124 { INDEX_op_ld16u_i64
, { "r", "r" } },
2125 { INDEX_op_ld16s_i64
, { "r", "r" } },
2126 { INDEX_op_ld32u_i64
, { "r", "r" } },
2127 { INDEX_op_ld32s_i64
, { "r", "r" } },
2128 { INDEX_op_ld_i64
, { "r", "r" } },
2130 { INDEX_op_st8_i64
, { "r", "r" } },
2131 { INDEX_op_st16_i64
, { "r", "r" } },
2132 { INDEX_op_st32_i64
, { "r", "r" } },
2133 { INDEX_op_st_i64
, { "r", "r" } },
2135 { INDEX_op_add_i64
, { "r", "0", "rI" } },
2136 { INDEX_op_sub_i64
, { "r", "0", "rNI" } },
2137 { INDEX_op_mul_i64
, { "r", "0", "rK" } },
2139 { INDEX_op_div2_i64
, { "b", "a", "0", "1", "r" } },
2140 { INDEX_op_divu2_i64
, { "b", "a", "0", "1", "r" } },
2142 { INDEX_op_and_i64
, { "r", "0", "rA" } },
2143 { INDEX_op_or_i64
, { "r", "0", "rO" } },
2144 { INDEX_op_xor_i64
, { "r", "0", "rX" } },
2146 { INDEX_op_neg_i64
, { "r", "r" } },
2148 { INDEX_op_shl_i64
, { "r", "r", "Ri" } },
2149 { INDEX_op_shr_i64
, { "r", "r", "Ri" } },
2150 { INDEX_op_sar_i64
, { "r", "r", "Ri" } },
2152 { INDEX_op_rotl_i64
, { "r", "r", "Ri" } },
2153 { INDEX_op_rotr_i64
, { "r", "r", "Ri" } },
2155 { INDEX_op_ext8s_i64
, { "r", "r" } },
2156 { INDEX_op_ext8u_i64
, { "r", "r" } },
2157 { INDEX_op_ext16s_i64
, { "r", "r" } },
2158 { INDEX_op_ext16u_i64
, { "r", "r" } },
2159 { INDEX_op_ext32s_i64
, { "r", "r" } },
2160 { INDEX_op_ext32u_i64
, { "r", "r" } },
2162 { INDEX_op_bswap16_i64
, { "r", "r" } },
2163 { INDEX_op_bswap32_i64
, { "r", "r" } },
2164 { INDEX_op_bswap64_i64
, { "r", "r" } },
2166 { INDEX_op_brcond_i64
, { "r", "rC" } },
2167 { INDEX_op_setcond_i64
, { "r", "r", "rC" } },
2169 { INDEX_op_qemu_ld32u
, { "r", "L" } },
2170 { INDEX_op_qemu_ld32s
, { "r", "L" } },
/* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
   this information.  However, getting at that entry is not easy this far
   away from main.  Our options are: start searching from environ, but
   that fails as soon as someone does a setenv in between.  Read the data
   from /proc/self/auxv.  Or do the probing ourselves.  The only thing
   extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
   that the kernel saves all 64-bits of the registers around traps while
   in 31-bit mode.  But this is true of all "recent" kernels (ought to dig
   back and see from when this might not be true).  */
/* Set by sigill_handler when a probed instruction is not available.
   sig_atomic_t + volatile: safe to write from a signal handler.  */
static volatile sig_atomic_t got_sigill;

/* SIGILL handler used while probing for optional facilities.  */
static void sigill_handler(int sig)
{
    got_sigill = 1;
}
2195 static void query_facilities(void)
2197 struct sigaction sa_old
, sa_new
;
2198 register int r0
__asm__("0");
2199 register void *r1
__asm__("1");
2202 memset(&sa_new
, 0, sizeof(sa_new
));
2203 sa_new
.sa_handler
= sigill_handler
;
2204 sigaction(SIGILL
, &sa_new
, &sa_old
);
2206 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2207 we need not do any more probing. Unfortunately, this itself is an
2208 extension and the original STORE FACILITY LIST instruction is
2209 kernel-only, storing its results at absolute address 200. */
2212 asm volatile(".word 0xb2b0,0x1000"
2213 : "=r"(r0
) : "0"(0), "r"(r1
) : "memory", "cc");
2216 /* STORE FACILITY EXTENDED is not available. Probe for one of each
2217 kind of instruction that we're interested in. */
2218 /* ??? Possibly some of these are in practice never present unless
2219 the store-facility-extended facility is also present. But since
2220 that isn't documented it's just better to probe for each. */
2222 /* Test for z/Architecture. Required even in 31-bit mode. */
2225 asm volatile(".word 0xb908,0x0000" : "=r"(r0
) : : "cc");
2227 facilities
|= FACILITY_ZARCH_ACTIVE
;
2230 /* Test for long displacement. */
2234 asm volatile(".word 0xe300,0x1000,0x0058"
2235 : "=r"(r0
) : "r"(r1
) : "cc");
2237 facilities
|= FACILITY_LONG_DISP
;
2240 /* Test for extended immediates. */
2243 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2245 facilities
|= FACILITY_EXT_IMM
;
2248 /* Test for general-instructions-extension. */
2251 asm volatile(".word 0xc201,0x0000,0x0001");
2253 facilities
|= FACILITY_GEN_INST_EXT
;
2257 sigaction(SIGILL
, &sa_old
, NULL
);
2259 /* The translator currently uses these extensions unconditionally.
2260 Pruning this back to the base ESA/390 architecture doesn't seem
2261 worthwhile, since even the KVM target requires z/Arch. */
2263 if ((facilities
& FACILITY_ZARCH_ACTIVE
) == 0) {
2264 fprintf(stderr
, "TCG: z/Arch facility is required.\n");
2265 fprintf(stderr
, "TCG: Boot with a 64-bit enabled kernel.\n");
2268 if ((facilities
& FACILITY_LONG_DISP
) == 0) {
2269 fprintf(stderr
, "TCG: long-displacement facility is required.\n");
2273 /* So far there's just enough support for 31-bit mode to let the
2274 compile succeed. This is good enough to run QEMU with KVM. */
2275 if (sizeof(void *) != 8) {
2276 fprintf(stderr
, "TCG: 31-bit mode is not supported.\n");
2285 static void tcg_target_init(TCGContext
*s
)
2287 #if !defined(CONFIG_USER_ONLY)
2289 if ((1 << CPU_TLB_ENTRY_BITS
) != sizeof(CPUTLBEntry
)) {
2296 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffff);
2297 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, 0xffff);
2299 tcg_regset_clear(tcg_target_call_clobber_regs
);
2300 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2301 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2302 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2303 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2304 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R4
);
2305 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R5
);
2306 /* The return register can be considered call-clobbered. */
2307 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2309 tcg_regset_clear(s
->reserved_regs
);
2310 tcg_regset_set_reg(s
->reserved_regs
, TCG_TMP0
);
2311 /* XXX many insns can't be used with R0, so we better avoid it for now */
2312 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_R0
);
2313 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2315 tcg_add_target_add_op_defs(s390_op_defs
);
2316 tcg_set_frame(s
, TCG_AREG0
, offsetof(CPUArchState
, temp_buf
),
2317 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2320 static void tcg_target_qemu_prologue(TCGContext
*s
)
2322 /* stmg %r6,%r15,48(%r15) (save registers) */
2323 tcg_out_insn(s
, RXY
, STMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 48);
2325 /* aghi %r15,-160 (stack frame) */
2326 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_R15
, -160);
2328 if (GUEST_BASE
>= 0x80000) {
2329 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, GUEST_BASE
);
2330 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
2333 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2334 /* br %r3 (go to TB) */
2335 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, tcg_target_call_iarg_regs
[1]);
2337 tb_ret_addr
= s
->code_ptr
;
2339 /* lmg %r6,%r15,208(%r15) (restore registers) */
2340 tcg_out_insn(s
, RXY
, LMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 208);
2342 /* br %r14 (return) */
2343 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_R14
);