X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=tcg%2Fia64%2Ftcg-target.c;h=11a414c47c1f217f8907f584cd5621d3848f6518;hb=d15de15ca01fa990544b015fb972f2d04ab4d2d0;hp=dc9c12cf18ee059fe4573f837fed7ed48a55697d;hpb=6e17d0c5cdf7b3d0086708ba1d2df931e18cb5b5;p=qemu.git

diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index dc9c12cf1..11a414c47 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -23,6 +23,8 @@
  * THE SOFTWARE.
  */
 
+#include "tcg-be-null.h"
+
 /*
  * Register definitions
  */
@@ -107,7 +109,6 @@ enum {
 };
 
 static const int tcg_target_reg_alloc_order[] = {
-    TCG_REG_R33,
     TCG_REG_R35,
     TCG_REG_R36,
     TCG_REG_R37,
@@ -224,12 +225,15 @@ enum {
     OPC_BR_CALL_SPTK_MANY_B5  = 0x02100001000ull,
     OPC_BR_RET_SPTK_MANY_B4   = 0x00108001100ull,
     OPC_BRL_SPTK_MANY_X3      = 0x18000001000ull,
+    OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
     OPC_CMP_LT_A6             = 0x18000000000ull,
     OPC_CMP_LTU_A6            = 0x1a000000000ull,
     OPC_CMP_EQ_A6             = 0x1c000000000ull,
     OPC_CMP4_LT_A6            = 0x18400000000ull,
     OPC_CMP4_LTU_A6           = 0x1a400000000ull,
     OPC_CMP4_EQ_A6            = 0x1c400000000ull,
+    OPC_DEP_I14               = 0x0ae00000000ull,
+    OPC_DEP_I15               = 0x08000000000ull,
     OPC_DEP_Z_I12             = 0x0a600000000ull,
     OPC_EXTR_I11              = 0x0a400002000ull,
     OPC_EXTR_U_I11            = 0x0a400000000ull,
@@ -280,6 +284,9 @@ enum {
     OPC_ZXT1_I29              = 0x00080000000ull,
     OPC_ZXT2_I29              = 0x00088000000ull,
     OPC_ZXT4_I29              = 0x00090000000ull,
+
+    INSN_NOP_M                = OPC_NOP_M48,  /* nop.m 0 */
+    INSN_NOP_I                = OPC_NOP_I18,  /* nop.i 0 */
 };
 
 static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1,
@@ -501,6 +508,30 @@ static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1,
            | (qp & 0x3f);
 }
 
+static inline uint64_t tcg_opc_i14(int qp, uint64_t opc, int r1, uint64_t imm,
+                                   int r3, uint64_t pos, uint64_t len)
+{
+    return opc
+           | ((imm & 0x01) << 36)
+           | ((len & 0x3f) << 27)
+           | ((r3 & 0x7f) << 20)
+           | ((pos & 0x3f) << 14)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
+static inline uint64_t tcg_opc_i15(int qp, uint64_t opc, int r1, int r2,
+                                   int r3, uint64_t pos, uint64_t len)
+{
+    return opc
+           | ((pos & 0x3f) << 31)
+           | ((len & 0x0f) << 27)
+           | ((r3 & 0x7f) << 20)
+           | ((r2 & 0x7f) << 13)
+           | ((r1 & 0x7f) << 6)
+           | (qp & 0x3f);
+}
+
 static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm)
 {
     return opc
@@ -553,6 +584,8 @@ static inline uint64_t tcg_opc_l3(uint64_t imm)
     return (imm & 0x07fffffffff00000ull) >> 18;
 }
 
+#define tcg_opc_l4 tcg_opc_l3
+
 static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3)
 {
     return opc
@@ -637,21 +670,30 @@ static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
            | (qp & 0x3f);
 }
 
+static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm)
+{
+    return opc
+           | ((imm & 0x0800000000000000ull) >> 23) /* i */
+           | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
+           | ((b1 & 0x7) << 6)
+           | (qp & 0x3f);
+}
+
 /*
  * Relocations
  */
 
-static inline void reloc_pcrel21b (void *pc, tcg_target_long target)
+static inline void reloc_pcrel21b(void *pc, intptr_t target)
 {
     uint64_t imm;
     int64_t disp;
     int slot;
 
-    slot = (tcg_target_long) pc & 3;
-    pc = (void *)((tcg_target_long) pc & ~3);
+    slot = (intptr_t)pc & 3;
+    pc = (void *)((intptr_t)pc & ~3);
 
-    disp = target - (tcg_target_long) pc;
+    disp = target - (intptr_t)pc;
     imm = (uint64_t) disp >> 4;
 
     switch(slot) {
@@ -702,12 +744,12 @@ static inline uint64_t get_reloc_pcrel21b (void *pc)
     }
 }
 
-static inline void reloc_pcrel60b (void *pc, tcg_target_long target)
+static inline void reloc_pcrel60b(void *pc, intptr_t target)
 {
     int64_t disp;
     uint64_t imm;
 
-    disp = target - (tcg_target_long) pc;
+    disp = target - (intptr_t)pc;
     imm = (uint64_t) disp >> 4;
 
     *(uint64_t *)(pc + 8) = (*(uint64_t *)(pc + 8) & 0xf700000fff800000ull)
@@ -733,7 +775,7 @@ static inline uint64_t get_reloc_pcrel60b (void *pc)
 }
 
 static void patch_reloc(uint8_t *code_ptr, int type,
-                        tcg_target_long value, tcg_target_long addend)
+                        intptr_t value, intptr_t addend)
 {
     value += addend;
     switch (type) {
@@ -827,8 +869,8 @@ static inline void tcg_out_mov(TCGContext *s, TCGType type,
                                TCGReg ret, TCGReg arg)
 {
     tcg_out_bundle(s, mmI,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_M,
                    tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, 0, arg));
 }
 
@@ -836,7 +878,7 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                 TCGReg reg, tcg_target_long arg)
 {
     tcg_out_bundle(s, mLX,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_l2 (arg),
                    tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg));
 }
@@ -849,8 +891,8 @@ static void tcg_out_br(TCGContext *s, int label_index)
        the existing value and using it again.  This ensure that caches and
        memory are kept coherent during retranslation. */
     tcg_out_bundle(s, mmB,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_M,
                    tcg_opc_b1 (TCG_REG_P0, OPC_BR_SPTK_MANY_B1,
                                get_reloc_pcrel21b(s->code_ptr + 2)));
 
@@ -862,7 +904,23 @@ static void tcg_out_br(TCGContext *s, int label_index)
     }
 }
 
-static inline void tcg_out_call(TCGContext *s, TCGArg addr)
+static inline void tcg_out_calli(TCGContext *s, uintptr_t addr)
+{
+    /* Look through the function descriptor.  */
+    uintptr_t disp, *desc = (uintptr_t *)addr;
+    tcg_out_bundle(s, mlx,
+                   INSN_NOP_M,
+                   tcg_opc_l2 (desc[1]),
+                   tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, desc[1]));
+    disp = (desc[0] - (uintptr_t)s->code_ptr) >> 4;
+    tcg_out_bundle(s, mLX,
+                   INSN_NOP_M,
+                   tcg_opc_l4 (disp),
+                   tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4,
+                               TCG_REG_B0, disp));
+}
+
+static inline void tcg_out_callr(TCGContext *s, TCGReg addr)
 {
     tcg_out_bundle(s, MmI,
                    tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, addr),
@@ -871,7 +929,7 @@ static inline void tcg_out_call(TCGContext *s, TCGArg addr)
                                TCG_REG_B6, TCG_REG_R2, 0));
     tcg_out_bundle(s, mmB,
                    tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R3),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_b5 (TCG_REG_P0, OPC_BR_CALL_SPTK_MANY_B5,
                                TCG_REG_B0, TCG_REG_B6));
 }
@@ -887,7 +945,7 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
     imm = (uint64_t)disp >> 4;
 
     tcg_out_bundle(s, mLX,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_l3 (imm),
                    tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
 }
@@ -904,12 +962,12 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
         tcg_out_bundle(s, MmI,
                        tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
                                    TCG_REG_R2, TCG_REG_R2),
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6,
                                    TCG_REG_R2, 0));
         tcg_out_bundle(s, mmB,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_M,
                        tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
                                    TCG_REG_B6));
     }
@@ -919,12 +977,12 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
 static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
 {
     tcg_out_bundle(s, mmI,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_M,
                    tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0));
     tcg_out_bundle(s, mmB,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_M,
                    tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
 }
 
@@ -936,14 +994,14 @@ static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
                        tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
                                   TCG_REG_R2, arg2, arg1),
                        tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     } else {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
         tcg_out_bundle(s, MmI,
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
                                    TCG_REG_R2, TCG_REG_R2, arg1),
                        tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 }
 
@@ -955,19 +1013,19 @@ static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
                        tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
                                   TCG_REG_R2, arg2, arg1),
                        tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     } else {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
         tcg_out_bundle(s, MmI,
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
                                    TCG_REG_R2, TCG_REG_R2, arg1),
                        tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 }
 
 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
-                              TCGReg arg1, tcg_target_long arg2)
+                              TCGReg arg1, intptr_t arg2)
 {
     if (type == TCG_TYPE_I32) {
         tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2);
@@ -977,7 +1035,7 @@ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
 }
 
 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
-                              TCGReg arg1, tcg_target_long arg2)
+                              TCGReg arg1, intptr_t arg2)
 {
     if (type == TCG_TYPE_I32) {
         tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2);
@@ -997,7 +1055,7 @@ static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, TCGArg ret,
                           TCG_REG_R2, arg1, TCG_REG_R0);
         arg1 = TCG_REG_R2;
     } else {
-        opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+        opc1 = INSN_NOP_M;
     }
 
     if (const_arg2 && arg2 != 0) {
@@ -1005,7 +1063,7 @@ static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, TCGArg ret,
                           TCG_REG_R3, arg2, TCG_REG_R0);
         arg2 = TCG_REG_R3;
     } else {
-        opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
+        opc2 = INSN_NOP_I;
     }
 
     tcg_out_bundle(s, mII,
@@ -1019,7 +1077,7 @@ static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
                                TCGArg arg2, int const_arg2)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2),
                    tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
 }
@@ -1029,7 +1087,7 @@ static inline void tcg_out_nand(TCGContext *s, TCGArg ret,
                                 TCGArg arg2, int const_arg2)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2),
                    tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
 }
@@ -1039,7 +1097,7 @@ static inline void tcg_out_nor(TCGContext *s, TCGArg ret,
                                TCGArg arg2, int const_arg2)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2),
                    tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
 }
@@ -1049,7 +1107,7 @@ static inline void tcg_out_orc(TCGContext *s, TCGArg ret,
                                TCGArg arg2, int const_arg2)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2),
                    tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2));
 }
@@ -1060,16 +1118,16 @@ static inline void tcg_out_mul(TCGContext *s, TCGArg ret,
     tcg_out_bundle(s, mmI,
                    tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1),
                    tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                   INSN_NOP_I);
     tcg_out_bundle(s, mmF,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_M,
                    tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6,
                                TCG_REG_F6, TCG_REG_F7, TCG_REG_F0));
     tcg_out_bundle(s, miI,
                    tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                   INSN_NOP_I,
+                   INSN_NOP_I);
 }
 
 static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
@@ -1077,8 +1135,8 @@ static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, ret,
                                    arg1, arg2, 31 - arg2));
     } else {
@@ -1096,14 +1154,14 @@ static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, ret,
                                    arg1, arg2, 63 - arg2));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2));
     }
 }
@@ -1113,13 +1171,13 @@ static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
                                    arg1, 63 - arg2, 31 - arg2));
     } else {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2,
                                    0x1f, arg2),
                        tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
@@ -1132,14 +1190,14 @@ static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
                                    arg1, 63 - arg2, 63 - arg2));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
                                    arg1, arg2));
     }
@@ -1150,8 +1208,8 @@ static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
                                    arg1, arg2, 31 - arg2));
     } else {
@@ -1169,14 +1227,14 @@ static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
                                    arg1, arg2, 63 - arg2));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
                                    arg1, arg2));
     }
@@ -1187,20 +1245,20 @@ static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                    TCG_REG_R2, arg1, arg1),
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
                                    TCG_REG_R2, 32 - arg2, 31));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                    TCG_REG_R2, arg1, arg1),
                        tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
                                    0x1f, arg2));
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3,
                                    0x20, TCG_REG_R3),
                        tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
@@ -1213,8 +1271,8 @@ static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
                                    arg1, 0x40 - arg2));
     } else {
@@ -1226,8 +1284,8 @@ static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
                        tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2,
                                    arg1, TCG_REG_R2));
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
                                    TCG_REG_R2, TCG_REG_R3));
     }
@@ -1238,7 +1296,7 @@ static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                    TCG_REG_R2, arg1, arg1),
                        tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
@@ -1259,8 +1317,8 @@ static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
 {
     if (const_arg2) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
                                    arg1, arg2));
     } else {
@@ -1272,8 +1330,8 @@ static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
                        tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2,
                                    arg1, TCG_REG_R2));
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
                                    TCG_REG_R2, TCG_REG_R3));
     }
@@ -1283,15 +1341,15 @@ static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29,
                                TCGArg ret, TCGArg arg)
 {
     tcg_out_bundle(s, miI,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_I,
                    tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg));
 }
 
 static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15),
                    tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
 }
@@ -1299,7 +1357,7 @@ static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
 static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31),
                    tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
 }
@@ -1307,11 +1365,42 @@ static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
 static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg)
 {
     tcg_out_bundle(s, miI,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                   INSN_NOP_M,
+                   INSN_NOP_I,
                    tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, arg, 0xb));
 }
 
+static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
+                                   TCGArg a2, int const_a2, int pos, int len)
+{
+    uint64_t i1 = 0, i2 = 0;
+    int cpos = 63 - pos, lm1 = len - 1;
+
+    if (const_a2) {
+        /* Truncate the value of a constant a2 to the width of the field.  */
+        int mask = (1u << len) - 1;
+        a2 &= mask;
+
+        if (a2 == 0 || a2 == mask) {
+            /* 1-bit signed constant inserted into register.  */
+            i2 = tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, ret, a2, a1, cpos, lm1);
+        } else {
+            /* Otherwise, load any constant into a temporary.  Do this into
+               the first I slot to help out with cross-unit delays.  */
+            i1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
+                            TCG_REG_R2, a2, TCG_REG_R0);
+            a2 = TCG_REG_R2;
+        }
+    }
+    if (i2 == 0) {
+        i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1);
+    }
+    tcg_out_bundle(s, (i1 ? mII : miI),
+                   INSN_NOP_M,
+                   i1 ? i1 : INSN_NOP_I,
+                   i2);
+}
+
 static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
                                      TCGArg arg2, int cmp4)
 {
@@ -1354,38 +1443,16 @@ static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
     }
 }
 
-static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1,
-                                  int const_arg1, TCGArg arg2, int const_arg2,
-                                  int label_index, int cmp4)
+static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
+                                  TCGReg arg2, int label_index, int cmp4)
 {
     TCGLabel *l = &s->labels[label_index];
-    uint64_t opc1, opc2;
 
-    if (const_arg1 && arg1 != 0) {
-        opc1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
-                          arg1, TCG_REG_R0);
-        arg1 = TCG_REG_R2;
-    } else {
-        opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
-    }
-
-    if (const_arg2 && arg2 != 0) {
-        opc2 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R3,
-                          arg2, TCG_REG_R0);
-        arg2 = TCG_REG_R3;
-    } else {
-        opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
-    }
-
-    tcg_out_bundle(s, mII,
-                   opc1,
-                   opc2,
-                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4));
-    tcg_out_bundle(s, mmB,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_b1 (TCG_REG_P6, OPC_BR_DPTK_FEW_B1,
-                               get_reloc_pcrel21b(s->code_ptr + 2)));
+    tcg_out_bundle(s, miB,
+                   INSN_NOP_M,
+                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
+                   tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1,
+                              get_reloc_pcrel21b(s->code_ptr + 2)));
 
     if (l->has_value) {
         reloc_pcrel21b((s->code_ptr - 16) + 2, l->u.value);
@@ -1404,21 +1471,44 @@ static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret,
                    tcg_opc_a5(TCG_REG_P7, OPC_ADDL_A5, ret, 0, TCG_REG_R0));
 }
 
-#if defined(CONFIG_SOFTMMU)
+static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
+                                   TCGArg c1, TCGArg c2,
+                                   TCGArg v1, int const_v1,
+                                   TCGArg v2, int const_v2, int cmp4)
+{
+    uint64_t opc1, opc2;
 
+    if (const_v1) {
+        opc1 = tcg_opc_a5(TCG_REG_P6, OPC_ADDL_A5, ret, v1, TCG_REG_R0);
+    } else if (ret == v1) {
+        opc1 = INSN_NOP_M;
+    } else {
+        opc1 = tcg_opc_a4(TCG_REG_P6, OPC_ADDS_A4, ret, 0, v1);
+    }
+    if (const_v2) {
+        opc2 = tcg_opc_a5(TCG_REG_P7, OPC_ADDL_A5, ret, v2, TCG_REG_R0);
+    } else if (ret == v2) {
+        opc2 = INSN_NOP_I;
+    } else {
+        opc2 = tcg_opc_a4(TCG_REG_P7, OPC_ADDS_A4, ret, 0, v2);
+    }
 
-#include "../../softmmu_defs.h"
+    tcg_out_bundle(s, MmI,
+                   tcg_opc_cmp_a(TCG_REG_P0, cond, c1, c2, cmp4),
+                   opc1,
+                   opc2);
+}
+
+#if defined(CONFIG_SOFTMMU)
 /* Load and compare a TLB entry, and return the result in (p6, p7).
    R2 is loaded with the address of the addend TLB entry.
-   R56 is loaded with the address, zero extented on 32-bit targets.  */
+   R57 is loaded with the address, zero extented on 32-bit targets.  */
 static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
-                                    int s_bits, uint64_t offset_rw,
+                                    TCGMemOp s_bits, uint64_t offset_rw,
                                     uint64_t offset_addend)
 {
     tcg_out_bundle(s, mII,
-                   tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R3,
-                               TARGET_PAGE_MASK | ((1 << s_bits) - 1),
-                               TCG_REG_R0),
+                   INSN_NOP_M,
                    tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
                                addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
                    tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
@@ -1428,9 +1518,9 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
                    tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
                                offset_rw, TCG_REG_R2),
 #if TARGET_LONG_BITS == 32
-                   tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R56, addr_reg),
+                   tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R57, addr_reg),
 #else
-                   tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, TCG_REG_R56,
+                   tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, TCG_REG_R57,
                               0, addr_reg),
 #endif
                    tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
@@ -1438,12 +1528,13 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
     tcg_out_bundle(s, mII,
                    tcg_opc_m3 (TCG_REG_P0,
                                (TARGET_LONG_BITS == 32
-                                ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R57,
+                                ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R56,
                                TCG_REG_R2, offset_addend - offset_rw),
-                   tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, TCG_REG_R3,
-                               TCG_REG_R3, TCG_REG_R56),
+                   tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R3, 0,
+                               TCG_REG_R57, 63 - s_bits,
+                               TARGET_PAGE_BITS - s_bits - 1),
                    tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
-                               TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
+                               TCG_REG_P7, TCG_REG_R3, TCG_REG_R56));
 }
 
 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
@@ -1455,23 +1546,24 @@ static const void * const qemu_ld_helpers[4] = {
     helper_ldq_mmu,
 };
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+                                   TCGMemOp opc)
 {
-    int addr_reg, data_reg, mem_index, s_bits, bswap;
-    uint64_t opc_ld_m1[4] = { OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 };
-    uint64_t opc_ext_i29[8] = { OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
-                                OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 };
+    static const uint64_t opc_ld_m1[4] = {
+        OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
+    };
+    static const uint64_t opc_ext_i29[8] = {
+        OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
+        OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
+    };
+    int addr_reg, data_reg, mem_index;
+    TCGMemOp s_bits, bswap;
 
     data_reg = *args++;
     addr_reg = *args++;
     mem_index = *args;
-    s_bits = opc & 3;
-
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
+    s_bits = opc & MO_SIZE;
+    bswap = opc & MO_BSWAP;
 
     /* Read the TLB entry */
     tcg_out_qemu_tlb(s, addr_reg, s_bits,
@@ -1480,8 +1572,8 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 
     /* P6 is the fast path, and P7 the slow path */
     tcg_out_bundle(s, mLX,
-                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R57,
-                               mem_index, TCG_REG_R0),
+                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
+                               TCG_REG_R56, 0, TCG_AREG0),
                    tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
                    tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
                                (tcg_target_long) qemu_ld_helpers[s_bits]));
@@ -1489,17 +1581,17 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_bundle(s, MmI,
                    tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
                                TCG_REG_R2, 8),
                    tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
-                               TCG_REG_R3, TCG_REG_R56),
+                               TCG_REG_R3, TCG_REG_R57),
                    tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
                                TCG_REG_R3, 0));
-    if (bswap && s_bits == 1) {
+    if (bswap && s_bits == MO_16) {
         tcg_out_bundle(s, MmI,
                        tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
                                    TCG_REG_R8, TCG_REG_R3),
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
                        tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12, TCG_REG_R8,
                                    TCG_REG_R8, 15, 15));
-    } else if (bswap && s_bits == 2) {
+    } else if (bswap && s_bits == MO_32) {
         tcg_out_bundle(s, MmI,
                        tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
                                    TCG_REG_R8, TCG_REG_R3),
@@ -1511,42 +1603,36 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                        tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
                                    TCG_REG_R8, TCG_REG_R3),
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 
-    /* XXX/FIXME: suboptimal */
-    tcg_out_bundle(s, mII,
-                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
-                               mem_index, TCG_REG_R0),
-                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
-                               TCG_REG_R57, 0, TCG_REG_R56),
-                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
-                               TCG_REG_R56, 0, TCG_AREG0));
-    if (!bswap || s_bits == 0) {
+    if (!bswap) {
         tcg_out_bundle(s, miB,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
+                                   mem_index, TCG_REG_R0),
+                       INSN_NOP_I,
                        tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
                                    TCG_REG_B0, TCG_REG_B6));
     } else {
         tcg_out_bundle(s, miB,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
+                                   mem_index, TCG_REG_R0),
                        tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
                                    TCG_REG_R8, TCG_REG_R8, 0xb),
                        tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
                                    TCG_REG_B0, TCG_REG_B6));
     }
 
-    if (opc == 3) {
+    if (s_bits == MO_64) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
                                    data_reg, 0, TCG_REG_R8));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
-                       tcg_opc_i29(TCG_REG_P0, opc_ext_i29[opc],
+                       INSN_NOP_M,
+                       INSN_NOP_I,
+                       tcg_opc_i29(TCG_REG_P0, opc_ext_i29[opc & MO_SSIZE],
                                    data_reg, TCG_REG_R8));
     }
 }
@@ -1560,94 +1646,112 @@ static const void * const qemu_st_helpers[4] = {
     helper_stq_mmu,
 };
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+                                   TCGMemOp opc)
 {
-    int addr_reg, data_reg, mem_index, bswap;
-    uint64_t opc_st_m4[4] = { OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 };
+    static const uint64_t opc_st_m4[4] = {
+        OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
+    };
+    int addr_reg, data_reg, mem_index;
+    TCGMemOp s_bits;
 
     data_reg = *args++;
     addr_reg = *args++;
     mem_index = *args;
+    s_bits = opc & MO_SIZE;
 
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
-
-    tcg_out_qemu_tlb(s, addr_reg, opc,
+    tcg_out_qemu_tlb(s, addr_reg, s_bits,
                      offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
                      offsetof(CPUArchState, tlb_table[mem_index][0].addend));
 
     /* P6 is the fast path, and P7 the slow path */
     tcg_out_bundle(s, mLX,
-                   tcg_opc_a4(TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R57,
-                              0, data_reg),
-                   tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[opc]),
+                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
+                               TCG_REG_R56, 0, TCG_AREG0),
+                   tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[s_bits]),
                    tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
-                               (tcg_target_long) qemu_st_helpers[opc]));
+                               (tcg_target_long) qemu_st_helpers[s_bits]));
     tcg_out_bundle(s, MmI,
                    tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
                                TCG_REG_R2, 8),
                    tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
-                               TCG_REG_R3, TCG_REG_R56),
+                               TCG_REG_R3, TCG_REG_R57),
                    tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
                                TCG_REG_R3, 0));
 
-    if (!bswap || opc == 0) {
-        tcg_out_bundle(s, mII,
+    switch (opc) {
+    case MO_8:
+    case MO_16:
+    case MO_32:
+    case MO_64:
+        tcg_out_bundle(s, mii,
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
-    } else if (opc == 1) {
-        tcg_out_bundle(s, mII,
+                       tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
+                                   0, data_reg),
+                       INSN_NOP_I);
+        break;
+
+    case MO_16 | MO_BSWAP:
+        tcg_out_bundle(s, miI,
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
+                       INSN_NOP_I,
                        tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R2, data_reg, 15, 15),
+                                   TCG_REG_R2, data_reg, 15, 15));
+        tcg_out_bundle(s, miI,
+                       tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
+                                   0, data_reg),
+                       INSN_NOP_I,
                        tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
                                    TCG_REG_R2, TCG_REG_R2, 0xb));
         data_reg = TCG_REG_R2;
-    } else if (opc == 2) {
-        tcg_out_bundle(s, mII,
+        break;
+
+    case MO_32 | MO_BSWAP:
+        tcg_out_bundle(s, miI,
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
+                       INSN_NOP_I,
                        tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
-                                   TCG_REG_R2, data_reg, 31, 31),
+                                   TCG_REG_R2, data_reg, 31, 31));
        tcg_out_bundle(s, miI,
+                       tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
+                                   0, data_reg),
+                       INSN_NOP_I,
                        tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
                                    TCG_REG_R2, TCG_REG_R2, 0xb));
         data_reg = TCG_REG_R2;
-    } else if (opc == 3) {
+        break;
+
+    case MO_64 | MO_BSWAP:
         tcg_out_bundle(s, miI,
                        tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
+                                   0, data_reg),
                        tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
                                    TCG_REG_R2, data_reg, 0xb));
         data_reg = TCG_REG_R2;
+        break;
+
+    default:
+        tcg_abort();
     }
 
-    /* XXX/FIXME: suboptimal */
-    tcg_out_bundle(s, mII,
-                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R59,
-                               mem_index, TCG_REG_R0),
-                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
-                               TCG_REG_R58, 0, TCG_REG_R57),
-                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
-                               TCG_REG_R57, 0, TCG_REG_R56));
     tcg_out_bundle(s, miB,
-                   tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
+                   tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
                                data_reg, TCG_REG_R3),
-                   tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
-                               TCG_REG_R56, 0, TCG_AREG0),
+                   tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R59,
+                               mem_index, TCG_REG_R0),
                    tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
                                TCG_REG_B0, TCG_REG_B6));
 }
 
 #else /* !CONFIG_SOFTMMU */
 
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+                                   TCGMemOp opc)
 {
     static uint64_t const opc_ld_m1[4] = {
         OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
@@ -1655,81 +1759,77 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     static uint64_t const opc_sxt_i29[4] = {
         OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
     };
-    int addr_reg, data_reg, s_bits, bswap;
+    int addr_reg, data_reg;
+    TCGMemOp s_bits, bswap;
 
     data_reg = *args++;
     addr_reg = *args++;
-    s_bits = opc & 3;
-
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
+    s_bits = opc & MO_SIZE;
+    bswap = opc & MO_BSWAP;
 
 #if TARGET_LONG_BITS == 32
     if (GUEST_BASE != 0) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                    TCG_REG_R3, addr_reg),
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                    TCG_GUEST_BASE_REG, TCG_REG_R3));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                    TCG_REG_R2, addr_reg),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 
-    if (!bswap || s_bits == 0) {
-        if (s_bits == opc) {
+    if (!bswap) {
+        if (!(opc & MO_SIGN)) {
             tcg_out_bundle(s, miI,
                            tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                        data_reg, TCG_REG_R2),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                           INSN_NOP_I,
+                           INSN_NOP_I);
         } else {
             tcg_out_bundle(s, mII,
                            tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                        data_reg, TCG_REG_R2),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_I,
                            tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
                                        data_reg, data_reg));
         }
-    } else if (s_bits == 3) {
+    } else if (s_bits == MO_64) {
         tcg_out_bundle(s, mII,
                        tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                    data_reg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_I,
                        tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                    data_reg, data_reg, 0xb));
     } else {
-        if (s_bits == 1) {
+        if (s_bits == MO_16) {
            tcg_out_bundle(s, mII,
                            tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                        data_reg, TCG_REG_R2),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_I,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                        data_reg, data_reg, 15, 15));
         } else {
            tcg_out_bundle(s, mII,
                            tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                        data_reg, TCG_REG_R2),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_I,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                        data_reg, data_reg, 31, 31));
         }
-        if (opc == s_bits) {
+        if (!(opc & MO_SIGN)) {
             tcg_out_bundle(s, miI,
-                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_M,
+                           INSN_NOP_I,
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        data_reg, data_reg, 0xb));
         } else {
             tcg_out_bundle(s, mII,
-                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           INSN_NOP_M,
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        data_reg, data_reg, 0xb),
                            tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
@@ -1743,129 +1843,127 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                                    TCG_GUEST_BASE_REG, addr_reg),
                        tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                    data_reg, TCG_REG_R2),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     } else {
         tcg_out_bundle(s, mmI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                    data_reg, addr_reg),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 
-    if (bswap && s_bits == 1) {
+    if (bswap && s_bits == MO_16) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                    data_reg, data_reg, 15, 15),
                        tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                    data_reg, data_reg, 0xb));
-    } else if (bswap && s_bits == 2) {
+    } else if (bswap && s_bits == MO_32) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                    data_reg, data_reg, 31, 31),
                        tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                    data_reg, data_reg, 0xb));
-    } else if (bswap && s_bits == 3) {
+    } else if (bswap && s_bits == MO_64) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                    data_reg, data_reg, 0xb));
     }
-    if (s_bits != opc) {
+    if (opc & MO_SIGN) {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                       INSN_NOP_M,
+                       INSN_NOP_I,
                        tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
                                    data_reg, data_reg));
     }
 #endif
 }
 
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+                                   TCGMemOp opc)
 {
     static uint64_t const opc_st_m4[4] = {
         OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
     };
-    int addr_reg, data_reg, bswap;
+    int addr_reg, data_reg;
 #if TARGET_LONG_BITS == 64
     uint64_t add_guest_base;
 #endif
+    TCGMemOp s_bits, bswap;
 
     data_reg = *args++;
     addr_reg = *args++;
-
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 1;
-#else
-    bswap = 0;
-#endif
+    s_bits = opc & MO_SIZE;
+    bswap = opc & MO_BSWAP;
 
 #if TARGET_LONG_BITS == 32
     if (GUEST_BASE != 0) {
         tcg_out_bundle(s, mII,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                    TCG_REG_R3, addr_reg),
                        tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                    TCG_GUEST_BASE_REG, TCG_REG_R3));
     } else {
         tcg_out_bundle(s, miI,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                    TCG_REG_R2, addr_reg),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     }
 
     if (bswap) {
-        if (opc == 1) {
+        if (s_bits == MO_16) {
             tcg_out_bundle(s, mII,
-                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           INSN_NOP_M,
                            tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                        TCG_REG_R3, data_reg, 15, 15),
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, TCG_REG_R3, 0xb));
             data_reg = TCG_REG_R3;
-        } else if (opc == 2) {
+        } else if (s_bits == MO_32) {
             tcg_out_bundle(s, mII,
-                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                           INSN_NOP_M,
                            tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                        TCG_REG_R3, data_reg, 31, 31),
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, TCG_REG_R3, 0xb));
             data_reg = TCG_REG_R3;
-        } else if (opc == 3) {
+        } else if (s_bits == MO_64) {
             tcg_out_bundle(s, miI,
-                           tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_M,
+                           INSN_NOP_I,
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, data_reg, 0xb));
             data_reg = TCG_REG_R3;
         }
     }
     tcg_out_bundle(s, mmI,
-                   tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                   tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                data_reg, TCG_REG_R2),
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
-                   tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                   INSN_NOP_M,
+                   INSN_NOP_I);
 #else
     if (GUEST_BASE != 0) {
         add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                      TCG_GUEST_BASE_REG, addr_reg);
         addr_reg = TCG_REG_R2;
     } else {
-        add_guest_base = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+        add_guest_base = INSN_NOP_M;
    }
 
-    if (!bswap || opc == 0) {
+    if (!bswap) {
        tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI),
                        add_guest_base,
-                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                    data_reg, addr_reg),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I);
     } else {
-        if (opc == 1) {
+        if (s_bits == MO_16) {
             tcg_out_bundle(s, mII,
                            add_guest_base,
                            tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
@@ -1873,7 +1971,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, TCG_REG_R3, 0xb));
             data_reg = TCG_REG_R3;
-        } else if (opc == 2) {
+        } else if (s_bits == MO_32) {
             tcg_out_bundle(s, mII,
                            add_guest_base,
                            tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
@@ -1881,19 +1979,19 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, TCG_REG_R3, 0xb));
             data_reg = TCG_REG_R3;
-        } else if (opc == 3) {
+        } else if (s_bits == MO_64) {
             tcg_out_bundle(s, miI,
                            add_guest_base,
-                           tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+                           INSN_NOP_I,
                            tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
                                        TCG_REG_R3, data_reg, 0xb));
             data_reg = TCG_REG_R3;
         }
         tcg_out_bundle(s, miI,
-                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                    data_reg, addr_reg),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
-                       tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+                       INSN_NOP_I,
+                       INSN_NOP_I);
     }
 #endif
 }
@@ -1911,14 +2009,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_br(s, args[0]);
         break;
     case INDEX_op_call:
-        tcg_out_call(s, args[0]);
+        if (likely(const_args[0])) {
+            tcg_out_calli(s, args[0]);
+        } else {
+            tcg_out_callr(s, args[0]);
+        }
         break;
     case INDEX_op_goto_tb:
         tcg_out_goto_tb(s, args[0]);
         break;
-    case INDEX_op_jmp:
-        tcg_out_jmp(s, args[0]);
-        break;
 
     case INDEX_op_movi_i32:
         tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
@@ -2095,13 +2194,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_bswap64(s, args[0], args[1]);
         break;
 
+    case INDEX_op_deposit_i32:
+    case INDEX_op_deposit_i64:
+        tcg_out_deposit(s, args[0], args[1], args[2], const_args[2],
+                        args[3], args[4]);
+        break;
+
     case INDEX_op_brcond_i32:
-        tcg_out_brcond(s, args[2], args[0], const_args[0],
-                       args[1], const_args[1], args[3], 1);
+        tcg_out_brcond(s, args[2], args[0], args[1], args[3], 1);
         break;
     case INDEX_op_brcond_i64:
-        tcg_out_brcond(s, args[2], args[0], const_args[0],
-                       args[1], const_args[1], args[3], 0);
+        tcg_out_brcond(s, args[2], args[0], args[1], args[3], 0);
         break;
     case INDEX_op_setcond_i32:
         tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1);
@@ -2109,41 +2212,49 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_setcond_i64:
         tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0);
         break;
+    case INDEX_op_movcond_i32:
+        tcg_out_movcond(s, args[5], args[0], args[1], args[2],
+                        args[3], const_args[3], args[4], const_args[4], 1);
+        break;
+    case INDEX_op_movcond_i64:
+        tcg_out_movcond(s, args[5], args[0], args[1], args[2],
+                        args[3], const_args[3], args[4], const_args[4], 0);
+        break;
 
     case INDEX_op_qemu_ld8u:
-        tcg_out_qemu_ld(s, args, 0);
+        tcg_out_qemu_ld(s, args, MO_UB);
         break;
     case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, 0 | 4);
+        tcg_out_qemu_ld(s, args, MO_SB);
         break;
     case INDEX_op_qemu_ld16u:
-        tcg_out_qemu_ld(s, args, 1);
+        tcg_out_qemu_ld(s, args, MO_TEUW);
        break;
     case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, 1 | 4);
+        tcg_out_qemu_ld(s, args, MO_TESW);
         break;
     case INDEX_op_qemu_ld32:
     case INDEX_op_qemu_ld32u:
-        tcg_out_qemu_ld(s, args, 2);
+        tcg_out_qemu_ld(s, args, MO_TEUL);
         break;
     case INDEX_op_qemu_ld32s:
-        tcg_out_qemu_ld(s, args, 2 | 4);
+        tcg_out_qemu_ld(s, args, MO_TESL);
         break;
     case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, 3);
+        tcg_out_qemu_ld(s, args, MO_TEQ);
         break;
 
     case INDEX_op_qemu_st8:
-        tcg_out_qemu_st(s, args, 0);
+        tcg_out_qemu_st(s, args, MO_UB);
         break;
     case INDEX_op_qemu_st16:
-        tcg_out_qemu_st(s, args, 1);
+        tcg_out_qemu_st(s, args, MO_TEUW);
         break;
     case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, 2);
+        tcg_out_qemu_st(s, args, MO_TEUL);
         break;
     case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, 3);
+        tcg_out_qemu_st(s, args, MO_TEQ);
         break;
 
     default:
@@ -2153,10 +2264,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
 static const TCGTargetOpDef ia64_op_defs[] = {
     { INDEX_op_br, { } },
-    { INDEX_op_call, { "r" } },
+    { INDEX_op_call, { "ri" } },
     { INDEX_op_exit_tb, { } },
     { INDEX_op_goto_tb, { } },
-    { INDEX_op_jmp, { "r" } },
 
     { INDEX_op_mov_i32, { "r", "r" } },
     { INDEX_op_movi_i32, { "r" } },
@@ -2198,8 +2308,9 @@ static const TCGTargetOpDef ia64_op_defs[] = {
     { INDEX_op_bswap16_i32, { "r", "rZ" } },
     { INDEX_op_bswap32_i32, { "r", "rZ" } },
 
-    { INDEX_op_brcond_i32, { "rI", "rI" } },
+    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
     { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
+    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } },
 
     { INDEX_op_mov_i64, { "r", "r" } },
     { INDEX_op_movi_i64, { "r" } },
@@ -2247,8 +2358,12 @@ static const TCGTargetOpDef ia64_op_defs[] = {
     { INDEX_op_bswap32_i64, { "r", "rZ" } },
     { INDEX_op_bswap64_i64, { "r", "rZ" } },
 
-    { INDEX_op_brcond_i64, { "rI", "rI" } },
+    { INDEX_op_brcond_i64, { "rZ", "rZ" } },
     { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
+    { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } },
+
+    { INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
+    { INDEX_op_deposit_i64, { "r", "rZ", "ri" } },
 
     { INDEX_op_qemu_ld8u, { "r", "r" } },
     { INDEX_op_qemu_ld8s, { "r", "r" } },
@@ -2273,9 +2388,12 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     int frame_size;
 
     /* reserve some stack space */
-    frame_size = TCG_STATIC_CALL_ARGS_SIZE;
+    frame_size = TCG_STATIC_CALL_ARGS_SIZE +
+                 CPU_TEMP_BUF_NLONGS * sizeof(long);
     frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                  ~(TCG_TARGET_STACK_ALIGN - 1);
+    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
+                  CPU_TEMP_BUF_NLONGS * sizeof(long));
 
     /* First emit adhoc function descriptor */
     *(uint64_t *)(s->code_ptr) = (uint64_t)s->code_ptr + 16; /* entry point */
@@ -2285,8 +2403,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_bundle(s, miI,
                    tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34,
                                TCG_REG_R34, 32, 24, 0),
-                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
-                               TCG_AREG0, 0, TCG_REG_R32),
+                   INSN_NOP_I,
                    tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                                TCG_REG_B6, TCG_REG_R33, 0));
 
@@ -2294,7 +2411,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
        an ADDL in the M slot of the next bundle.  */
     if (GUEST_BASE != 0) {
         tcg_out_bundle(s, mlx,
-                       tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                       INSN_NOP_M,
                        tcg_opc_l2 (GUEST_BASE),
                        tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
                                    TCG_GUEST_BASE_REG, GUEST_BASE));
@@ -2305,19 +2422,19 @@ static void tcg_target_qemu_prologue(TCGContext *s)
                    tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
                                TCG_REG_R12, -frame_size, TCG_REG_R12),
                    tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
-                               TCG_REG_R32, TCG_REG_B0),
+                               TCG_REG_R33, TCG_REG_B0),
                    tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
 
     /* epilogue */
     tb_ret_addr = s->code_ptr;
     tcg_out_bundle(s, miI,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
-                               TCG_REG_B0, TCG_REG_R32, 0),
+                               TCG_REG_B0, TCG_REG_R33, 0),
                    tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
                                TCG_REG_R12, frame_size, TCG_REG_R12));
     tcg_out_bundle(s, miB,
-                   tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+                   INSN_NOP_M,
                    tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26,
                                TCG_REG_PFS, TCG_REG_R34),
                    tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4,
@@ -2370,18 +2487,17 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);   /* internal use */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12);  /* stack pointer */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R32);  /* return address */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33);  /* return address */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34);  /* PFS */
 
-    /* The following 3 are not in use, are call-saved, but *not* saved
+    /* The following 4 are not in use, are call-saved, but *not* saved
        by the prologue.  Therefore we cannot use them without modifying
        the prologue.  There doesn't seem to be any good reason to use
        these as opposed to the windowed registers.  */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7);
 
     tcg_add_target_add_op_defs(ia64_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
-                  CPU_TEMP_BUF_NLONGS * sizeof(long));
 }