/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrame;
typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;
static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};
typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
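/*
 * Typical flow (illustrative): a forward branch emitted before its
 * target calls tcg_out_reloc to queue a TCGRelocation on the label;
 * when the target is reached, tcg_out_label binds its address, and
 * tcg_resolve_relocs below walks every label and patches each queued
 * site through the backend's patch_reloc.
 */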
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;
/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}
/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
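/*
 * Example (illustrative): if i1 is R0 <- R1 and i2 is R1 <- R0, the two
 * moves form a swap.  It is resolved either by a single tcg_out_xchg,
 * or, when the backend cannot xchg, by parking i1's source in @scratch
 * first.
 */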
/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
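/*
 * Illustrative expansion, assuming a backend provides the common "r"
 * and "ri" constraints in its tcg-target-con-set.h: a line
 *
 *     C_O1_I2(r, r, ri)
 *
 * becomes the enumerator c_o1_i2_r_r_ri here, and in the array below
 * becomes { .args_ct_str = { "r", "r", "ri" } }.
 */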
#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};
#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return s->pool_cur;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
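/*
 * Allocation policy, by example: a request larger than
 * TCG_POOL_CHUNK_SIZE gets a dedicated pool on the pool_first_large
 * list and is freed at the next tcg_pool_reset; smaller requests are
 * bump-allocated from reusable TCG_POOL_CHUNK_SIZE chunks, which are
 * merely rewound (not freed) on reset.
 */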
#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;
/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif
static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(tl, 2)   /* target_ulong addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}
static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t.  */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);
        assert(nargs <= MAX_CALL_IARGS);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */
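/*
 * Typemask layout, by example: bits [2:0] hold the return typecode and
 * each following 3-bit field holds one argument typecode, so
 * typemask & 7 selects the return type and
 * extract32(typemask, (j + 1) * 3, 3) recovers the typecode of
 * argument j.
 */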
static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}
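/*
 * Example with hypothetical values: given 6 register argument slots
 * and an 8-byte tcg_target_long, arg_slot 8 is the third stack slot
 * (stk_slot 2) and maps to TCG_TARGET_CALL_STACK_OFFSET + 2 * 8 bytes.
 */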
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
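/*
 * Example: layout_arg_even rounds arg_slot up to the next even slot
 * (3 becomes 4), for ABIs that pass 64-bit values in aligned register
 * pairs (TCG_CALL_ARG_EVEN).
 */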
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}
static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}
static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structure pass by-reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->ref_slot += n;
}
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };
    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}
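/*
 * Example: an Int128 argument passed TCG_CALL_ARG_BY_REF on a 64-bit
 * host consumes one pointer argument slot plus two "ref_slot" stack
 * words for the data copy; the loop above then rebases those ref_slots
 * to sit just past the outgoing parameter area.
 */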
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        init_call_layout(&all_helpers[i]);
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

#ifdef CONFIG_TCG_INTERPRETER
    init_ffi_layouts();
#endif

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
, int splitwx
, unsigned max_cpus
)
1386 tcg_context_init(max_cpus
);
1387 tcg_region_init(tb_size
, splitwx
, max_cpus
);
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type.  */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}
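/*
 * Usage note: constants are interned per (type, value), so repeated
 * requests yield the same TEMP_CONST temp; tcg_temp_free_internal
 * above deliberately ignores frees of such temps.
 */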
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
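/*
 * Example: dup_const(MO_8, 0xab) yields 0xabababababababab, so a
 * single interned 64-bit constant represents the byte splatted across
 * every element of the vector.
 */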
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_qemu_ld_i128:
    case INDEX_op_qemu_st_i128:
        return TCG_TARGET_HAS_qemu_ldst_i128;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
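/*
 * Example: tcg_op_supported(INDEX_op_rotl_i32) simply reports the
 * backend capability macro TCG_TARGET_HAS_rot_i32; callers can use
 * this to expand unsupported ops in terms of supported ones.
 */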
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    const TCGHelperInfo *info;
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* Flag helpers that may affect guest state */
    if (tcg_ctx->plugin_insn &&
        !(info->flags & TCG_CALL_PLUGIN) &&
        !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
    case 4:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
        tcg_debug_assert(ret->temp_subindex == 0);
        for (i = 0; i < n; ++i) {
            op->args[pi++] = temp_arg(ret + i);
        }
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_BY_REF:
        case TCG_CALL_ARG_BY_REF_N:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_ebb_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}
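
/*
 * Worked example (illustrative only, not from the original source):
 * for a helper with one i64 result and two register inputs on a 64-bit
 * host, the call op built above lays out its args[] roughly as
 *     args[0] = result, args[1..2] = inputs,
 *     args[3] = (uintptr_t)func, args[4] = (uintptr_t)info
 * so total_args = nr_out(1) + nr_in(2) + 2 = 5 and pi counts to 5.
 */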
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_TB:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_TB:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
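
/*
 * Example outputs (illustrative): a fixed or global temp prints as its
 * name, a TB-lived temp as "loc0", an EBB temp as "tmp3", a 32-bit
 * constant as "$0xff", and a vector constant as e.g. "v128$0x...".
 */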
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
    [MO_128 + MO_BE] = "beo",
    [MO_128 + MO_LE] = "leo",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
    [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
    [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
    [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
    [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
    [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
    [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
};

static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ] = "iz",
    [TCG_BSWAP_OZ] = "oz",
    [TCG_BSWAP_OS] = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}

/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                col += ne_fprintf(f, " %016" PRIx64,
                                  tcg_get_insn_start_param(op, i));
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_qemu_ld_i128:
            case INDEX_op_qemu_st_i128:
                {
                    const char *s_al, *s_op, *s_at;
                    MemOpIdx oi = op->args[k++];
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                    s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                    s_at = atom_name[(op & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
                    op &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);

                    /* If all fields are accounted for, print symbolically. */
                    if (!op && s_al && s_op && s_at) {
                        col += ne_fprintf(f, ",%s%s%s,%u",
                                          s_at, s_al, s_op, ix);
                    } else {
                        col += ne_fprintf(f, ",$0x%x,%u", op, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    k++;
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            case INDEX_op_mb:
                {
                    TCGBar membar = op->args[k];
                    const char *b_op, *m_op;

                    switch (membar & TCG_BAR_SC) {
                    case 0:
                        b_op = "none";
                        break;
                    case TCG_BAR_LDAQ:
                        b_op = "acq";
                        break;
                    case TCG_BAR_STRL:
                        b_op = "rel";
                        break;
                    case TCG_BAR_SC:
                        b_op = "seq";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    switch (membar & TCG_MO_ALL) {
                    case 0:
                        m_op = "none";
                        break;
                    case TCG_MO_LD_LD:
                        m_op = "rr";
                        break;
                    case TCG_MO_LD_ST:
                        m_op = "rw";
                        break;
                    case TCG_MO_ST_LD:
                        m_op = "wr";
                        break;
                    case TCG_MO_ST_ST:
                        m_op = "ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST:
                        m_op = "rr+rw";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD:
                        m_op = "rr+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_ST:
                        m_op = "rr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rw+wr";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rw+ww";
                        break;
                    case TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "wr+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rr+rw+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rr+rw+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rr+wr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rw+wr+ww";
                        break;
                    case TCG_MO_ALL:
                        m_op = "all";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
                    i++, k++;
                }
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, "  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, "  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, "  pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}
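
/*
 * Illustrative dump line (hypothetical values): an op prints as e.g.
 *     " add_i32 tmp3,tmp1,tmp2                  dead: 1 2  pref=all"
 * with the opcode, comma-separated args, then the liveness and
 * preference annotations appended after column 40.
 */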
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}
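
/*
 * Example (illustrative): a single-register constraint or an output
 * alias sorts first (INT_MAX); the halves of a register pair whose
 * first half sits at arg index 1 sort as 4 and 3; a plain constraint
 * allowing 16 registers sorts last with priority -16.
 */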
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
*s
)
2736 for (op
= 0; op
< NB_OPS
; op
++) {
2737 TCGOpDef
*def
= &tcg_op_defs
[op
];
2738 const TCGTargetOpDef
*tdefs
;
2739 bool saw_alias_pair
= false;
2740 int i
, o
, i2
, o2
, nb_args
;
2742 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2746 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
2752 * Macro magic should make it impossible, but double-check that
2753 * the array index is in range. Since the signness of an enum
2754 * is implementation defined, force the result to unsigned.
2756 unsigned con_set
= tcg_target_op_def(op
);
2757 tcg_debug_assert(con_set
< ARRAY_SIZE(constraint_sets
));
2758 tdefs
= &constraint_sets
[con_set
];
2760 for (i
= 0; i
< nb_args
; i
++) {
2761 const char *ct_str
= tdefs
->args_ct_str
[i
];
2762 bool input_p
= i
>= def
->nb_oargs
;
2764 /* Incomplete TCGTargetOpDef entry. */
2765 tcg_debug_assert(ct_str
!= NULL
);
2770 tcg_debug_assert(input_p
);
2771 tcg_debug_assert(o
< def
->nb_oargs
);
2772 tcg_debug_assert(def
->args_ct
[o
].regs
!= 0);
2773 tcg_debug_assert(!def
->args_ct
[o
].oalias
);
2774 def
->args_ct
[i
] = def
->args_ct
[o
];
2775 /* The output sets oalias. */
2776 def
->args_ct
[o
].oalias
= 1;
2777 def
->args_ct
[o
].alias_index
= i
;
2778 /* The input sets ialias. */
2779 def
->args_ct
[i
].ialias
= 1;
2780 def
->args_ct
[i
].alias_index
= o
;
2781 if (def
->args_ct
[i
].pair
) {
2782 saw_alias_pair
= true;
2784 tcg_debug_assert(ct_str
[1] == '\0');
2788 tcg_debug_assert(!input_p
);
2789 def
->args_ct
[i
].newreg
= true;
2793 case 'p': /* plus */
2794 /* Allocate to the register after the previous. */
2795 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2797 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2798 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2799 def
->args_ct
[i
] = (TCGArgConstraint
){
2802 .regs
= def
->args_ct
[o
].regs
<< 1,
2804 def
->args_ct
[o
].pair
= 1;
2805 def
->args_ct
[o
].pair_index
= i
;
2806 tcg_debug_assert(ct_str
[1] == '\0');
2809 case 'm': /* minus */
2810 /* Allocate to the register before the previous. */
2811 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2813 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2814 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2815 def
->args_ct
[i
] = (TCGArgConstraint
){
2818 .regs
= def
->args_ct
[o
].regs
>> 1,
2820 def
->args_ct
[o
].pair
= 2;
2821 def
->args_ct
[o
].pair_index
= i
;
2822 tcg_debug_assert(ct_str
[1] == '\0');
2829 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
2832 /* Include all of the target-specific constraints. */
2835 #define CONST(CASE, MASK) \
2836 case CASE: def->args_ct[i].ct |= MASK; break;
2837 #define REGS(CASE, MASK) \
2838 case CASE: def->args_ct[i].regs |= MASK; break;
2840 #include "tcg-target-con-str.h"
2849 /* Typo in TCGTargetOpDef constraint. */
2850 g_assert_not_reached();
2852 } while (*++ct_str
!= '\0');
2855 /* TCGTargetOpDef entry with too much information? */
2856 tcg_debug_assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
2859 * Fix up output pairs that are aliased with inputs.
2860 * When we created the alias, we copied pair from the output.
2861 * There are three cases:
2862 * (1a) Pairs of inputs alias pairs of outputs.
2863 * (1b) One input aliases the first of a pair of outputs.
2864 * (2) One input aliases the second of a pair of outputs.
2866 * Case 1a is handled by making sure that the pair_index'es are
2867 * properly updated so that they appear the same as a pair of inputs.
2869 * Case 1b is handled by setting the pair_index of the input to
2870 * itself, simply so it doesn't point to an unrelated argument.
2871 * Since we don't encounter the "second" during the input allocation
2872 * phase, nothing happens with the second half of the input pair.
2874 * Case 2 is handled by setting the second input to pair=3, the
2875 * first output to pair=3, and the pair_index'es to match.
2877 if (saw_alias_pair
) {
2878 for (i
= def
->nb_oargs
; i
< nb_args
; i
++) {
2880 * Since [0-9pm] must be alone in the constraint string,
2881 * the only way they can both be set is if the pair comes
2882 * from the output alias.
2884 if (!def
->args_ct
[i
].ialias
) {
2887 switch (def
->args_ct
[i
].pair
) {
2891 o
= def
->args_ct
[i
].alias_index
;
2892 o2
= def
->args_ct
[o
].pair_index
;
2893 tcg_debug_assert(def
->args_ct
[o
].pair
== 1);
2894 tcg_debug_assert(def
->args_ct
[o2
].pair
== 2);
2895 if (def
->args_ct
[o2
].oalias
) {
2897 i2
= def
->args_ct
[o2
].alias_index
;
2898 tcg_debug_assert(def
->args_ct
[i2
].pair
== 2);
2899 def
->args_ct
[i2
].pair_index
= i
;
2900 def
->args_ct
[i
].pair_index
= i2
;
2903 def
->args_ct
[i
].pair_index
= i
;
2907 o
= def
->args_ct
[i
].alias_index
;
2908 o2
= def
->args_ct
[o
].pair_index
;
2909 tcg_debug_assert(def
->args_ct
[o
].pair
== 2);
2910 tcg_debug_assert(def
->args_ct
[o2
].pair
== 1);
2911 if (def
->args_ct
[o2
].oalias
) {
2913 i2
= def
->args_ct
[o2
].alias_index
;
2914 tcg_debug_assert(def
->args_ct
[i2
].pair
== 1);
2915 def
->args_ct
[i2
].pair_index
= i
;
2916 def
->args_ct
[i
].pair_index
= i2
;
2919 def
->args_ct
[i
].pair
= 3;
2920 def
->args_ct
[o2
].pair
= 3;
2921 def
->args_ct
[i
].pair_index
= o2
;
2922 def
->args_ct
[o2
].pair_index
= i
;
2926 g_assert_not_reached();
2931 /* sort the constraints (XXX: this is just an heuristic) */
2932 sort_constraints(def
, 0, def
->nb_oargs
);
2933 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
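
/*
 * Worked example (illustrative): a constraint set such as { "r", "0" }
 * describes one output in any register and one input aliased to it.
 * The parser above copies the output's regs into the input's entry,
 * then sets oalias/ialias and the alias_index links in both directions,
 * so the register allocator can reuse the input register for the output.
 */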
static void remove_label_use(TCGOp *op, int idx)
{
    TCGLabel *label = arg_label(op->args[idx]);
    TCGLabelUse *use;

    QSIMPLEQ_FOREACH(use, &label->branches, next) {
        if (use->op == op) {
            QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
            return;
        }
    }
    g_assert_not_reached();
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    switch (op->opc) {
    case INDEX_op_br:
        remove_label_use(op, 0);
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        remove_label_use(op, 3);
        break;
    case INDEX_op_brcond2_i32:
        remove_label_use(op, 5);
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        QTAILQ_FOREACH(op, &s->free_ops, link) {
            if (nargs <= op->nargs) {
                QTAILQ_REMOVE(&s->free_ops, op, link);
                nargs = op->nargs;
                goto found;
            }
        }
    }

    /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
    nargs = MAX(4, nargs);
    op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);

 found:
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    op->nargs = nargs;

    /* Check for bitfield overflow. */
    tcg_debug_assert(op->nargs == nargs);

    s->nb_ops++;
    return op;
}
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
    TCGOp *op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
static void move_label_uses(TCGLabel *to, TCGLabel *from)
{
    TCGLabelUse *u;

    QSIMPLEQ_FOREACH(u, &from->branches, next) {
        TCGOp *op = u->op;
        switch (op->opc) {
        case INDEX_op_br:
            op->args[0] = label_arg(to);
            break;
        case INDEX_op_brcond_i32:
        case INDEX_op_brcond_i64:
            op->args[3] = label_arg(to);
            break;
        case INDEX_op_brcond2_i32:
            op->args[5] = label_arg(to);
            break;
        default:
            g_assert_not_reached();
        }
    }

    QSIMPLEQ_CONCAT(&to->branches, &from->branches);
}
/* Reachable analysis: remove unreachable code. */
static void __attribute__((noinline))
reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next, *op_prev;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);

            /*
             * Note that the first op in the TB is always a load,
             * so there is always something before a label.
             */
            op_prev = QTAILQ_PREV(op, link);

            /*
             * If we find two sequential labels, move all branches to
             * reference the second label and remove the first label.
             * Do this before branch to next optimization, so that the
             * middle label is out of the way.
             */
            if (op_prev->opc == INDEX_op_set_label) {
                move_label_uses(label, arg_label(op_prev->args[0]));
                tcg_op_remove(s, op_prev);
                op_prev = QTAILQ_PREV(op, link);
            }

            /*
             * Optimization can fold conditional branches to unconditional.
             * If we find a label which is preceded by an unconditional
             * branch to next, remove the branch.  We couldn't do this when
             * processing the branch because any dead code between the branch
             * and label had not yet been removed.
             */
            if (op_prev->opc == INDEX_op_br &&
                label == arg_label(op_prev->args[0])) {
                tcg_op_remove(s, op_prev);
                /* Fall through means insns become live again. */
                dead = false;
            }

            if (QSIMPLEQ_EMPTY(&label->branches)) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
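
/*
 * Illustrative before/after (hypothetical labels): given
 *     br $L1; set_label $L0; set_label $L1
 * the pass first merges $L0's uses into $L1 and removes $L0, then
 * notices the br targets the immediately following label and removes
 * it too, so the ops after $L1 remain live via fall-through.
 */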
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp. */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}
/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}
/* liveness analysis: sync globals back to memory. */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs. */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/*
 * liveness analysis: conditional branch: all temps are dead unless
 * explicitly live-across-conditional-branch, globals and local temps
 * should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_TB:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory and kill. */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls. */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart. */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
/*
 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
 * to TEMP_EBB, if possible.
 */
static void __attribute__((noinline))
liveness_pass_0(TCGContext *s)
{
    void * const multiple_ebb = (void *)(uintptr_t)-1;
    int nb_temps = s->nb_temps;
    TCGOp *op, *ebb;

    for (int i = s->nb_globals; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    /*
     * Represent each EBB by the op at which it begins.  In the case of
     * the first EBB, this is the first op, otherwise it is a label.
     * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
     * within a single EBB, else MULTIPLE_EBB.
     */
    ebb = QTAILQ_FIRST(&s->ops);
    QTAILQ_FOREACH(op, &s->ops, link) {
        const TCGOpDef *def;
        int nb_oargs, nb_iargs;

        switch (op->opc) {
        case INDEX_op_set_label:
            ebb = op;
            continue;
        case INDEX_op_discard:
            continue;
        case INDEX_op_call:
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            break;
        default:
            def = &tcg_op_defs[op->opc];
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            break;
        }

        for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
            TCGTemp *ts = arg_temp(op->args[i]);

            if (ts->kind != TEMP_TB) {
                continue;
            }
            if (ts->state_ptr == NULL) {
                ts->state_ptr = ebb;
            } else if (ts->state_ptr != ebb) {
                ts->state_ptr = multiple_ebb;
            }
        }
    }

    /*
     * For TEMP_TB that turned out not to be used beyond one EBB,
     * reduce the liveness to TEMP_EBB.
     */
    for (int i = s->nb_globals; i < nb_temps; ++i) {
        TCGTemp *ts = &s->temps[i];
        if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
            ts->kind = TEMP_EBB;
        }
    }
}
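
/*
 * Example (illustrative): a TEMP_TB temp written and read only between
 * one pair of labels keeps state_ptr pointing at that single EBB's
 * first op and is demoted to TEMP_EBB above; one used on both sides of
 * a label ends as MULTIPLE_EBB and keeps its TB-wide lifetime.
 */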
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void __attribute__((noinline))
liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB. */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                const TCGHelperInfo *info = tcg_call_info(op);
                int call_flags = tcg_call_flags(op);

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead. */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);
                }

                /* Not used -- it will be tcg_target_call_oarg_reg(). */
                memset(op->output_pref, 0, sizeof(op->output_pref));

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper. */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs. */
                la_cross_call(s, nb_temps);

                /*
                 * Input arguments are live for preceding opcodes.
                 *
                 * For those arguments that die, and will be allocated in
                 * registers, clear the register set for that arg, to be
                 * filled in below.  For args that will be on the stack,
                 * reset to any available reg.  Process arguments in reverse
                 * order so that if a temp is used more than once, the stack
                 * reset to max happens before the register reset to 0.
                 */
                for (i = nb_iargs - 1; i >= 0; i--) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    if (ts->state & TS_DEAD) {
                        switch (loc->kind) {
                        case TCG_CALL_ARG_NORMAL:
                        case TCG_CALL_ARG_EXTEND_U:
                        case TCG_CALL_ARG_EXTEND_S:
                            if (arg_slot_reg_p(loc->arg_slot)) {
                                *la_temp_pref(ts) = 0;
                                break;
                            }
                            /* fall through */
                        default:
                            *la_temp_pref(ts) =
                                tcg_target_available_regs[ts->type];
                            break;
                        }
                        ts->state &= ~TS_DEAD;
                    }
                }

                /*
                 * For each input argument, add its input register to prefs.
                 * If a temp is used once, this produces a single set bit;
                 * if a temp is used multiple times, this produces a set.
                 */
                for (i = 0; i < nb_iargs; i++) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    switch (loc->kind) {
                    case TCG_CALL_ARG_NORMAL:
                    case TCG_CALL_ARG_EXTEND_U:
                    case TCG_CALL_ARG_EXTEND_S:
                        if (arg_slot_reg_p(loc->arg_slot)) {
                            tcg_regset_set_reg(*la_temp_pref(ts),
                                tcg_target_call_iarg_regs[loc->arg_slot]);
                        }
                        break;
                    default:
                        break;
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            /* fall through */
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed. */
                if (i < ARRAY_SIZE(op->output_pref)) {
                    op->output_pref[i] = *la_temp_pref(ts);
                }

                /* Output args are dead. */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update. */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type. */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand. */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward. */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= output_pref(op, ct->alias_index);
                    }
                    /* If the combination is not possible, restart. */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
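
/*
 * Worked example (illustrative): for "add2_i32 lo,hi,al,ah,bl,bh" where
 * liveness finds hi dead but lo live, the pass rewrites the op in place
 * to "add_i32 lo,al,bl" by shifting args[1]=args[2] and args[2]=args[4]
 * as above, leaving the trailing args unused.
 */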
/* Liveness analysis: Convert indirect regs to direct temporaries. */
static bool __attribute__((noinline))
liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dts->temp_subindex = its->temp_subindex;
            dts->kind = TEMP_EBB;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead. */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts && arg_ts->state == TS_DEAD) {
                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_ld_i32
                                  : INDEX_op_ld_i64);
                TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                lop->args[0] = temp_arg(dir_ts);
                lop->args[1] = temp_arg(arg_ts->mem_base);
                lop->args[2] = arg_ts->mem_offset;

                /* Loaded, but synced with memory. */
                arg_ts->state = TS_MEM;
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[i] = temp_arg(dir_ts);
                changes = true;
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available. */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                /* Sync outputs upon their last write. */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead. */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
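
/*
 * Illustrative effect (hypothetical temp names): for an indirect global
 * G with shadow temp D, an op like "add_i32 G,G,x" becomes roughly
 *     ld_i32 D,base,$off; add_i32 D,D,x; st_i32 D,base,$off
 * where the load is inserted only when D is not already valid and the
 * store only when liveness flagged NEED_SYNC_ARG for the output.
 */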
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    intptr_t off;
    int size, align;

    /* When allocating an object, look at the full type. */
    size = tcg_type_size(ts->base_type);
    switch (ts->base_type) {
    case TCG_TYPE_I32:
        align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        align = 8;
        break;
    case TCG_TYPE_I128:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /*
         * Note that we do not require aligned storage for V256,
         * and that we provide alignment for I128 to match V128,
         * even if that's above what the host ABI requires.
         */
        align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Assume the stack is sufficiently aligned.
     * This affects e.g. ARM NEON, where we have 8 byte stack alignment
     * and do not require 16 byte vector alignment.  This seems slightly
     * easier than fully parameterizing the above switch statement.
     */
    align = MIN(TCG_TARGET_STACK_ALIGN, align);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;
#if defined(__sparc__)
    off += TCG_TARGET_STACK_BIAS;
#endif

    /* If the object was subdivided, assign memory to all the parts. */
    if (ts->base_type != ts->type) {
        int part_size = tcg_type_size(ts->type);
        int part_count = size / part_size;

        /*
         * Each part is allocated sequentially in tcg_temp_new_internal.
         * Jump back to the first part by subtracting the current index.
         */
        ts -= ts->temp_subindex;
        for (int i = 0; i < part_count; ++i) {
            ts[i].mem_offset = off + i * part_size;
            ts[i].mem_base = s->frame_temp;
            ts[i].mem_allocated = 1;
        }
    } else {
        ts->mem_offset = off;
        ts->mem_base = s->frame_temp;
        ts->mem_allocated = 1;
    }
}
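
/*
 * Example (illustrative): a TCG_TYPE_I128 temp on a 64-bit host is
 * subdivided into two I64 parts; the loop above gives part 0 offset
 * 'off' and part 1 offset 'off + 8', both based on the frame temp.
 */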
/* Assign @reg to @ts, and update reg_to_temp[]. */
static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg old = ts->reg;
        tcg_debug_assert(s->reg_to_temp[old] == ts);
        if (old == reg) {
            return;
        }
        s->reg_to_temp[old] = NULL;
    }
    tcg_debug_assert(s->reg_to_temp[reg] == NULL);
    s->reg_to_temp[reg] = ts;
    ts->val_type = TEMP_VAL_REG;
    ts->reg = reg;
}
/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
    tcg_debug_assert(type != TEMP_VAL_REG);
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg reg = ts->reg;
        tcg_debug_assert(s->reg_to_temp[reg] == ts);
        s->reg_to_temp[reg] = NULL;
    }
    ts->val_type = type;
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_TB:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_EBB:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    set_temp_val_nonreg(s, ts, new_type);
}

/* Mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            g_assert_not_reached();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference. */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    g_assert_not_reached();
}
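
/*
 * Illustrative behavior: with required = {r0,r1}, preferred = {r1} and
 * r1 free, the preference pass above returns r1 without spilling; only
 * when every candidate register is occupied does the second loop spill
 * one, visiting registers in allocation order.
 */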
static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
                                 TCGRegSet allocated_regs,
                                 TCGRegSet preferred_regs, bool rev)
{
    int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    /* Ensure that if I is not in allocated_regs, I+1 is not either. */
    reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /*
     * Skip the preferred_regs option if it cannot be satisfied,
     * or if the preference made no difference.
     */
    k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    /*
     * Minimize the number of flushes by looking for 2 free registers first,
     * then a single flush, then two flushes.
     */
    for (fmin = 2; fmin >= 0; fmin--) {
        for (j = k; j < 2; j++) {
            TCGRegSet set = reg_ct[j];

            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];

                if (tcg_regset_test_reg(set, reg)) {
                    int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
                    if (f >= fmin) {
                        tcg_reg_free(s, reg, allocated_regs);
                        tcg_reg_free(s, reg + 1, allocated_regs);
                        return reg;
                    }
                }
            }
        }
    }
    g_assert_not_reached();
}
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * do this generically.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        g_assert_not_reached();
    }
    set_temp_val_reg(s, ts, reg);
}
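
/*
 * Worked example: 0x2727272727272727 equals dup_const(MO_8, 0x27), so
 * the search above selects vece = MO_8; 0x1234123412341234 first
 * matches at dup_const(MO_16, 0x1234), giving vece = MO_16.
 */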
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_TB:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_EBB:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * At a conditional branch, we assume all temporaries are dead unless
 * explicitly live-across-conditional-branch; all globals and local
 * temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_TB:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_EBB:
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here. */
    set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;
    TCGReg oreg, ireg;

    allocated_regs = s->reserved_regs;
    preferred_regs = output_pref(op, 0);
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }
    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    ireg = ts->reg;

    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
        return;
    }

    if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
        /*
         * The mov can be suppressed.  Kill input first, so that it
         * is unlinked from reg_to_temp, then set the output to the
         * reg that we saved from the input.
         */
        temp_dead(s, ts);
        oreg = ireg;
    } else {
        if (ots->val_type == TEMP_VAL_REG) {
            oreg = ots->reg;
        } else {
            /* Make sure to not spill the input register during allocation. */
            oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                 allocated_regs | ((TCGRegSet)1 << ireg),
                                 preferred_regs, ots->indirect_base);
        }
        if (!tcg_out_mov(s, otype, oreg, ireg)) {
            /*
             * Cross register class move not supported.
             * Store the source register into the destination slot
             * and leave the destination temp as TEMP_VAL_MEM.
             */
            assert(!temp_readonly(ots));
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ots);
            }
            tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
            set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
            ots->mem_coherent = 1;
            return;
        }
    }
    set_temp_val_reg(s, ots, oreg);
    ots->mem_coherent = 0;

    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, allocated_regs, 0, 0);
    }
}

/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    unsigned vece;
    int lowpart_ofs;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi. */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGReg oreg;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_out_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register. */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there. */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        lowpart_ofs = 0;
        if (HOST_BIG_ENDIAN) {
            lowpart_ofs = tcg_type_size(itype) - (1 << vece);
        }
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + lowpart_ofs)) {
            goto done;
        }
        /* Load the input into the destination vector register. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
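
/*
 * Illustrative sketch (not part of the build): given IR whose dup input
 * is a constant temp, e.g.
 *
 *     dup_vec  v128,e8  d, c        ; c holds TEMP_VAL_CONST 0x01
 *
 * the TEMP_VAL_CONST branch above never allocates an input register: it
 * forwards the scalar 0x01 to tcg_reg_alloc_do_movi(), and the constant
 * is eventually materialized by broadcasting the byte into all 16 lanes
 * of d (a "dupi").  The temp names d and c here are hypothetical.
 */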

static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, i_required_regs;
        bool allocate_new_reg, copyto_new_reg;
        TCGTemp *ts2;
        int i1, i2;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        reg = ts->reg;
        i_preferred_regs = 0;
        i_required_regs = arg_ct->regs;
        allocate_new_reg = false;
        copyto_new_reg = false;

        switch (arg_ct->pair) {
        case 0: /* not paired */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);

                /*
                 * If the input is readonly, then it cannot also be an
                 * output and aliased to itself.  If the input is not
                 * dead after the instruction, we must allocate a new
                 * register and move it.
                 */
                if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
                    allocate_new_reg = true;
                } else if (ts->val_type == TEMP_VAL_REG) {
                    /*
                     * Check if the current register has already been
                     * allocated for another input.
                     */
                    allocate_new_reg =
                        tcg_regset_test_reg(i_allocated_regs, reg);
                }
            }
            if (!allocate_new_reg) {
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                reg = ts->reg;
                allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
            }
            if (allocate_new_reg) {
                /*
                 * Allocate a new register matching the constraint
                 * and move the temporary register into it.
                 */
                temp_load(s, ts, tcg_target_available_regs[ts->type],
                          i_allocated_regs, 0);
                reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
                                    i_preferred_regs, ts->indirect_base);
                copyto_new_reg = true;
            }
            break;

        case 1:
            /* First of an input pair; if i1 == i2, the second is an output. */
            i1 = i;
            i2 = arg_ct->pair_index;
            ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;

            /*
             * It is easier to default to allocating a new pair
             * and to identify a few cases where it's not required.
             */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);
                if (IS_DEAD_ARG(i1) &&
                    IS_DEAD_ARG(i2) &&
                    !temp_readonly(ts) &&
                    ts->val_type == TEMP_VAL_REG &&
                    ts->reg < TCG_TARGET_NB_REGS - 1 &&
                    tcg_regset_test_reg(i_required_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
                    (ts2
                     ? ts2->val_type == TEMP_VAL_REG &&
                       ts2->reg == reg + 1 &&
                       !temp_readonly(ts2)
                     : s->reg_to_temp[reg + 1] == NULL)) {
                    break;
                }
            } else {
                /* Without aliasing, the pair must also be an input. */
                tcg_debug_assert(ts2);
                if (ts->val_type == TEMP_VAL_REG &&
                    ts2->val_type == TEMP_VAL_REG &&
                    ts2->reg == reg + 1 &&
                    tcg_regset_test_reg(i_required_regs, reg)) {
                    break;
                }
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
                                     0, ts->indirect_base);
            goto do_pair;

        case 2: /* pair second */
            reg = new_args[arg_ct->pair_index] + 1;
            goto do_pair;

        case 3: /* ialias with second output, no first input */
            tcg_debug_assert(arg_ct->ialias);
            i_preferred_regs = output_pref(op, arg_ct->alias_index);

            if (IS_DEAD_ARG(i) &&
                !temp_readonly(ts) &&
                ts->val_type == TEMP_VAL_REG &&
                reg > 0 &&
                s->reg_to_temp[reg - 1] == NULL &&
                tcg_regset_test_reg(i_required_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
                tcg_regset_set_reg(i_allocated_regs, reg - 1);
                break;
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
                                     i_allocated_regs, 0,
                                     ts->indirect_base);
            tcg_regset_set_reg(i_allocated_regs, reg);
            reg += 1;
            goto do_pair;

        do_pair:
            /*
             * If an aliased input is not dead after the instruction,
             * we must allocate a new register and move it.
             */
            if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
                TCGRegSet t_allocated_regs = i_allocated_regs;

                /*
                 * Because of the alias, and the continued life, make sure
                 * that the temp is somewhere *other* than the reg pair,
                 * and we get a copy in reg.
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                    break;
                }
                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                         output_pref(op, k),
                                         ts->indirect_base);
                break;

            case 2: /* second of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    switch (op->opc) {
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext_i32_i64:
        tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
        break;
    default:
        if (def->flags & TCG_OPF_VECTOR) {
            tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                           new_args, const_args);
        } else {
            tcg_out_op(s, op->opc, new_args, const_args);
        }
        break;
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
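
/*
 * Worked example for the constant-promotion path above (illustrative):
 * with itsl->val = 0x01010101 and itsh->val = 0x01010101, deposit64()
 * forms val = 0x0101010101010101.  Since val == dup_const(MO_8, val),
 * vece is narrowed to MO_8 and a single dupi_vec is emitted instead of
 * assembling the two 32-bit halves in registers.
 */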

static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}

static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               arg_slot_stk_ofs(arg_slot));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (arg_slot_reg_p(l->arg_slot)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
    }
}

static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
                         intptr_t ref_off, TCGRegSet *allocated_regs)
{
    TCGReg reg;

    if (arg_slot_reg_p(arg_slot)) {
        reg = tcg_target_call_iarg_regs[arg_slot];
        tcg_reg_free(s, reg, *allocated_regs);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
                            *allocated_regs, 0, false);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
                   arg_slot_stk_ofs(arg_slot));
    }
}

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info = tcg_call_info(op);
    TCGRegSet allocated_regs = s->reserved_regs;
    int i;

    /*
     * Move inputs into place in reverse order,
     * so that we place stacked arguments first.
     */
    for (i = nb_iargs - 1; i >= 0; --i) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            load_arg_normal(s, loc, ts, &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
                         arg_slot_stk_ofs(loc->ref_slot),
                         &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF_N:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Mark dead temporaries and free the associated registers. */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* Clobber call registers. */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /*
     * Save globals if they might be written by the helper,
     * sync them if they might be read.
     */
    if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    /*
     * If the ABI passes a pointer to the returned struct as the first
     * argument, load that now.  Pass a pointer to the output home slot.
     */
    if (info->out_kind == TCG_CALL_RET_BY_REF) {
        TCGTemp *ts = arg_temp(op->args[0]);

        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
    }

    tcg_out_call(s, tcg_call_func(op), info);

    /* Assign output registers and emit moves if needed. */
    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
        }
        break;

    case TCG_CALL_RET_BY_VEC:
        {
            TCGTemp *ts = arg_temp(op->args[0]);

            tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
            tcg_debug_assert(ts->temp_subindex == 0);
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ts);
            }
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       ts->mem_base->reg, ts->mem_offset);
        }
        /* fall through to mark all parts in memory */

    case TCG_CALL_RET_BY_REF:
        /* The callee has performed a write through the reference. */
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            ts->val_type = TEMP_VAL_MEM;
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Flush or discard output registers as needed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
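
/*
 * Illustrative note on the reverse argument walk above: assume a host
 * ABI with 4 argument registers and a helper call (env, a, b, c, d, e).
 * Visiting e first places e and d in their stack slots while every call
 * register is still free to serve as a scratch; env..c are then loaded
 * into registers last, so no register argument is clobbered while a
 * stacked argument still needs materializing.  (The register count is
 * hypothetical; the real slot assignment comes from info->in[].)
 */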

/**
 * atom_and_align_for_opc:
 * @s: tcg context
 * @opc: memory operation code
 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
 * @allow_two_ops: true if we are prepared to issue two operations
 *
 * Return the alignment and atomicity to use for the inline fast path
 * for the given memory operation.  The alignment may be larger than
 * that specified in @opc, and the correct alignment will be diagnosed
 * by the slow path helper.
 *
 * If @allow_two_ops, the host is prepared to test for 2x alignment,
 * and issue two loads or stores for subalignment.
 */
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
{
    MemOp align = get_alignment_bits(opc);
    MemOp size = opc & MO_SIZE;
    MemOp half = size ? size - 1 : 0;
    MemOp atom;
    MemOp atmax;

    /* When serialized, no further atomicity required. */
    if (s->gen_tb->cflags & CF_PARALLEL) {
        atom = opc & MO_ATOM_MASK;
    } else {
        atom = MO_ATOM_NONE;
    }

    switch (atom) {
    case MO_ATOM_NONE:
        /* The operation requires no specific atomicity. */
        atmax = MO_8;
        break;

    case MO_ATOM_IFALIGN:
        atmax = size;
        break;

    case MO_ATOM_IFALIGN_PAIR:
        atmax = half;
        break;

    case MO_ATOM_WITHIN16:
        atmax = size;
        if (size == MO_128) {
            /* Misalignment implies !within16, and therefore no atomicity. */
        } else if (host_atom != MO_ATOM_WITHIN16) {
            /* The host does not implement within16, so require alignment. */
            align = MAX(align, size);
        }
        break;

    case MO_ATOM_WITHIN16_PAIR:
        atmax = size;
        /*
         * Misalignment implies !within16, and therefore half atomicity.
         * Any host prepared for two operations can implement this with
         * half alignment.
         */
        if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
            align = MAX(align, half);
        }
        break;

    case MO_ATOM_SUBALIGN:
        atmax = size;
        if (host_atom != MO_ATOM_SUBALIGN) {
            /* If unaligned but not odd, there are subobjects up to half. */
            if (allow_two_ops) {
                align = MAX(align, half);
            } else {
                align = MAX(align, size);
            }
        }
        break;

    default:
        g_assert_not_reached();
    }

    return (TCGAtomAlign){ .atom = atmax, .align = align };
}
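
/*
 * Worked example (illustrative): opc = MO_64 | MO_ATOM_SUBALIGN with no
 * alignment requested, in a parallel context (CF_PARALLEL set) on a host
 * whose accesses are only atomic when aligned (host_atom ==
 * MO_ATOM_IFALIGN).  Then size = MO_64 and half = MO_32, so with
 * allow_two_ops the result is align = MAX(0, MO_32) = MO_32: a 4-aligned
 * 8-byte access may be split into two atomic 4-byte halves.  Without
 * allow_two_ops the fast path must instead demand full 8-byte alignment
 * (align = MO_64).
 */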

/*
 * Similarly for qemu_ld/st slow path helpers.
 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
 * using only the provided backend tcg_out_* functions.
 */

static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
{
    int ofs = arg_slot_stk_ofs(slot);

    /*
     * Each stack slot is TCG_TARGET_LONG_BITS.  If the host does not
     * require extension to uint64_t, adjust the address for uint32_t.
     */
    if (HOST_BIG_ENDIAN &&
        TCG_TARGET_REG_BITS == 64 &&
        type == TCG_TYPE_I32) {
        ofs += 4;
    }
    return ofs;
}
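
/*
 * Example (illustrative, with a hypothetical slot offset): on a
 * big-endian host with 64-bit registers, an I32 argument whose slot
 * starts at arg_slot_stk_ofs(slot) == 16 is stored at 16 + 4 = 20,
 * so the 32-bit value occupies the significant low half of the
 * 8-byte slot as seen by a 32-bit load in the helper.
 */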

static void tcg_out_helper_load_slots(TCGContext *s,
                                      unsigned nmov, TCGMovExtend *mov,
                                      const TCGLdstHelperParam *parm)
{
    unsigned i;
    TCGReg dst3;

    /*
     * Start from the end, storing to the stack first.
     * This frees those registers, so we need not consider overlap.
     */
    for (i = nmov; i-- > 0; ) {
        unsigned slot = mov[i].dst;

        if (arg_slot_reg_p(slot)) {
            goto found_reg;
        }

        TCGReg src = mov[i].src;
        TCGType dst_type = mov[i].dst_type;
        MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;

        /* The argument is going onto the stack; extend into scratch. */
        if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
            tcg_debug_assert(parm->ntmp != 0);
            mov[i].dst = src = parm->tmp[0];
            tcg_out_movext1(s, &mov[i]);
        }

        tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
                   tcg_out_helper_stk_ofs(dst_type, slot));
    }
    return;

 found_reg:
    /*
     * The remaining arguments are in registers.
     * Convert slot numbers to argument registers.
     */
    nmov = i + 1;
    for (i = 0; i < nmov; ++i) {
        mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
    }

    switch (nmov) {
    case 4:
        /* The backend must have provided enough temps for the worst case. */
        tcg_debug_assert(parm->ntmp >= 2);

        dst3 = mov[3].dst;
        for (unsigned j = 0; j < 3; ++j) {
            if (dst3 == mov[j].src) {
                /*
                 * Conflict.  Copy the source to a temporary, perform the
                 * remaining moves, then the extension from our scratch
                 * on the way out.
                 */
                TCGReg scratch = parm->tmp[1];

                tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
                tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
                tcg_out_movext1_new_src(s, &mov[3], scratch);
                return;
            }
        }

        /* No conflicts: perform this move and continue. */
        tcg_out_movext1(s, &mov[3]);
        /* fall through */

    case 3:
        tcg_out_movext3(s, mov, mov + 1, mov + 2,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 2:
        tcg_out_movext2(s, mov, mov + 1,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 1:
        tcg_out_movext1(s, mov);
        break;
    default:
        g_assert_not_reached();
    }
}
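
/*
 * Illustrative conflict case for nmov == 4 above: suppose, after slot
 * numbers are converted to registers, mov[3].dst is the same register
 * as mov[0].src.  Emitting mov[3] last would then read a value already
 * clobbered by mov[0].  The code instead parks mov[3].src in
 * parm->tmp[1], lets tcg_out_movext3() order the first three moves,
 * and finally extends from the scratch register into mov[3].dst.
 */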

static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
                                    TCGType type, tcg_target_long imm,
                                    const TCGLdstHelperParam *parm)
{
    if (arg_slot_reg_p(slot)) {
        tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
    } else {
        int ofs = tcg_out_helper_stk_ofs(type, slot);
        if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_movi(s, type, parm->tmp[0], imm);
            tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
        }
    }
}

static void tcg_out_helper_load_common_args(TCGContext *s,
                                            const TCGLabelQemuLdst *ldst,
                                            const TCGLdstHelperParam *parm,
                                            const TCGHelperInfo *info,
                                            unsigned next_arg)
{
    TCGMovExtend ptr_mov = {
        .dst_type = TCG_TYPE_PTR,
        .src_type = TCG_TYPE_PTR,
        .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
    };
    const TCGCallArgumentLoc *loc = &info->in[0];
    TCGType type;
    unsigned slot;
    tcg_target_ulong imm;

    /*
     * Handle env, which is always first.
     */
    ptr_mov.dst = loc->arg_slot;
    ptr_mov.src = TCG_AREG0;
    tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);

    /*
     * Handle oi.
     */
    imm = ldst->oi;
    loc = &info->in[next_arg];
    type = TCG_TYPE_I32;
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
        break;
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        /* No extension required for MemOpIdx. */
        tcg_debug_assert(imm <= INT32_MAX);
        type = TCG_TYPE_REG;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
    next_arg++;

    /*
     * Handle ra.
     */
    loc = &info->in[next_arg];
    slot = loc->arg_slot;
    if (parm->ra_gen) {
        int arg_reg = -1;
        TCGReg ra_reg;

        if (arg_slot_reg_p(slot)) {
            arg_reg = tcg_target_call_iarg_regs[slot];
        }
        ra_reg = parm->ra_gen(s, ldst, arg_reg);

        ptr_mov.dst = slot;
        ptr_mov.src = ra_reg;
        tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
    } else {
        imm = (uintptr_t)ldst->raddr;
        tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
    }
}

static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
                                       const TCGCallArgumentLoc *loc,
                                       TCGType dst_type, TCGType src_type,
                                       TCGReg lo, TCGReg hi)
{
    MemOp reg_mo;

    if (dst_type <= TCG_TYPE_REG) {
        MemOp src_ext;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
            break;
        case TCG_CALL_ARG_EXTEND_U:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_UL;
            break;
        case TCG_CALL_ARG_EXTEND_S:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_SL;
            break;
        default:
            g_assert_not_reached();
        }

        mov[0].dst = loc->arg_slot;
        mov[0].dst_type = dst_type;
        mov[0].src = lo;
        mov[0].src_type = src_type;
        mov[0].src_ext = src_ext;
        return 1;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        assert(dst_type == TCG_TYPE_I64);
        reg_mo = MO_32;
    } else {
        assert(dst_type == TCG_TYPE_I128);
        reg_mo = MO_64;
    }

    mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
    mov[0].src = lo;
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = reg_mo;

    mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
    mov[1].src = hi;
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = reg_mo;

    return 2;
}

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[2];
    unsigned next_arg, nmov;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_ld32_mmu;
        break;
    case MO_64:
        info = &info_helper_ld64_mmu;
        break;
    case MO_128:
        info = &info_helper_ld128_mmu;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;

    loc = &info->in[next_arg];
    nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_TL, TCG_TYPE_TL,
                                  ldst->addrlo_reg, ldst->addrhi_reg);
    next_arg += nmov;

    tcg_out_helper_load_slots(s, nmov, mov, parm);

    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
    case TCG_CALL_RET_BY_VEC:
        break;
    case TCG_CALL_RET_BY_REF:
        /*
         * The return reference is in the first argument slot.
         * We need memory in which to return: re-use the top of stack.
         */
        {
            int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;

            if (arg_slot_reg_p(0)) {
                tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
            } else {
                tcg_debug_assert(parm->ntmp != 0);
                tcg_out_addi_ptr(s, parm->tmp[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
                tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                           TCG_REG_CALL_STACK, ofs_slot0);
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}

static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                  bool load_sign,
                                  const TCGLdstHelperParam *parm)
{
    MemOp mop = get_memop(ldst->oi);
    TCGMovExtend mov[2];
    int ofs_slot0;

    switch (ldst->type) {
    case TCG_TYPE_I64:
        if (TCG_TARGET_REG_BITS == 32) {
            break;
        }
        /* fall through */

    case TCG_TYPE_I32:
        mov[0].dst = ldst->datalo_reg;
        mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
        mov[0].dst_type = ldst->type;
        mov[0].src_type = TCG_TYPE_REG;

        /*
         * If load_sign, then we allowed the helper to perform the
         * appropriate sign extension to tcg_target_ulong, and all
         * we need now is a plain move.
         *
         * If they do not, then we expect the relevant extension
         * instruction to be no more expensive than a move, and
         * we thus save the icache etc by only using one of two
         * helper functions.
         */
        if (load_sign || !(mop & MO_SIGN)) {
            if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
                mov[0].src_ext = MO_32;
            } else {
                mov[0].src_ext = MO_64;
            }
        } else {
            mov[0].src_ext = mop & MO_SSIZE;
        }
        tcg_out_movext1(s, mov);
        return;

    case TCG_TYPE_I128:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            break;
        case TCG_CALL_RET_BY_VEC:
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       TCG_REG_CALL_STACK, ofs_slot0);
            /* fall through */
        case TCG_CALL_RET_BY_REF:
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
            return;
        default:
            g_assert_not_reached();
        }
        break;

    default:
        g_assert_not_reached();
    }

    mov[0].dst = ldst->datalo_reg;
    mov[0].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
    mov[0].dst_type = TCG_TYPE_I32;
    mov[0].src_type = TCG_TYPE_I32;
    mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    mov[1].dst = ldst->datahi_reg;
    mov[1].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}

static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[4];
    TCGType data_type;
    unsigned next_arg, nmov, n;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_st32_mmu;
        data_type = TCG_TYPE_I32;
        break;
    case MO_64:
        info = &info_helper_st64_mmu;
        data_type = TCG_TYPE_I64;
        break;
    case MO_128:
        info = &info_helper_st128_mmu;
        data_type = TCG_TYPE_I128;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;
    nmov = 0;

    /* Handle addr argument. */
    loc = &info->in[next_arg];
    n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_TL, TCG_TYPE_TL,
                               ldst->addrlo_reg, ldst->addrhi_reg);
    next_arg += n;
    nmov += n;

    /* Handle data argument. */
    loc = &info->in[next_arg];
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
                                   ldst->datalo_reg, ldst->datahi_reg);
        next_arg += n;
        nmov += n;
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        break;

    case TCG_CALL_ARG_BY_REF:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        tcg_debug_assert(data_type == TCG_TYPE_I128);
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));

        tcg_out_helper_load_slots(s, nmov, mov, parm);

        if (arg_slot_reg_p(loc->arg_slot)) {
            tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
                             TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
        } else {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
            tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                       TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
        }
        next_arg += 2;
        break;

    default:
        g_assert_not_reached();
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}

#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                               \
    do {                                                        \
        (to)->field += qatomic_read(&((from)->field));          \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)
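
/*
 * Example expansion (illustrative): PROF_ADD(prof, orig, tb_count)
 * becomes
 *
 *     (prof)->tb_count += qatomic_read(&((orig)->tb_count));
 *
 * i.e. each per-context counter is sampled exactly once with an atomic
 * read, tolerating concurrent updates by the owning vCPU thread.
 */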

/* Pass in a zero'ed @prof */
static void
tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(GString *buf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                               prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += qatomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif

int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        qatomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            qatomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        qatomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            qatomic_set(&prof->temp_count_max, n);
        }
    }
#endif

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP:\n");
            tcg_dump_ops(s, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_0(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(pc_start))) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                fprintf(logfile, "OP before indirect lowering:\n");
                tcg_dump_ops(s, logfile, false);
                fprintf(logfile, "\n");
                qemu_log_unlock(logfile);
            }
        }

        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP after optimization and liveness analysis:\n");
            tcg_dump_ops(s, logfile, true);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /* Initialize goto_tb jump offsets. */
    tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                s->gen_insn_data[num_insns][i] =
                    tcg_get_insn_start_param(op, i);
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_exit_tb:
            tcg_out_exit_tb(s, op->args[0]);
            break;
        case INDEX_op_goto_tb:
            tcg_out_goto_tb(s, op->args[0]);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
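
/*
 * Illustrative note on the return value (the retry policy lives in the
 * caller, roughly tb_gen_code): a negative result is a restart request,
 * not a hard error.
 *
 *     size = tcg_gen_code(s, tb, pc);
 *     if (size == -1)  ->  code_gen_buffer overflow; flush and retranslate
 *     if (size == -2)  ->  TB too large for gen_insn_end_off, or a
 *                          relocation could not be resolved; retry with
 *                          fewer guest instructions
 */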

#ifdef CONFIG_PROFILER
void tcg_dump_info(GString *buf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    g_string_append_printf(buf, "JIT cycles          %" PRId64
                           " (%0.3f s at 2.4 GHz)\n",
                           tot, tot / 2.4e9);
    g_string_append_printf(buf, "translated TBs      %" PRId64
                           " (aborted=%" PRId64 " %0.1f%%)\n",
                           tb_count, s->tb_count1 - tb_count,
                           (double)(s->tb_count1 - s->tb_count)
                           / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    g_string_append_printf(buf, "avg ops/TB          %0.1f max=%d\n",
                           (double)s->op_count / tb_div_count,
                           s->op_count_max);
    g_string_append_printf(buf, "deleted ops/TB      %0.2f\n",
                           (double)s->del_op_count / tb_div_count);
    g_string_append_printf(buf, "avg temps/TB        %0.2f max=%d\n",
                           (double)s->temp_count / tb_div_count,
                           s->temp_count_max);
    g_string_append_printf(buf, "avg host code/TB    %0.1f\n",
                           (double)s->code_out_len / tb_div_count);
    g_string_append_printf(buf, "avg search data/TB  %0.1f\n",
                           (double)s->search_out_len / tb_div_count);

    g_string_append_printf(buf, "cycles/op           %0.1f\n",
                           s->op_count ? (double)tot / s->op_count : 0);
    g_string_append_printf(buf, "cycles/in byte      %0.1f\n",
                           s->code_in_len ? (double)tot / s->code_in_len : 0);
    g_string_append_printf(buf, "cycles/out byte     %0.1f\n",
                           s->code_out_len ? (double)tot / s->code_out_len : 0);
    g_string_append_printf(buf, "cycles/search byte  %0.1f\n",
                           s->search_out_len ?
                           (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    g_string_append_printf(buf, "  gen_interm time   %0.1f%%\n",
                           (double)s->interm_time / tot * 100.0);
    g_string_append_printf(buf, "  gen_code time     %0.1f%%\n",
                           (double)s->code_time / tot * 100.0);
    g_string_append_printf(buf, "optim./code time    %0.1f%%\n",
                           (double)s->opt_time / (s->code_time ?
                                                  s->code_time : 1)
                           * 100.0);
    g_string_append_printf(buf, "liveness/code time  %0.1f%%\n",
                           (double)s->la_time / (s->code_time ?
                                                 s->code_time : 1) * 100.0);
    g_string_append_printf(buf, "cpu_restore count   %" PRId64 "\n",
                           s->restore_count);
    g_string_append_printf(buf, "  avg cycles        %0.1f\n",
                           s->restore_count ?
                           (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */

typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
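
/*
 * Worked example (illustrative): with the .str table used below,
 * "\0" ".text\0" ".debug_info\0" ..., find_string(img->str, ".text")
 * returns 1 and find_string(img->str, ".debug_info") returns 7,
 * exactly the byte offsets stored into the sh_name fields.  Note the
 * loop assumes the string is present; there is no failure return.
 */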

static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif