/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/tcg-op-common.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrame;
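
/*
 * A slow-path descriptor for one qemu_ld/qemu_st: records everything the
 * backend needs to emit the out-of-line helper call and to patch the
 * branches back into the fast path.
 */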
typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
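
/*
 * Parameters for tcg_out_{ld,st}_helper_args: ra_gen produces the return
 * address argument, and ntmp/tmp name scratch registers the helpers may use.
 */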
typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));

static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};

typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;

const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
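
/*
 * Low-level emitters: append raw instruction units of the given width to
 * s->code_ptr, in host order.  The tcg_patch* variants rewrite already
 * emitted units in place, e.g. when resolving relocations.
 */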
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
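
/*
 * Walk every label and apply its pending relocations; fails only if a
 * displacement does not fit in the instruction field being patched.
 */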
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
static int tlb_mask_table_ofs(TCGContext *s, int which)
{
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}
#endif

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;

/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}

/**
 * tcg_out_movext2 -- move and extend two pair
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}

/**
 * tcg_out_movext3 -- move and extend three pair
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4
718 #define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
719 #define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
720 #define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
721 #define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)
723 #define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
724 #define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
725 #define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
726 #define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
728 #define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
730 #define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
731 #define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
732 #define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
733 #define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
734 #define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
736 #include "tcg-target.c.inc"

#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_ctx_init, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
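
/*
 * Drop the large single-use allocations and rewind the chunk list; the
 * chunks themselves are kept for reuse by the next translation.
 */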
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif

static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}
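
/*
 * Build (once per helper) the libffi call interface used by the TCI
 * interpreter, decoding the packed 3-bit typecodes of the typemask.
 */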
static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}

#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */

static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}

typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
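
/* Assign one argument to the next free register or stack slot. */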
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structure pass by-reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}
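
/*
 * Compute, from the typemask alone, where the return value and each
 * argument of a helper live: registers, stack slots, or by-reference
 * copies.  Done once per helper, at first use.
 */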
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
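
/*
 * Generate the host prologue/epilogue once into the start of the code
 * buffer and publish the entry point as tcg_qemu_tb_exec.
 */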
void tcg_prologue_init(void)
{
    TCGContext *s = tcg_ctx;
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
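
/* Reset all per-TB state before translating a new TB. */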
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);

    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
                     s->addr_type == TCG_TYPE_I64);

    tcg_debug_assert(s->insn_start_words > 0);
}
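
/* Allocate a fresh, zeroed temp, restarting translation on overflow. */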
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
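
/* Allocate a new temp as a global; globals precede all other temps. */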
static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type. */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}
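
/*
 * Return the interned constant temp for (type, val), creating it and
 * entering it into the per-type hash table on first use.
 */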
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);

    assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
    assert(o % sizeof(TCGTemp) == 0);

    return (void *)tcg_ctx + (uintptr_t)v;
}
#endif /* CONFIG_DEBUG_TCG */
1828 /* Return true if OP may appear in the opcode stream.
1829 Test the runtime variable that controls each opcode. */
1830 bool tcg_op_supported(TCGOpcode op
)
1833 = TCG_TARGET_HAS_v64
| TCG_TARGET_HAS_v128
| TCG_TARGET_HAS_v256
;
1836 case INDEX_op_discard
:
1837 case INDEX_op_set_label
:
1841 case INDEX_op_insn_start
:
1842 case INDEX_op_exit_tb
:
1843 case INDEX_op_goto_tb
:
1844 case INDEX_op_goto_ptr
:
1845 case INDEX_op_qemu_ld_a32_i32
:
1846 case INDEX_op_qemu_ld_a64_i32
:
1847 case INDEX_op_qemu_st_a32_i32
:
1848 case INDEX_op_qemu_st_a64_i32
:
1849 case INDEX_op_qemu_ld_a32_i64
:
1850 case INDEX_op_qemu_ld_a64_i64
:
1851 case INDEX_op_qemu_st_a32_i64
:
1852 case INDEX_op_qemu_st_a64_i64
:
1855 case INDEX_op_qemu_st8_a32_i32
:
1856 case INDEX_op_qemu_st8_a64_i32
:
1857 return TCG_TARGET_HAS_qemu_st8_i32
;
1859 case INDEX_op_qemu_ld_a32_i128
:
1860 case INDEX_op_qemu_ld_a64_i128
:
1861 case INDEX_op_qemu_st_a32_i128
:
1862 case INDEX_op_qemu_st_a64_i128
:
1863 return TCG_TARGET_HAS_qemu_ldst_i128
;
1865 case INDEX_op_mov_i32
:
1866 case INDEX_op_setcond_i32
:
1867 case INDEX_op_brcond_i32
:
1868 case INDEX_op_ld8u_i32
:
1869 case INDEX_op_ld8s_i32
:
1870 case INDEX_op_ld16u_i32
:
1871 case INDEX_op_ld16s_i32
:
1872 case INDEX_op_ld_i32
:
1873 case INDEX_op_st8_i32
:
1874 case INDEX_op_st16_i32
:
1875 case INDEX_op_st_i32
:
1876 case INDEX_op_add_i32
:
1877 case INDEX_op_sub_i32
:
1878 case INDEX_op_mul_i32
:
1879 case INDEX_op_and_i32
:
1880 case INDEX_op_or_i32
:
1881 case INDEX_op_xor_i32
:
1882 case INDEX_op_shl_i32
:
1883 case INDEX_op_shr_i32
:
1884 case INDEX_op_sar_i32
:
1887 case INDEX_op_negsetcond_i32
:
1888 return TCG_TARGET_HAS_negsetcond_i32
;
1889 case INDEX_op_movcond_i32
:
1890 return TCG_TARGET_HAS_movcond_i32
;
1891 case INDEX_op_div_i32
:
1892 case INDEX_op_divu_i32
:
1893 return TCG_TARGET_HAS_div_i32
;
1894 case INDEX_op_rem_i32
:
1895 case INDEX_op_remu_i32
:
1896 return TCG_TARGET_HAS_rem_i32
;
1897 case INDEX_op_div2_i32
:
1898 case INDEX_op_divu2_i32
:
1899 return TCG_TARGET_HAS_div2_i32
;
1900 case INDEX_op_rotl_i32
:
1901 case INDEX_op_rotr_i32
:
1902 return TCG_TARGET_HAS_rot_i32
;
1903 case INDEX_op_deposit_i32
:
1904 return TCG_TARGET_HAS_deposit_i32
;
1905 case INDEX_op_extract_i32
:
1906 return TCG_TARGET_HAS_extract_i32
;
1907 case INDEX_op_sextract_i32
:
1908 return TCG_TARGET_HAS_sextract_i32
;
1909 case INDEX_op_extract2_i32
:
1910 return TCG_TARGET_HAS_extract2_i32
;
1911 case INDEX_op_add2_i32
:
1912 return TCG_TARGET_HAS_add2_i32
;
1913 case INDEX_op_sub2_i32
:
1914 return TCG_TARGET_HAS_sub2_i32
;
1915 case INDEX_op_mulu2_i32
:
1916 return TCG_TARGET_HAS_mulu2_i32
;
1917 case INDEX_op_muls2_i32
:
1918 return TCG_TARGET_HAS_muls2_i32
;
1919 case INDEX_op_muluh_i32
:
1920 return TCG_TARGET_HAS_muluh_i32
;
1921 case INDEX_op_mulsh_i32
:
1922 return TCG_TARGET_HAS_mulsh_i32
;
1923 case INDEX_op_ext8s_i32
:
1924 return TCG_TARGET_HAS_ext8s_i32
;
1925 case INDEX_op_ext16s_i32
:
1926 return TCG_TARGET_HAS_ext16s_i32
;
1927 case INDEX_op_ext8u_i32
:
1928 return TCG_TARGET_HAS_ext8u_i32
;
1929 case INDEX_op_ext16u_i32
:
1930 return TCG_TARGET_HAS_ext16u_i32
;
1931 case INDEX_op_bswap16_i32
:
1932 return TCG_TARGET_HAS_bswap16_i32
;
1933 case INDEX_op_bswap32_i32
:
1934 return TCG_TARGET_HAS_bswap32_i32
;
1935 case INDEX_op_not_i32
:
1936 return TCG_TARGET_HAS_not_i32
;
1937 case INDEX_op_neg_i32
:
1938 return TCG_TARGET_HAS_neg_i32
;
1939 case INDEX_op_andc_i32
:
1940 return TCG_TARGET_HAS_andc_i32
;
1941 case INDEX_op_orc_i32
:
1942 return TCG_TARGET_HAS_orc_i32
;
1943 case INDEX_op_eqv_i32
:
1944 return TCG_TARGET_HAS_eqv_i32
;
1945 case INDEX_op_nand_i32
:
1946 return TCG_TARGET_HAS_nand_i32
;
1947 case INDEX_op_nor_i32
:
1948 return TCG_TARGET_HAS_nor_i32
;
1949 case INDEX_op_clz_i32
:
1950 return TCG_TARGET_HAS_clz_i32
;
1951 case INDEX_op_ctz_i32
:
1952 return TCG_TARGET_HAS_ctz_i32
;
1953 case INDEX_op_ctpop_i32
:
1954 return TCG_TARGET_HAS_ctpop_i32
;
1956 case INDEX_op_brcond2_i32
:
1957 case INDEX_op_setcond2_i32
:
1958 return TCG_TARGET_REG_BITS
== 32;
1960 case INDEX_op_mov_i64
:
1961 case INDEX_op_setcond_i64
:
1962 case INDEX_op_brcond_i64
:
1963 case INDEX_op_ld8u_i64
:
1964 case INDEX_op_ld8s_i64
:
1965 case INDEX_op_ld16u_i64
:
1966 case INDEX_op_ld16s_i64
:
1967 case INDEX_op_ld32u_i64
:
1968 case INDEX_op_ld32s_i64
:
1969 case INDEX_op_ld_i64
:
1970 case INDEX_op_st8_i64
:
1971 case INDEX_op_st16_i64
:
1972 case INDEX_op_st32_i64
:
1973 case INDEX_op_st_i64
:
1974 case INDEX_op_add_i64
:
1975 case INDEX_op_sub_i64
:
1976 case INDEX_op_mul_i64
:
1977 case INDEX_op_and_i64
:
1978 case INDEX_op_or_i64
:
1979 case INDEX_op_xor_i64
:
1980 case INDEX_op_shl_i64
:
1981 case INDEX_op_shr_i64
:
1982 case INDEX_op_sar_i64
:
1983 case INDEX_op_ext_i32_i64
:
1984 case INDEX_op_extu_i32_i64
:
1985 return TCG_TARGET_REG_BITS
== 64;
1987 case INDEX_op_negsetcond_i64
:
1988 return TCG_TARGET_HAS_negsetcond_i64
;
1989 case INDEX_op_movcond_i64
:
1990 return TCG_TARGET_HAS_movcond_i64
;
1991 case INDEX_op_div_i64
:
1992 case INDEX_op_divu_i64
:
1993 return TCG_TARGET_HAS_div_i64
;
1994 case INDEX_op_rem_i64
:
1995 case INDEX_op_remu_i64
:
1996 return TCG_TARGET_HAS_rem_i64
;
1997 case INDEX_op_div2_i64
:
1998 case INDEX_op_divu2_i64
:
1999 return TCG_TARGET_HAS_div2_i64
;
2000 case INDEX_op_rotl_i64
:
2001 case INDEX_op_rotr_i64
:
2002 return TCG_TARGET_HAS_rot_i64
;
2003 case INDEX_op_deposit_i64
:
2004 return TCG_TARGET_HAS_deposit_i64
;
2005 case INDEX_op_extract_i64
:
2006 return TCG_TARGET_HAS_extract_i64
;
2007 case INDEX_op_sextract_i64
:
2008 return TCG_TARGET_HAS_sextract_i64
;
2009 case INDEX_op_extract2_i64
:
2010 return TCG_TARGET_HAS_extract2_i64
;
2011 case INDEX_op_extrl_i64_i32
:
2012 case INDEX_op_extrh_i64_i32
:
2013 return TCG_TARGET_HAS_extr_i64_i32
;
2014 case INDEX_op_ext8s_i64
:
2015 return TCG_TARGET_HAS_ext8s_i64
;
2016 case INDEX_op_ext16s_i64
:
2017 return TCG_TARGET_HAS_ext16s_i64
;
2018 case INDEX_op_ext32s_i64
:
2019 return TCG_TARGET_HAS_ext32s_i64
;
2020 case INDEX_op_ext8u_i64
:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
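/*
 * Illustrative use (not the only call site): a frontend choosing
 * between emitting an opcode and calling a helper can ask, e.g.,
 *     if (tcg_op_supported(INDEX_op_ctpop_i64)) { ... }
 * though the generic expanders normally consult the corresponding
 * TCG_TARGET_HAS_* macro directly, just as the switch above does.
 */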
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
{
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
        init_call_layout(info);
        g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
    }

    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* Flag helpers that may affect guest state */
    if (tcg_ctx->plugin_insn &&
        !(info->flags & TCG_CALL_PLUGIN) &&
        !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
    case 4:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
        tcg_debug_assert(ret->temp_subindex == 0);
        for (i = 0; i < n; ++i) {
            op->args[pi++] = temp_arg(ret + i);
        }
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_BY_REF:
        case TCG_CALL_ARG_BY_REF_N:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_ebb_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)info->func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}
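/*
 * The resulting call op packs its operands into one flat array:
 *   args[0 .. nr_out-1]               output temps
 *   args[nr_out .. nr_out+nr_in-1]    input temps
 *   args[total_args-2]                the host function pointer
 *   args[total_args-1]                the TCGHelperInfo pointer
 * which is why total_args above is nr_out + nr_in + 2.
 */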
void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
{
    tcg_gen_callN(info, ret, NULL);
}

void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
{
    tcg_gen_callN(info, ret, &t1);
}

void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
{
    TCGTemp *args[2] = { t1, t2 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3)
{
    TCGTemp *args[3] = { t1, t2, t3 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
{
    TCGTemp *args[4] = { t1, t2, t3, t4 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
{
    TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
                   TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
{
    TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
                   TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
{
    TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
    tcg_gen_callN(info, ret, args);
}
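/*
 * A minimal sketch of how generated code reaches these wrappers,
 * assuming a hypothetical two-argument helper "foo": the expansion of
 * gen_helper_foo(ret, a, b) from the helper-gen machinery boils down to
 *     tcg_gen_call2(&helper_info_foo, tcgv_i64_temp(ret),
 *                   tcgv_i64_temp(a), tcgv_i64_temp(b));
 * with the actual argument marshalling decided later by
 * init_call_layout.
 */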
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_TB:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_TB:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
{
    [MO_UB] = "ub",
    [MO_SB] = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
    [MO_128 + MO_BE] = "beo",
    [MO_128 + MO_LE] = "leo",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
    [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
    [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
    [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
    [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
    [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
    [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
};

static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ] = "iz",
    [TCG_BSWAP_OZ] = "oz",
    [TCG_BSWAP_OS] = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}

/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
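/*
 * The statement expression above lets the macro be used as an ordinary
 * int-valued call, e.g.:
 *     col += ne_fprintf(f, " %s ", def->name);
 * clamping a fprintf error (a negative return) to zero characters.
 */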
static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0, k = s->insn_start_words; i < k; ++i) {
                col += ne_fprintf(f, " %016" PRIx64,
                                  tcg_get_insn_start_param(op, i));
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep =  k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep =  k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_negsetcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_negsetcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_a32_i32:
            case INDEX_op_qemu_ld_a64_i32:
            case INDEX_op_qemu_st_a32_i32:
            case INDEX_op_qemu_st_a64_i32:
            case INDEX_op_qemu_st8_a32_i32:
            case INDEX_op_qemu_st8_a64_i32:
            case INDEX_op_qemu_ld_a32_i64:
            case INDEX_op_qemu_ld_a64_i64:
            case INDEX_op_qemu_st_a32_i64:
            case INDEX_op_qemu_st_a64_i64:
            case INDEX_op_qemu_ld_a32_i128:
            case INDEX_op_qemu_ld_a64_i128:
            case INDEX_op_qemu_st_a32_i128:
            case INDEX_op_qemu_st_a64_i128:
                {
                    const char *s_al, *s_op, *s_at;
                    MemOpIdx oi = op->args[k++];
                    MemOp mop = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
                    s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
                    s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
                    mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);

                    /* If all fields are accounted for, print symbolically. */
                    if (!mop && s_al && s_op && s_at) {
                        col += ne_fprintf(f, ",%s%s%s,%u",
                                          s_at, s_al, s_op, ix);
                    } else {
                        mop = get_memop(oi);
                        col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            case INDEX_op_mb:
                {
                    TCGBar membar = op->args[k];
                    const char *b_op, *m_op;

                    switch (membar & TCG_BAR_SC) {
                    case 0:
                        b_op = "none";
                        break;
                    case TCG_BAR_LDAQ:
                        b_op = "acq";
                        break;
                    case TCG_BAR_STRL:
                        b_op = "rel";
                        break;
                    case TCG_BAR_SC:
                        b_op = "seq";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    switch (membar & TCG_MO_ALL) {
                    case 0:
                        m_op = "none";
                        break;
                    case TCG_MO_LD_LD:
                        m_op = "rr";
                        break;
                    case TCG_MO_LD_ST:
                        m_op = "rw";
                        break;
                    case TCG_MO_ST_LD:
                        m_op = "wr";
                        break;
                    case TCG_MO_ST_ST:
                        m_op = "ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST:
                        m_op = "rr+rw";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD:
                        m_op = "rr+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_ST:
                        m_op = "rr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rw+wr";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rw+ww";
                        break;
                    case TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "wr+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rr+rw+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rr+rw+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rr+wr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rw+wr+ww";
                        break;
                    case TCG_MO_ALL:
                        m_op = "all";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
                    i++, k++;
                }
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, "  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, "  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, "  pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}
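/*
 * Sample of the resulting dump (illustrative, not verbatim output):
 *   mov_i32 tmp0,r2                          dead: 1  pref=all
 *   add_i32 tmp2,tmp1,tmp0                   dead: 1 2
 * Each line is padded to column 40 before the liveness and preference
 * annotations are appended.
 */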
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
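/*
 * Worked example (hypothetical constraints): for an op whose arguments
 * allow { any register }, { one fixed register }, { a pair } in that
 * order, the priorities above are -n, INT_MAX and a small positive
 * value, so the sort allocates the fixed register first, then the
 * pair, and leaves the least constrained argument for last, when the
 * most registers are still free.
 */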
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        bool saw_alias_pair = false;
        int i, o, i2, o2, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        /*
         * Macro magic should make it impossible, but double-check that
         * the array index is in range.  Since the signness of an enum
         * is implementation defined, force the result to unsigned.
         */
        unsigned con_set = tcg_target_op_def(op);
        tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
        tdefs = &constraint_sets[con_set];

        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            bool input_p = i >= def->nb_oargs;

            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            switch (*ct_str) {
            case '0' ... '9':
                o = *ct_str - '0';
                tcg_debug_assert(input_p);
                tcg_debug_assert(o < def->nb_oargs);
                tcg_debug_assert(def->args_ct[o].regs != 0);
                tcg_debug_assert(!def->args_ct[o].oalias);
                def->args_ct[i] = def->args_ct[o];
                /* The output sets oalias. */
                def->args_ct[o].oalias = 1;
                def->args_ct[o].alias_index = i;
                /* The input sets ialias. */
                def->args_ct[i].ialias = 1;
                def->args_ct[i].alias_index = o;
                if (def->args_ct[i].pair) {
                    saw_alias_pair = true;
                }
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case '&':
                tcg_debug_assert(!input_p);
                def->args_ct[i].newreg = true;
                ct_str++;
                break;

            case 'p': /* plus */
                /* Allocate to the register after the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 2,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs << 1,
                };
                def->args_ct[o].pair = 1;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case 'm': /* minus */
                /* Allocate to the register before the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 1,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs >> 1,
                };
                def->args_ct[o].pair = 2;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;
            }

            do {
                switch (*ct_str) {
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    break;

                /* Include all of the target-specific constraints. */

#undef CONST
#define CONST(CASE, MASK) \
    case CASE: def->args_ct[i].ct |= MASK; break;
#define REGS(CASE, MASK) \
    case CASE: def->args_ct[i].regs |= MASK; break;

#include "tcg-target-con-str.h"

#undef REGS
#undef CONST
                default:
                case '0' ... '9':
                case '&':
                case 'p':
                case 'm':
                    /* Typo in TCGTargetOpDef constraint. */
                    g_assert_not_reached();
                }
            } while (*++ct_str != '\0');
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /*
         * Fix up output pairs that are aliased with inputs.
         * When we created the alias, we copied pair from the output.
         * There are three cases:
         *    (1a) Pairs of inputs alias pairs of outputs.
         *    (1b) One input aliases the first of a pair of outputs.
         *    (2)  One input aliases the second of a pair of outputs.
         *
         * Case 1a is handled by making sure that the pair_index'es are
         * properly updated so that they appear the same as a pair of inputs.
         *
         * Case 1b is handled by setting the pair_index of the input to
         * itself, simply so it doesn't point to an unrelated argument.
         * Since we don't encounter the "second" during the input allocation
         * phase, nothing happens with the second half of the input pair.
         *
         * Case 2 is handled by setting the second input to pair=3, the
         * first output to pair=3, and the pair_index'es to match.
         */
        if (saw_alias_pair) {
            for (i = def->nb_oargs; i < nb_args; i++) {
                /*
                 * Since [0-9pm] must be alone in the constraint string,
                 * the only way they can both be set is if the pair comes
                 * from the output alias.
                 */
                if (!def->args_ct[i].ialias) {
                    continue;
                }
                switch (def->args_ct[i].pair) {
                case 0:
                    break;
                case 1:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 1);
                    tcg_debug_assert(def->args_ct[o2].pair == 2);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 2);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 1b */
                        def->args_ct[i].pair_index = i;
                    }
                    break;
                case 2:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 2);
                    tcg_debug_assert(def->args_ct[o2].pair == 1);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 1);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 2 */
                        def->args_ct[i].pair = 3;
                        def->args_ct[o2].pair = 3;
                        def->args_ct[i].pair_index = o2;
                        def->args_ct[o2].pair_index = i;
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }
        }

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
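/*
 * Hypothetical constraint-set example: an entry such as
 *     { .args_ct_str = { "r", "0", "ri" } }
 * describes one output in any register, a first input that must be
 * allocated to the same register as the output (the '0' alias handled
 * above), and a second input accepting a register or immediate.
 */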
static void remove_label_use(TCGOp *op, int idx)
{
    TCGLabel *label = arg_label(op->args[idx]);
    TCGLabelUse *use;

    QSIMPLEQ_FOREACH(use, &label->branches, next) {
        if (use->op == op) {
            QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
            return;
        }
    }
    g_assert_not_reached();
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    switch (op->opc) {
    case INDEX_op_br:
        remove_label_use(op, 0);
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        remove_label_use(op, 3);
        break;
    case INDEX_op_brcond2_i32:
        remove_label_use(op, 5);
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;
}
void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op = NULL;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        QTAILQ_FOREACH(op, &s->free_ops, link) {
            if (nargs <= op->nargs) {
                QTAILQ_REMOVE(&s->free_ops, op, link);
                nargs = op->nargs;
                goto found;
            }
        }
    }

    /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
    nargs = MAX(4, nargs);
    op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);

 found:
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    op->nargs = nargs;

    /* Check for bitfield overflow. */
    tcg_debug_assert(op->nargs == nargs);

    s->nb_ops++;
    return op;
}
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
    TCGOp *op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
static void move_label_uses(TCGLabel *to, TCGLabel *from)
{
    TCGLabelUse *u;

    QSIMPLEQ_FOREACH(u, &from->branches, next) {
        TCGOp *op = u->op;
        switch (op->opc) {
        case INDEX_op_br:
            op->args[0] = label_arg(to);
            break;
        case INDEX_op_brcond_i32:
        case INDEX_op_brcond_i64:
            op->args[3] = label_arg(to);
            break;
        case INDEX_op_brcond2_i32:
            op->args[5] = label_arg(to);
            break;
        default:
            g_assert_not_reached();
        }
    }

    QSIMPLEQ_CONCAT(&to->branches, &from->branches);
}
/* Reachable analysis : remove unreachable code. */
static void __attribute__((noinline))
reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next, *op_prev;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);

            /*
             * Note that the first op in the TB is always a load,
             * so there is always something before a label.
             */
            op_prev = QTAILQ_PREV(op, link);

            /*
             * If we find two sequential labels, move all branches to
             * reference the second label and remove the first label.
             * Do this before branch to next optimization, so that the
             * middle label is out of the way.
             */
            if (op_prev->opc == INDEX_op_set_label) {
                move_label_uses(label, arg_label(op_prev->args[0]));
                tcg_op_remove(s, op_prev);
                op_prev = QTAILQ_PREV(op, link);
            }

            /*
             * Optimization can fold conditional branches to unconditional.
             * If we find a label which is preceded by an unconditional
             * branch to next, remove the branch.  We couldn't do this when
             * processing the branch because any dead code between the branch
             * and label had not yet been removed.
             */
            if (op_prev->opc == INDEX_op_br &&
                label == arg_label(op_prev->args[0])) {
                tcg_op_remove(s, op_prev);
                /* Fall through means insns become live again. */
                dead = false;
            }

            if (QSIMPLEQ_EMPTY(&label->branches)) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
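/*
 * Illustration: after optimization has folded a brcond into "br $L1",
 * a sequence like
 *     br $L1;  <dead ops>;  set_label $L1
 * loses the dead ops on this scan, and then the branch-to-next and (if
 * no longer referenced) the label itself are removed, exactly as the
 * comments above describe.
 */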
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp. */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}
/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}
/* liveness analysis: sync globals back to memory. */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs. */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/*
 * liveness analysis: conditional branch: all temps are dead unless
 * explicitly live-across-conditional-branch, globals and local temps
 * should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_TB:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory and kill. */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls. */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart. */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
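/*
 * The per-temp liveness state used by the la_* helpers is a two-bit
 * encoding: TS_DEAD alone means the value is not needed, TS_MEM means
 * the memory slot holds a valid copy, and TS_DEAD | TS_MEM (e.g. a
 * just-synced global) means the register copy may be discarded because
 * memory is up to date.
 */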
/*
 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
 * to TEMP_EBB, if possible.
 */
static void __attribute__((noinline))
liveness_pass_0(TCGContext *s)
{
    void * const multiple_ebb = (void *)(uintptr_t)-1;
    int nb_temps = s->nb_temps;
    TCGOp *op, *ebb;

    for (int i = s->nb_globals; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    /*
     * Represent each EBB by the op at which it begins.  In the case of
     * the first EBB, this is the first op, otherwise it is a label.
     * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
     * within a single EBB, else MULTIPLE_EBB.
     */
    ebb = QTAILQ_FIRST(&s->ops);
    QTAILQ_FOREACH(op, &s->ops, link) {
        const TCGOpDef *def;
        int nb_oargs, nb_iargs;

        switch (op->opc) {
        case INDEX_op_set_label:
            ebb = op;
            continue;
        case INDEX_op_discard:
            continue;
        case INDEX_op_call:
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            break;
        default:
            def = &tcg_op_defs[op->opc];
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            break;
        }

        for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
            TCGTemp *ts = arg_temp(op->args[i]);

            if (ts->kind != TEMP_TB) {
                continue;
            }
            if (ts->state_ptr == NULL) {
                ts->state_ptr = ebb;
            } else if (ts->state_ptr != ebb) {
                ts->state_ptr = multiple_ebb;
            }
        }
    }

    /*
     * For TEMP_TB that turned out not to be used beyond one EBB,
     * reduce the liveness to TEMP_EBB.
     */
    for (int i = s->nb_globals; i < nb_temps; ++i) {
        TCGTemp *ts = &s->temps[i];
        if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
            ts->kind = TEMP_EBB;
        }
    }
}
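/*
 * Example of the effect: a translator temp written and read entirely
 * between two labels is seen in exactly one EBB, so it is demoted to
 * TEMP_EBB and need not be spilled to its memory slot across branches.
 */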
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed. */
static void __attribute__((noinline))
liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB. */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                const TCGHelperInfo *info = tcg_call_info(op);
                int call_flags = tcg_call_flags(op);

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead. */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);
                }

                /* Not used -- it will be tcg_target_call_oarg_reg(). */
                memset(op->output_pref, 0, sizeof(op->output_pref));

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper. */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs. */
                la_cross_call(s, nb_temps);

                /*
                 * Input arguments are live for preceding opcodes.
                 *
                 * For those arguments that die, and will be allocated in
                 * registers, clear the register set for that arg, to be
                 * filled in below.  For args that will be on the stack,
                 * reset to any available reg.  Process arguments in reverse
                 * order so that if a temp is used more than once, the stack
                 * reset to max happens before the register reset to 0.
                 */
                for (i = nb_iargs - 1; i >= 0; i--) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    if (ts->state & TS_DEAD) {
                        switch (loc->kind) {
                        case TCG_CALL_ARG_NORMAL:
                        case TCG_CALL_ARG_EXTEND_U:
                        case TCG_CALL_ARG_EXTEND_S:
                            if (arg_slot_reg_p(loc->arg_slot)) {
                                *la_temp_pref(ts) = 0;
                                break;
                            }
                            /* fall through */
                        default:
                            *la_temp_pref(ts) =
                                tcg_target_available_regs[ts->type];
                            break;
                        }
                        ts->state &= ~TS_DEAD;
                    }
                }

                /*
                 * For each input argument, add its input register to prefs.
                 * If a temp is used once, this produces a single set bit;
                 * if a temp is used multiple times, this produces a set.
                 */
                for (i = 0; i < nb_iargs; i++) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    switch (loc->kind) {
                    case TCG_CALL_ARG_NORMAL:
                    case TCG_CALL_ARG_EXTEND_U:
                    case TCG_CALL_ARG_EXTEND_S:
                        if (arg_slot_reg_p(loc->arg_slot)) {
                            tcg_regset_set_reg(*la_temp_pref(ts),
                                tcg_target_call_iarg_regs[loc->arg_slot]);
                        }
                        break;
                    default:
                        break;
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD &&
                       have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed. */
                if (i < ARRAY_SIZE(op->output_pref)) {
                    op->output_pref[i] = *la_temp_pref(ts);
                }

                /* Output args are dead. */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update. */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type. */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand. */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward. */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= output_pref(op, ct->alias_index);
                    }
                    /* If the combination is not possible, restart. */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
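/*
 * Example of the add2 narrowing above: for a 64-bit guest emulated on
 * a 32-bit host, "add2_i32 lo,hi,al,ah,bl,bh" whose hi result is dead
 * is rewritten in place to "add_i32 lo,al,bl", shifting args[2] and
 * args[4] down and marking only the low-word operands live.
 */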
/* Liveness analysis: Convert indirect regs to direct temporaries. */
static bool __attribute__((noinline))
liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dts->temp_subindex = its->temp_subindex;
            dts->kind = TEMP_EBB;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead. */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts && arg_ts->state == TS_DEAD) {
                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_ld_i32
                                  : INDEX_op_ld_i64);
                TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                lop->args[0] = temp_arg(dir_ts);
                lop->args[1] = temp_arg(arg_ts->mem_base);
                lop->args[2] = arg_ts->mem_offset;

                /* Loaded, but synced with memory. */
                arg_ts->state = TS_MEM;
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[i] = temp_arg(dir_ts);
                changes = true;
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available. */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                /* Sync outputs upon their last write. */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead. */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
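/*
 * Illustration: for a guest register that lives behind an indirect
 * base (indirect_reg set, stored at mem_base + mem_offset), pass 2
 * rewrites each use to a fresh TEMP_EBB temp, inserting ld_i32/ld_i64
 * before the first use and st_i32/st_i64 after the last write.
 */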
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    int size, align;
    intptr_t off;

    /* When allocating an object, look at the full type. */
    size = tcg_type_size(ts->base_type);
    switch (ts->base_type) {
    case TCG_TYPE_I32:
        align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        align = 8;
        break;
    case TCG_TYPE_I128:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /*
         * Note that we do not require aligned storage for V256,
         * and that we provide alignment for I128 to match V128,
         * even if that's above what the host ABI requires.
         */
        align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Assume the stack is sufficiently aligned.
     * This affects e.g. ARM NEON, where we have 8 byte stack alignment
     * and do not require 16 byte vector alignment.  This seems slightly
     * easier than fully parameterizing the above switch statement.
     */
    align = MIN(TCG_TARGET_STACK_ALIGN, align);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;
#if defined(__sparc__)
    off += TCG_TARGET_STACK_BIAS;
#endif

    /* If the object was subdivided, assign memory to all the parts. */
    if (ts->base_type != ts->type) {
        int part_size = tcg_type_size(ts->type);
        int part_count = size / part_size;

        /*
         * Each part is allocated sequentially in tcg_temp_new_internal.
         * Jump back to the first part by subtracting the current index.
         */
        ts -= ts->temp_subindex;
        for (int i = 0; i < part_count; ++i) {
            ts[i].mem_offset = off + i * part_size;
            ts[i].mem_base = s->frame_temp;
            ts[i].mem_allocated = 1;
        }
    } else {
        ts->mem_offset = off;
        ts->mem_base = s->frame_temp;
        ts->mem_allocated = 1;
    }
}
/* Assign @reg to @ts, and update reg_to_temp[]. */
static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg old = ts->reg;
        tcg_debug_assert(s->reg_to_temp[old] == ts);
        if (old == reg) {
            return;
        }
        s->reg_to_temp[old] = NULL;
    }
    tcg_debug_assert(s->reg_to_temp[reg] == NULL);
    s->reg_to_temp[reg] = ts;
    ts->val_type = TEMP_VAL_REG;
    ts->reg = reg;
}
/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
    tcg_debug_assert(type != TEMP_VAL_REG);
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg reg = ts->reg;
        tcg_debug_assert(s->reg_to_temp[reg] == ts);
        s->reg_to_temp[reg] = NULL;
    }
    ts->val_type = type;
}
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_TB:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_EBB:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    set_temp_val_nonreg(s, ts, new_type);
}

/* Mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   registers needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            g_assert_not_reached();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference. */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    g_assert_not_reached();
}
static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
                                 TCGRegSet allocated_regs,
                                 TCGRegSet preferred_regs, bool rev)
{
    int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    /* Ensure that if I is not in allocated_regs, I+1 is not either. */
    reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /*
     * Skip the preferred_regs option if it cannot be satisfied,
     * or if the preference made no difference.
     */
    k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    /*
     * Minimize the number of flushes by looking for 2 free registers first,
     * then a single flush, then two flushes.
     */
    for (fmin = 2; fmin >= 0; fmin--) {
        for (j = k; j < 2; j++) {
            TCGRegSet set = reg_ct[j];

            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];

                if (tcg_regset_test_reg(set, reg)) {
                    int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
                    if (f >= fmin) {
                        tcg_reg_free(s, reg, allocated_regs);
                        tcg_reg_free(s, reg + 1, allocated_regs);
                        return reg;
                    }
                }
            }
        }
    }
    g_assert_not_reached();
}
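/*
 * The fmin loop above counts how many of the two candidate registers
 * are already free: a pair with f == 2 needs no spills, f == 1 spills
 * one value, and only as a last resort is a fully occupied pair
 * (f == 0) flushed.
 */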
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * do this generically.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        g_assert_not_reached();
    }
    set_temp_val_reg(s, ts, reg);
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
   modified be the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_TB:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_EBB:
            /* The liveness analysis already ensures that temps are dead.
               Keep an tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * At a conditional branch, we assume all temporaries are dead unless
 * explicitly live-across-conditional-branch; all globals and local
 * temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_TB:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here. */
    set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;
    TCGReg oreg, ireg;

    allocated_regs = s->reserved_regs;
    preferred_regs = output_pref(op, 0);
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }
    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    ireg = ts->reg;

    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
        return;
    }

    if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
        /*
         * The mov can be suppressed.  Kill input first, so that it
         * is unlinked from reg_to_temp, then set the output to the
         * reg that we saved from the input.
         */
        temp_dead(s, ts);
        oreg = ireg;
    } else {
        if (ots->val_type == TEMP_VAL_REG) {
            oreg = ots->reg;
        } else {
            /* Make sure to not spill the input register during allocation. */
            oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                 allocated_regs | ((TCGRegSet)1 << ireg),
                                 preferred_regs, ots->indirect_base);
        }
        if (!tcg_out_mov(s, otype, oreg, ireg)) {
            /*
             * Cross register class move not supported.
             * Store the source register into the destination slot
             * and leave the destination temp as TEMP_VAL_MEM.
             */
            assert(!temp_readonly(ots));
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ots);
            }
            tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
            set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
            ots->mem_coherent = 1;
            return;
        }
    }
    set_temp_val_reg(s, ots, oreg);
    ots->mem_coherent = 0;

    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, allocated_regs, 0, 0);
    }
}
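/*
 * Illustrative example: for "mov_i32 t2, t1" where liveness has marked
 * t1 dead and t1 is not a fixed register, no host instruction is
 * emitted at all -- t2 simply takes over t1's register above.  A real
 * host move is only produced when the input stays live, is TEMP_FIXED
 * (e.g. the env register), or a cross-class copy is required.
 */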
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    unsigned vece;
    int lowpart_ofs;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi. */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGReg oreg;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register. */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there. */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        lowpart_ofs = 0;
        if (HOST_BIG_ENDIAN) {
            lowpart_ofs = tcg_type_size(itype) - (1 << vece);
        }
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + lowpart_ofs)) {
            goto done;
        }
        /* Load the input into the destination vector register. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
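/*
 * To summarize the strategies tried above, in decreasing order of
 * preference: (1) dup directly from the input's vector register;
 * (2) if the input is not memory-coherent, an integer-to-vector move
 * followed by a reg-reg dup; (3) dupm from the input's (synced) memory
 * slot; (4) a plain vector load followed by a reg-reg dup.
 */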
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, i_required_regs;
        bool allocate_new_reg, copyto_new_reg;
        TCGTemp *ts2;
        int i1, i2;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        reg = ts->reg;
        i_preferred_regs = 0;
        i_required_regs = arg_ct->regs;
        allocate_new_reg = false;
        copyto_new_reg = false;

        switch (arg_ct->pair) {
        case 0: /* not paired */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);

                /*
                 * If the input is readonly, then it cannot also be an
                 * output and aliased to itself.  If the input is not
                 * dead after the instruction, we must allocate a new
                 * register and move it.
                 */
                if (temp_readonly(ts) || !IS_DEAD_ARG(i)
                    || def->args_ct[arg_ct->alias_index].newreg) {
                    allocate_new_reg = true;
                } else if (ts->val_type == TEMP_VAL_REG) {
                    /*
                     * Check if the current register has already been
                     * allocated for another input.
                     */
                    allocate_new_reg =
                        tcg_regset_test_reg(i_allocated_regs, reg);
                }
            }
            if (!allocate_new_reg) {
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                reg = ts->reg;
                allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
            }
            if (allocate_new_reg) {
                /*
                 * Allocate a new register matching the constraint
                 * and move the temporary register into it.
                 */
                temp_load(s, ts, tcg_target_available_regs[ts->type],
                          i_allocated_regs, 0);
                reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
                                    i_preferred_regs, ts->indirect_base);
                copyto_new_reg = true;
            }
            break;

        case 1:
            /* First of an input pair; if i1 == i2, the second is an output. */
            i1 = i;
            i2 = arg_ct->pair_index;
            ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;

            /*
             * It is easier to default to allocating a new pair
             * and to identify a few cases where it's not required.
             */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);
                if (IS_DEAD_ARG(i1) &&
                    IS_DEAD_ARG(i2) &&
                    !temp_readonly(ts) &&
                    ts->val_type == TEMP_VAL_REG &&
                    ts->reg < TCG_TARGET_NB_REGS - 1 &&
                    tcg_regset_test_reg(i_required_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
                    (ts2
                     ? ts2->val_type == TEMP_VAL_REG &&
                       ts2->reg == reg + 1 &&
                       !temp_readonly(ts2)
                     : s->reg_to_temp[reg + 1] == NULL)) {
                    break;
                }
            } else {
                /* Without aliasing, the pair must also be an input. */
                tcg_debug_assert(ts2);
                if (ts->val_type == TEMP_VAL_REG &&
                    ts2->val_type == TEMP_VAL_REG &&
                    ts2->reg == reg + 1 &&
                    tcg_regset_test_reg(i_required_regs, reg)) {
                    break;
                }
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
                                     0, ts->indirect_base);
            goto do_pair;

        case 2: /* pair second */
            reg = new_args[arg_ct->pair_index] + 1;
            goto do_pair;

        case 3: /* ialias with second output, no first input */
            tcg_debug_assert(arg_ct->ialias);
            i_preferred_regs = output_pref(op, arg_ct->alias_index);

            if (IS_DEAD_ARG(i) &&
                !temp_readonly(ts) &&
                ts->val_type == TEMP_VAL_REG &&
                reg > 0 &&
                s->reg_to_temp[reg - 1] == NULL &&
                tcg_regset_test_reg(i_required_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
                tcg_regset_set_reg(i_allocated_regs, reg - 1);
                break;
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
                                     i_allocated_regs, 0,
                                     ts->indirect_base);
            tcg_regset_set_reg(i_allocated_regs, reg);
            reg += 1;
            goto do_pair;

        do_pair:
            /*
             * If an aliased input is not dead after the instruction,
             * we must allocate a new register and move it.
             */
            if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
                TCGRegSet t_allocated_regs = i_allocated_regs;

                /*
                 * Because of the alias, and the continued life, make sure
                 * that the temp is somewhere *other* than the reg pair,
                 * and we get a copy in reg.
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }
    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                    break;
                }
                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                         output_pref(op, k), ts->indirect_base);
                break;

            case 2: /* second of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    switch (op->opc) {
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext_i32_i64:
        tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
        break;
    default:
        if (def->flags & TCG_OPF_VECTOR) {
            tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                           new_args, const_args);
        } else {
            tcg_out_op(s, op->opc, new_args, const_args);
        }
        break;
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
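/*
 * Illustrative walk-through (constraint letters as used by the
 * backends): for a three-address op constrained as C_O1_I2(r, r, ri)
 * -- one register output, two inputs of which the second also accepts
 * an immediate -- the input loop above loads both inputs or records a
 * matching constant in const_args[], the output loop allocates the
 * destination, and tcg_out_op() then emits the single host insn.
 */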
static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
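/*
 * Illustrative example of the immediate promotion above: a dup2_vec of
 * the constant pair 0x00000001 / 0x00000001 re-forms the 64-bit value
 * 0x0000000100000001, which dup_const() recognizes as a repeating
 * 32-bit pattern, so a single dupi_vec at VECE = MO_32 is emitted
 * instead of the generic two-register expansion.
 */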
static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}

static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               arg_slot_stk_ofs(arg_slot));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (arg_slot_reg_p(l->arg_slot)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
    }
}

static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
                         intptr_t ref_off, TCGRegSet *allocated_regs)
{
    TCGReg reg;

    if (arg_slot_reg_p(arg_slot)) {
        reg = tcg_target_call_iarg_regs[arg_slot];
        tcg_reg_free(s, reg, *allocated_regs);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
                            *allocated_regs, 0, false);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
                   arg_slot_stk_ofs(arg_slot));
    }
}
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info = tcg_call_info(op);
    TCGRegSet allocated_regs = s->reserved_regs;
    int i;

    /*
     * Move inputs into place in reverse order,
     * so that we place stacked arguments first.
     */
    for (i = nb_iargs - 1; i >= 0; --i) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            load_arg_normal(s, loc, ts, &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
                         arg_slot_stk_ofs(loc->ref_slot),
                         &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF_N:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Mark dead temporaries and free the associated registers. */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* Clobber call registers. */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /*
     * Save globals if they might be written by the helper,
     * sync them if they might be read.
     */
    if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    /*
     * If the ABI passes a pointer to the returned struct as the first
     * argument, load that now.  Pass a pointer to the output home slot.
     */
    if (info->out_kind == TCG_CALL_RET_BY_REF) {
        TCGTemp *ts = arg_temp(op->args[0]);

        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
    }

    tcg_out_call(s, tcg_call_func(op), info);

    /* Assign output registers and emit moves if needed. */
    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
        }
        break;

    case TCG_CALL_RET_BY_VEC:
        {
            TCGTemp *ts = arg_temp(op->args[0]);

            tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
            tcg_debug_assert(ts->temp_subindex == 0);
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ts);
            }
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       ts->mem_base->reg, ts->mem_offset);
        }
        /* fall through to mark all parts in memory */

    case TCG_CALL_RET_BY_REF:
        /* The callee has performed a write through the reference. */
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            ts->val_type = TEMP_VAL_MEM;
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Flush or discard output registers as needed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
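/*
 * Ordering recap for the call sequence above: inputs are placed in
 * reverse so that stacked arguments go first, dead inputs then release
 * their registers, every call-clobbered register is freed, globals are
 * saved or synced according to the helper's flags, and only then is
 * the call emitted and its outputs assigned.
 */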
/**
 * atom_and_align_for_opc:
 * @s: tcg context
 * @opc: memory operation code
 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
 * @allow_two_ops: true if we are prepared to issue two operations
 *
 * Return the alignment and atomicity to use for the inline fast path
 * for the given memory operation.  The alignment may be larger than
 * that specified in @opc, and the correct alignment will be diagnosed
 * by the slow path helper.
 *
 * If @allow_two_ops, the host is prepared to test for 2x alignment,
 * and issue two loads or stores for subalignment.
 */
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
{
    MemOp align = get_alignment_bits(opc);
    MemOp size = opc & MO_SIZE;
    MemOp half = size ? size - 1 : 0;
    MemOp atom, atmax;

    /* When serialized, no further atomicity required. */
    if (s->gen_tb->cflags & CF_PARALLEL) {
        atom = opc & MO_ATOM_MASK;
    } else {
        atom = MO_ATOM_NONE;
    }

    switch (atom) {
    case MO_ATOM_NONE:
        /* The operation requires no specific atomicity. */
        atmax = MO_8;
        break;

    case MO_ATOM_IFALIGN:
        atmax = size;
        break;

    case MO_ATOM_IFALIGN_PAIR:
        atmax = half;
        break;

    case MO_ATOM_WITHIN16:
        atmax = size;
        if (size == MO_128) {
            /* Misalignment implies !within16, and therefore no atomicity. */
        } else if (host_atom != MO_ATOM_WITHIN16) {
            /* The host does not implement within16, so require alignment. */
            align = MAX(align, size);
        }
        break;

    case MO_ATOM_WITHIN16_PAIR:
        atmax = size;
        /*
         * Misalignment implies !within16, and therefore half atomicity.
         * Any host prepared for two operations can implement this with
         * half alignment.
         */
        if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
            align = MAX(align, half);
        }
        break;

    case MO_ATOM_SUBALIGN:
        atmax = size;
        if (host_atom != MO_ATOM_SUBALIGN) {
            /* If unaligned but not odd, there are subobjects up to half. */
            if (allow_two_ops) {
                align = MAX(align, half);
            } else {
                align = MAX(align, size);
            }
        }
        break;

    default:
        g_assert_not_reached();
    }

    return (TCGAtomAlign){ .atom = atmax, .align = align };
}
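/*
 * Worked example (illustrative): for a 4-byte load tagged
 * MO_ATOM_WITHIN16 on a host that only provides MO_ATOM_IFALIGN, the
 * switch above raises align to MO_32 -- alignment is the only way such
 * a host can guarantee the required single-copy atomicity on the
 * inline fast path.
 */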
/*
 * Similarly for qemu_ld/st slow path helpers.
 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
 * using only the provided backend tcg_out_* functions.
 */

static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
{
    int ofs = arg_slot_stk_ofs(slot);

    /*
     * Each stack slot is TCG_TARGET_LONG_BITS.  If the host does not
     * require extension to uint64_t, adjust the address for uint32_t.
     */
    if (HOST_BIG_ENDIAN &&
        TCG_TARGET_REG_BITS == 64 &&
        type == TCG_TYPE_I32) {
        ofs += 4;
    }
    return ofs;
}

static void tcg_out_helper_load_slots(TCGContext *s,
                                      unsigned nmov, TCGMovExtend *mov,
                                      const TCGLdstHelperParam *parm)
{
    unsigned i;
    TCGReg dst3;

    /*
     * Start from the end, storing to the stack first.
     * This frees those registers, so we need not consider overlap.
     */
    for (i = nmov; i-- > 0; ) {
        unsigned slot = mov[i].dst;

        if (arg_slot_reg_p(slot)) {
            goto found_reg;
        }

        TCGReg src = mov[i].src;
        TCGType dst_type = mov[i].dst_type;
        MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;

        /* The argument is going onto the stack; extend into scratch. */
        if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
            tcg_debug_assert(parm->ntmp != 0);
            mov[i].dst = src = parm->tmp[0];
            tcg_out_movext1(s, &mov[i]);
        }

        tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
                   tcg_out_helper_stk_ofs(dst_type, slot));
    }
    return;

 found_reg:
    /*
     * The remaining arguments are in registers.
     * Convert slot numbers to argument registers.
     */
    nmov = i + 1;
    for (i = 0; i < nmov; ++i) {
        mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
    }

    switch (nmov) {
    case 4:
        /* The backend must have provided enough temps for the worst case. */
        tcg_debug_assert(parm->ntmp >= 2);

        dst3 = mov[3].dst;
        for (unsigned j = 0; j < 3; ++j) {
            if (dst3 == mov[j].src) {
                /*
                 * Conflict. Copy the source to a temporary, perform the
                 * remaining moves, then the extension from our scratch
                 * on the way out.
                 */
                TCGReg scratch = parm->tmp[1];

                tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
                tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
                tcg_out_movext1_new_src(s, &mov[3], scratch);
                return;
            }
        }

        /* No conflicts: perform this move and continue. */
        tcg_out_movext1(s, &mov[3]);
        /* fall through */

    case 3:
        tcg_out_movext3(s, mov, mov + 1, mov + 2,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 2:
        tcg_out_movext2(s, mov, mov + 1,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 1:
        tcg_out_movext1(s, mov);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
                                    TCGType type, tcg_target_long imm,
                                    const TCGLdstHelperParam *parm)
{
    if (arg_slot_reg_p(slot)) {
        tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
    } else {
        int ofs = tcg_out_helper_stk_ofs(type, slot);
        if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_movi(s, type, parm->tmp[0], imm);
            tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
        }
    }
}
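/*
 * Worked example for tcg_out_helper_stk_ofs() above (illustrative): on
 * a 64-bit big-endian host, a TCG_TYPE_I32 value bound to a stack slot
 * is stored at arg_slot_stk_ofs(slot) + 4, i.e. in the least
 * significant half of the 8-byte slot, which is where the callee reads
 * a 32-bit argument.  Note also that tcg_out_helper_load_slots() acts
 * as a small parallel-move resolver: stack stores are emitted first,
 * freeing their source registers, and a register destination that
 * would overwrite a still-pending source is detoured through the
 * parm->tmp[] scratch registers.
 */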
static void tcg_out_helper_load_common_args(TCGContext *s,
                                            const TCGLabelQemuLdst *ldst,
                                            const TCGLdstHelperParam *parm,
                                            const TCGHelperInfo *info,
                                            unsigned next_arg)
{
    TCGMovExtend ptr_mov = {
        .dst_type = TCG_TYPE_PTR,
        .src_type = TCG_TYPE_PTR,
        .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
    };
    const TCGCallArgumentLoc *loc = &info->in[0];
    TCGType type;
    unsigned slot;
    tcg_target_ulong imm;

    /*
     * Handle env, which is always first.
     */
    ptr_mov.dst = loc->arg_slot;
    ptr_mov.src = TCG_AREG0;
    tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);

    /*
     * Handle oi.
     */
    imm = ldst->oi;
    loc = &info->in[next_arg];
    type = TCG_TYPE_I32;
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
        break;
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        /* No extension required for MemOpIdx. */
        tcg_debug_assert(imm <= INT32_MAX);
        type = TCG_TYPE_REG;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
    next_arg++;

    /*
     * Handle ra.
     */
    loc = &info->in[next_arg];
    slot = loc->arg_slot;
    if (parm->ra_gen) {
        int arg_reg = -1;
        TCGReg ra_reg;

        if (arg_slot_reg_p(slot)) {
            arg_reg = tcg_target_call_iarg_regs[slot];
        }
        ra_reg = parm->ra_gen(s, ldst, arg_reg);

        ptr_mov.dst = slot;
        ptr_mov.src = ra_reg;
        tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
    } else {
        imm = (uintptr_t)ldst->raddr;
        tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
    }
}

static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
                                       const TCGCallArgumentLoc *loc,
                                       TCGType dst_type, TCGType src_type,
                                       TCGReg lo, TCGReg hi)
{
    MemOp reg_mo;

    if (dst_type <= TCG_TYPE_REG) {
        MemOp src_ext;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
            break;
        case TCG_CALL_ARG_EXTEND_U:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_UL;
            break;
        case TCG_CALL_ARG_EXTEND_S:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_SL;
            break;
        default:
            g_assert_not_reached();
        }

        mov[0].dst = loc->arg_slot;
        mov[0].dst_type = dst_type;
        mov[0].src = lo;
        mov[0].src_type = src_type;
        mov[0].src_ext = src_ext;
        return 1;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        assert(dst_type == TCG_TYPE_I64);
        reg_mo = MO_32;
    } else {
        assert(dst_type == TCG_TYPE_I128);
        reg_mo = MO_64;
    }

    mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
    mov[0].src = lo;
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = reg_mo;

    mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
    mov[1].src = hi;
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = reg_mo;

    return 2;
}
static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[2];
    unsigned next_arg, nmov;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_ld32_mmu;
        break;
    case MO_64:
        info = &info_helper_ld64_mmu;
        break;
    case MO_128:
        info = &info_helper_ld128_mmu;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;

    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part, then
         * load a zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        tcg_out_helper_load_slots(s, 1, mov, parm);

        tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
                                TCG_TYPE_I32, 0, parm);
        next_arg += 2;
    } else {
        nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                      ldst->addrlo_reg, ldst->addrhi_reg);
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        next_arg += nmov;
    }

    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
    case TCG_CALL_RET_BY_VEC:
        break;
    case TCG_CALL_RET_BY_REF:
        /*
         * The return reference is in the first argument slot.
         * We need memory in which to return: re-use the top of stack.
         */
        {
            int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;

            if (arg_slot_reg_p(0)) {
                tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
            } else {
                tcg_debug_assert(parm->ntmp != 0);
                tcg_out_addi_ptr(s, parm->tmp[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
                tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                           TCG_REG_CALL_STACK, ofs_slot0);
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
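/*
 * Illustrative slot layout: with a 64-bit guest address, the MO_64
 * slow path calls helper_ldq_mmu(env, addr, oi, ra), so info->in[]
 * describes four arguments, of which addr is loaded above and env,
 * oi and ra are filled in by tcg_out_helper_load_common_args().
 */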
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                  bool load_sign,
                                  const TCGLdstHelperParam *parm)
{
    MemOp mop = get_memop(ldst->oi);
    TCGMovExtend mov[2];
    int ofs_slot0;

    switch (ldst->type) {
    case TCG_TYPE_I64:
        if (TCG_TARGET_REG_BITS == 32) {
            break;
        }
        /* fall through */

    case TCG_TYPE_I32:
        mov[0].dst = ldst->datalo_reg;
        mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
        mov[0].dst_type = ldst->type;
        mov[0].src_type = TCG_TYPE_REG;

        /*
         * If load_sign, then we allowed the helper to perform the
         * appropriate sign extension to tcg_target_ulong, and all
         * we need now is a plain move.
         *
         * If not, then we expect the relevant extension instruction
         * to be no more expensive than a move, and we thus save the
         * icache etc by only using one of two helper functions.
         */
        if (load_sign || !(mop & MO_SIGN)) {
            if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
                mov[0].src_ext = MO_32;
            } else {
                mov[0].src_ext = MO_64;
            }
        } else {
            mov[0].src_ext = mop & MO_SSIZE;
        }
        tcg_out_movext1(s, mov);
        return;

    case TCG_TYPE_I128:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            break;
        case TCG_CALL_RET_BY_VEC:
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       TCG_REG_CALL_STACK, ofs_slot0);
            /* fall through */
        case TCG_CALL_RET_BY_REF:
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
            return;
        default:
            g_assert_not_reached();
        }
        break;

    default:
        g_assert_not_reached();
    }

    mov[0].dst = ldst->datalo_reg;
    mov[0].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    mov[1].dst = ldst->datahi_reg;
    mov[1].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}

static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[4];
    TCGType data_type;
    unsigned next_arg, nmov, n;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_st32_mmu;
        data_type = TCG_TYPE_I32;
        break;
    case MO_64:
        info = &info_helper_st64_mmu;
        data_type = TCG_TYPE_I64;
        break;
    case MO_128:
        info = &info_helper_st128_mmu;
        data_type = TCG_TYPE_I128;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;
    nmov = 0;

    /* Handle addr argument. */
    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part.  Later,
         * after we have processed the register inputs, we will load a
         * zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        next_arg += 2;
        nmov += 1;
    } else {
        n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                   ldst->addrlo_reg, ldst->addrhi_reg);
        next_arg += n;
        nmov += n;
    }

    /* Handle data argument. */
    loc = &info->in[next_arg];
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
                                   ldst->datalo_reg, ldst->datahi_reg);
        next_arg += n;
        nmov += n;
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        break;

    case TCG_CALL_ARG_BY_REF:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        tcg_debug_assert(data_type == TCG_TYPE_I128);
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));

        tcg_out_helper_load_slots(s, nmov, mov, parm);

        if (arg_slot_reg_p(loc->arg_slot)) {
            tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
                             TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
        } else {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
            tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                       TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
        }
        next_arg += 2;
        break;

    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /* Zero extend the address by loading a zero for the high part. */
        loc = &info->in[1 + !HOST_BIG_ENDIAN];
        tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
void tcg_dump_op_count(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
    int i, start_words, num_insns;
    TCGOp *op;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP:\n");
            tcg_dump_ops(s, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
                error = true;
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
            }
        }
        assert(!error);
    }
#endif

    tcg_optimize(s);

    reachable_code_pass(s);
    liveness_pass_0(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(pc_start))) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                fprintf(logfile, "OP before indirect lowering:\n");
                tcg_dump_ops(s, logfile, false);
                fprintf(logfile, "\n");
                qemu_log_unlock(logfile);
            }
        }

        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP after optimization and liveness analysis:\n");
            tcg_dump_ops(s, logfile, true);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /* Initialize goto_tb jump offsets. */
    tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    start_words = s->insn_start_words;
    s->gen_insn_data =
        tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);

    tcg_out_tb_start(s);

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < start_words; ++i) {
                s->gen_insn_data[num_insns * start_words + i] =
                    tcg_get_insn_start_param(op, i);
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_exit_tb:
            tcg_out_exit_tb(s, op->args[0]);
            break;
        case INDEX_op_goto_tb:
            tcg_out_goto_tb(s, op->args[0]);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}

void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */

typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */
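/*
 * How a debugger consumes this (per the GDB JIT interface docs): GDB
 * sets an internal breakpoint on __jit_debug_register_code() and, each
 * time it fires, walks __jit_debug_descriptor.first_entry to read the
 * in-memory ELF image registered by tcg_register_jit_int() below; the
 * generated code then appears under the symbol "code_gen_buffer".
 */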
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif