/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"
/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;       /* number of regions */
    size_t size;    /* size of one region */
    size_t stride;  /* .size + guard size */

    /* fields protected by the lock */
    size_t current;       /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
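/*
 * Layout sketch (this follows from tcg_region_init() and tcg_region_bounds()
 * below): regions are carved out of code_gen_buffer at 'stride' intervals
 * starting from 'start_aligned', each providing 'size' usable bytes followed
 * by one guard page. The first region is extended downwards to 'start' to
 * cover the unaligned head of the buffer, and the last region is extended
 * up to 'end':
 *
 *   start  start_aligned                                     end
 *   |      |<--- size --->|guard|<--- size --->|guard| ...   |
 *          |<----- stride ----->|
 */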
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
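/*
 * Note on the emit helpers above: when the value being written is exactly
 * one insn unit wide it is stored with a plain (naturally aligned)
 * assignment; otherwise it spans several smaller units and memcpy() is
 * used, with code_ptr advanced by the equivalent number of units. The
 * "if" tests a compile-time constant, so each target compiles down to
 * just one of the two branches.
 */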
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
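/*
 * Worked example (illustrative numbers only): with a 256 MB code_gen_buffer
 * and max_cpus == 8, the first iteration above (i == 8) yields
 * 256 MB / 64 = 4 MB per region, which is >= 2 MB, so 64 regions are used.
 * With a 64 MB buffer the loop settles on i == 4 (2 MB per region,
 * 32 regions).
 */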
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
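/*
 * Rationale for the subtractions above: each region donates one guard page
 * (stride - size) that can never hold code, and allocation within a region
 * stops at its highwater mark, so the final TCG_HIGHWATER bytes of each
 * region are likewise excluded from the usable capacity.
 */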
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
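/*
 * Note: the pool allocator hands out bump-pointer allocations from
 * TCG_POOL_CHUNK_SIZE chunks, which stay linked on pool_first for reuse
 * across resets, while oversized requests get a dedicated chunk on
 * pool_first_large that is freed on every reset. There is no
 * per-allocation free.
 */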
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for the
     * reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
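/*
 * Example of the 32-bit-host split above: a 64-bit global named "pc"
 * becomes two I32 halves "pc_0" and "pc_1" in adjacent TCGTemp slots,
 * mapped at mem_offset and mem_offset + 4, with the placement of the low
 * and high halves chosen according to HOST_WORDS_BIGENDIAN.
 */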
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
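/*
 * Note: freed temporaries are recycled via the free_temps bitmaps, indexed
 * by base type plus a TCG_TYPE_COUNT offset for local temps; a matching
 * free slot is simply re-marked as allocated, so repeated new/free pairs
 * within a TB do not grow nb_temps.
 */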
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
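/*
 * Note on the sizemask encoding consumed above (it is produced by the
 * helper declaration macros): bit 0 is set when the helper returns a
 * 64-bit value, and for argument i the bit at (1 << (i+1)*2) marks a
 * 64-bit argument while (2 << (i+1)*2) marks a signed one.
 */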
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
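/*
 * Note: this is a straightforward O(n^2) exchange sort, which is fine here
 * since n is bounded by the handful of operands an opcode can have, and it
 * runs once per opcode at context-init time rather than per translation.
 */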
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                    break;
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
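/*
 * Note: removed ops are not freed; they are parked on the free_ops list
 * and recycled by tcg_op_alloc() (which clears everything up to the link
 * field), so op allocation stays cheap and the backing pool storage is
 * reclaimed wholesale at the next tcg_func_start().
 */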
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void tcg_la_func_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void tcg_la_bb_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
    }
}
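/*
 * State bits used by the liveness passes: TS_DEAD means the temp's value
 * will not be used again before being overwritten, and TS_MEM means its
 * canonical copy lives in memory (so a register copy, if any, need not be
 * written back). "Dead and in memory" is therefore the safe state forced
 * on globals at basic-block and function boundaries above.
 */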
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    TCGOp *op, *op_prev;

    tcg_la_func_end(s);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *arg_ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (arg_ts->state & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        arg_ts->state = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state = TS_DEAD | TS_MEM;
                        }
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts && arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts) {
                            arg_ts->state &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            arg_temp(op->args[0])->state = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                for (i = 0; i < nb_oargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (arg_ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    arg_ts->state = TS_DEAD;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    for (i = 0; i < nb_globals; i++) {
                        s->temps[i].state |= TS_MEM;
                    }
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_temp(op->args[i])->state &= ~TS_DEAD;
                }
            }
            break;
        }
        op->life = arg_life;
    }
}

/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
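
/* Note: liveness_pass_2 only rewrites op arguments from indirect
   globals to their shadow temporaries, inserting a load before the
   first use and a store after the last write; whenever it reports
   changes, the caller is expected to re-run liveness_pass_1 over the
   rewritten op stream.  */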

#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif

static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
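
/* Illustrative usage of the helpers above: temp_free_or_dead(s, ts, 1)
   (via temp_dead) discards a value that will not be read again, while
   temp_free_or_dead(s, ts, -1) only releases the register, leaving the
   value reachable as TEMP_VAL_MEM in its canonical slot.  */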

/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, -1);
    }
}
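
/* Note that freeing a register never loses data: tcg_reg_free goes
   through temp_sync with a negative 'free_or_dead', so the evicted
   value is first stored to its memory slot and remains reloadable.  */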

/* Allocate a register belonging to reg1 & ~reg2 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    reg_ct = desired_regs & ~allocated_regs;
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL) {
            return reg;
        }
    }

    /* XXX: do better spill choice */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}
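
/* The two loops above encode a simple policy: prefer a register that is
   currently free, in allocation order, and only then evict.  The 'rev'
   argument (callers pass ts->indirect_base) merely selects
   indirect_reg_alloc_order instead of the default order.  */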

/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
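
/* E.g. a TEMP_VAL_CONST temp is materialized here with tcg_out_movi and
   marked mem_coherent = 0 (memory does not yet hold the value), while a
   TEMP_VAL_MEM temp is reloaded with tcg_out_ld and stays coherent.  */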

/* Save a temporary to memory.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety.  */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}

/* save globals to their canonical location and assume they can be
   modified by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}
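
/* The distinction between the two helpers above: save_globals assumes
   the values may be modified behind our back, so registers must not be
   relied upon afterwards; sync_globals only requires memory to be up to
   date, so a coherent register copy may live on across the operation.  */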

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety.  */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}

static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation.  */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life);
}
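
/* Constant propagation in action: unless the destination is a fixed
   register, no host instruction is emitted here.  The constant is only
   recorded in the temp and materialized later, by temp_load when a
   register is required or by temp_sync when memory must be updated.  */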

static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
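
/* Summary of the cases handled above: a constant source folds into
   tcg_reg_alloc_do_movi; a dead output that still needs syncing turns
   the mov into a plain store; a dying, non-fixed source lets the mov be
   elided by re-labelling its register; otherwise a real register-to-
   register tcg_out_mov is emitted.  */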

static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);
            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    ts->indirect_base);
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
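
/* tcg_reg_alloc_op proceeds in fixed phases: satisfy input constraints
   (honouring IALIAS input/output ties), kill dead inputs, spill around
   call-clobbering ops and basic-block ends, bind output registers, emit
   the host instruction, then sync or kill outputs as liveness dictates. */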

#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
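
/* Calls map straight onto the host calling convention: arguments beyond
   tcg_target_call_iarg_regs are written to TCG_REG_CALL_STACK slots,
   every call-clobbered register is freed before tcg_out_call, and the
   results are taken from tcg_target_call_oarg_regs.  */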

#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                               \
    do {                                                        \
        (to)->field += atomic_read(&((from)->field));           \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)
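
/* For instance, PROF_ADD(prof, orig, tb_count) expands to
       prof->tb_count += atomic_read(&orig->tb_count);
   so a snapshot can accumulate per-context counters race-free while
   vCPU threads keep updating them.  */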

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}
#else
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif

int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    s->ldst_labels = NULL;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    if (!tcg_out_ldst_finalize(s)) {
        return -1;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    if (!tcg_out_pool_finalize(s)) {
        return -1;
    }
#endif

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
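
/* Return value convention of tcg_gen_code: a negative value signals
   that the code buffer overflowed (the caller can flush the buffer and
   retry the translation); otherwise the number of bytes of host code
   emitted is returned.  */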

#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, "  avg cycles        %0.1f\n",
                s->restore_count ?
                (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}

static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */
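
/* Once registered, the fake ELF image lets any debugger implementing
   the GDB JIT interface symbolize generated code; e.g. a GDB session
   attached to QEMU should be able to resolve JITed frames to the
   code_gen_buffer symbol defined above.  */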

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif