2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
31 /* Define to jump the ELF file used to communicate with GDB. */
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
44 /* Note: the long term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
47 #define NO_CPU_IO_DEFS
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
55 # define ELF_CLASS ELFCLASS32
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
60 # define ELF_DATA ELFDATA2LSB
65 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
66 #error GUEST_BASE not supported on this host.
69 /* Forward declarations for functions declared in tcg-target.c and used here. */
70 static void tcg_target_init(TCGContext
*s
);
71 static void tcg_target_qemu_prologue(TCGContext
*s
);
72 static void patch_reloc(uint8_t *code_ptr
, int type
,
73 tcg_target_long value
, tcg_target_long addend
);
75 static void tcg_register_jit_int(void *buf
, size_t size
,
76 void *debug_frame
, size_t debug_frame_size
)
77 __attribute__((unused
));
79 /* Forward declarations for functions declared and used in tcg-target.c. */
80 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
);
81 static void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg1
,
82 tcg_target_long arg2
);
83 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg
);
84 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
85 TCGReg ret
, tcg_target_long arg
);
86 static void tcg_out_op(TCGContext
*s
, TCGOpcode opc
, const TCGArg
*args
,
87 const int *const_args
);
88 static void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
, TCGReg arg1
,
89 tcg_target_long arg2
);
90 static int tcg_target_const_match(tcg_target_long val
,
91 const TCGArgConstraint
*arg_ct
);
92 static int tcg_target_get_call_iarg_regs_count(int flags
);
94 TCGOpDef tcg_op_defs
[] = {
95 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
99 const size_t tcg_op_defs_max
= ARRAY_SIZE(tcg_op_defs
);
101 static TCGRegSet tcg_target_available_regs
[2];
102 static TCGRegSet tcg_target_call_clobber_regs
;
104 /* XXX: move that inside the context */
105 uint16_t *gen_opc_ptr
;
106 TCGArg
*gen_opparam_ptr
;
108 static inline void tcg_out8(TCGContext
*s
, uint8_t v
)
113 static inline void tcg_out16(TCGContext
*s
, uint16_t v
)
115 *(uint16_t *)s
->code_ptr
= v
;
119 static inline void tcg_out32(TCGContext
*s
, uint32_t v
)
121 *(uint32_t *)s
->code_ptr
= v
;
125 /* label relocation processing */
127 static void tcg_out_reloc(TCGContext
*s
, uint8_t *code_ptr
, int type
,
128 int label_index
, long addend
)
133 l
= &s
->labels
[label_index
];
135 /* FIXME: This may break relocations on RISC targets that
136 modify instruction fields in place. The caller may not have
137 written the initial value. */
138 patch_reloc(code_ptr
, type
, l
->u
.value
, addend
);
140 /* add a new relocation entry */
141 r
= tcg_malloc(sizeof(TCGRelocation
));
145 r
->next
= l
->u
.first_reloc
;
146 l
->u
.first_reloc
= r
;
150 static void tcg_out_label(TCGContext
*s
, int label_index
, void *ptr
)
154 tcg_target_long value
= (tcg_target_long
)ptr
;
156 l
= &s
->labels
[label_index
];
159 r
= l
->u
.first_reloc
;
161 patch_reloc(r
->ptr
, r
->type
, value
, r
->addend
);
168 int gen_new_label(void)
170 TCGContext
*s
= &tcg_ctx
;
174 if (s
->nb_labels
>= TCG_MAX_LABELS
)
176 idx
= s
->nb_labels
++;
179 l
->u
.first_reloc
= NULL
;
183 #include "tcg-target.c"
185 /* pool based memory allocation */
186 void *tcg_malloc_internal(TCGContext
*s
, int size
)
191 if (size
> TCG_POOL_CHUNK_SIZE
) {
192 /* big malloc: insert a new pool (XXX: could optimize) */
193 p
= g_malloc(sizeof(TCGPool
) + size
);
195 p
->next
= s
->pool_first_large
;
196 s
->pool_first_large
= p
;
207 pool_size
= TCG_POOL_CHUNK_SIZE
;
208 p
= g_malloc(sizeof(TCGPool
) + pool_size
);
212 s
->pool_current
->next
= p
;
221 s
->pool_cur
= p
->data
+ size
;
222 s
->pool_end
= p
->data
+ p
->size
;
226 void tcg_pool_reset(TCGContext
*s
)
229 for (p
= s
->pool_first_large
; p
; p
= t
) {
233 s
->pool_first_large
= NULL
;
234 s
->pool_cur
= s
->pool_end
= NULL
;
235 s
->pool_current
= NULL
;
238 void tcg_context_init(TCGContext
*s
)
240 int op
, total_args
, n
;
242 TCGArgConstraint
*args_ct
;
245 memset(s
, 0, sizeof(*s
));
246 s
->temps
= s
->static_temps
;
249 /* Count total number of arguments and allocate the corresponding
252 for(op
= 0; op
< NB_OPS
; op
++) {
253 def
= &tcg_op_defs
[op
];
254 n
= def
->nb_iargs
+ def
->nb_oargs
;
258 args_ct
= g_malloc(sizeof(TCGArgConstraint
) * total_args
);
259 sorted_args
= g_malloc(sizeof(int) * total_args
);
261 for(op
= 0; op
< NB_OPS
; op
++) {
262 def
= &tcg_op_defs
[op
];
263 def
->args_ct
= args_ct
;
264 def
->sorted_args
= sorted_args
;
265 n
= def
->nb_iargs
+ def
->nb_oargs
;
273 void tcg_prologue_init(TCGContext
*s
)
275 /* init global prologue and epilogue */
276 s
->code_buf
= code_gen_prologue
;
277 s
->code_ptr
= s
->code_buf
;
278 tcg_target_qemu_prologue(s
);
279 flush_icache_range((tcg_target_ulong
)s
->code_buf
,
280 (tcg_target_ulong
)s
->code_ptr
);
283 void tcg_set_frame(TCGContext
*s
, int reg
,
284 tcg_target_long start
, tcg_target_long size
)
286 s
->frame_start
= start
;
287 s
->frame_end
= start
+ size
;
291 void tcg_func_start(TCGContext
*s
)
295 s
->nb_temps
= s
->nb_globals
;
296 for(i
= 0; i
< (TCG_TYPE_COUNT
* 2); i
++)
297 s
->first_free_temp
[i
] = -1;
298 s
->labels
= tcg_malloc(sizeof(TCGLabel
) * TCG_MAX_LABELS
);
300 s
->current_frame_offset
= s
->frame_start
;
302 gen_opc_ptr
= gen_opc_buf
;
303 gen_opparam_ptr
= gen_opparam_buf
;
306 static inline void tcg_temp_alloc(TCGContext
*s
, int n
)
308 if (n
> TCG_MAX_TEMPS
)
312 static inline int tcg_global_reg_new_internal(TCGType type
, int reg
,
315 TCGContext
*s
= &tcg_ctx
;
319 #if TCG_TARGET_REG_BITS == 32
320 if (type
!= TCG_TYPE_I32
)
323 if (tcg_regset_test_reg(s
->reserved_regs
, reg
))
326 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
327 ts
= &s
->temps
[s
->nb_globals
];
328 ts
->base_type
= type
;
334 tcg_regset_set_reg(s
->reserved_regs
, reg
);
338 TCGv_i32
tcg_global_reg_new_i32(int reg
, const char *name
)
342 idx
= tcg_global_reg_new_internal(TCG_TYPE_I32
, reg
, name
);
343 return MAKE_TCGV_I32(idx
);
346 TCGv_i64
tcg_global_reg_new_i64(int reg
, const char *name
)
350 idx
= tcg_global_reg_new_internal(TCG_TYPE_I64
, reg
, name
);
351 return MAKE_TCGV_I64(idx
);
354 static inline int tcg_global_mem_new_internal(TCGType type
, int reg
,
355 tcg_target_long offset
,
358 TCGContext
*s
= &tcg_ctx
;
363 #if TCG_TARGET_REG_BITS == 32
364 if (type
== TCG_TYPE_I64
) {
366 tcg_temp_alloc(s
, s
->nb_globals
+ 2);
367 ts
= &s
->temps
[s
->nb_globals
];
368 ts
->base_type
= type
;
369 ts
->type
= TCG_TYPE_I32
;
371 ts
->mem_allocated
= 1;
373 #ifdef TCG_TARGET_WORDS_BIGENDIAN
374 ts
->mem_offset
= offset
+ 4;
376 ts
->mem_offset
= offset
;
378 pstrcpy(buf
, sizeof(buf
), name
);
379 pstrcat(buf
, sizeof(buf
), "_0");
380 ts
->name
= strdup(buf
);
383 ts
->base_type
= type
;
384 ts
->type
= TCG_TYPE_I32
;
386 ts
->mem_allocated
= 1;
388 #ifdef TCG_TARGET_WORDS_BIGENDIAN
389 ts
->mem_offset
= offset
;
391 ts
->mem_offset
= offset
+ 4;
393 pstrcpy(buf
, sizeof(buf
), name
);
394 pstrcat(buf
, sizeof(buf
), "_1");
395 ts
->name
= strdup(buf
);
401 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
402 ts
= &s
->temps
[s
->nb_globals
];
403 ts
->base_type
= type
;
406 ts
->mem_allocated
= 1;
408 ts
->mem_offset
= offset
;
415 TCGv_i32
tcg_global_mem_new_i32(int reg
, tcg_target_long offset
,
420 idx
= tcg_global_mem_new_internal(TCG_TYPE_I32
, reg
, offset
, name
);
421 return MAKE_TCGV_I32(idx
);
424 TCGv_i64
tcg_global_mem_new_i64(int reg
, tcg_target_long offset
,
429 idx
= tcg_global_mem_new_internal(TCG_TYPE_I64
, reg
, offset
, name
);
430 return MAKE_TCGV_I64(idx
);
433 static inline int tcg_temp_new_internal(TCGType type
, int temp_local
)
435 TCGContext
*s
= &tcg_ctx
;
442 idx
= s
->first_free_temp
[k
];
444 /* There is already an available temp with the
447 s
->first_free_temp
[k
] = ts
->next_free_temp
;
448 ts
->temp_allocated
= 1;
449 assert(ts
->temp_local
== temp_local
);
452 #if TCG_TARGET_REG_BITS == 32
453 if (type
== TCG_TYPE_I64
) {
454 tcg_temp_alloc(s
, s
->nb_temps
+ 2);
455 ts
= &s
->temps
[s
->nb_temps
];
456 ts
->base_type
= type
;
457 ts
->type
= TCG_TYPE_I32
;
458 ts
->temp_allocated
= 1;
459 ts
->temp_local
= temp_local
;
462 ts
->base_type
= TCG_TYPE_I32
;
463 ts
->type
= TCG_TYPE_I32
;
464 ts
->temp_allocated
= 1;
465 ts
->temp_local
= temp_local
;
471 tcg_temp_alloc(s
, s
->nb_temps
+ 1);
472 ts
= &s
->temps
[s
->nb_temps
];
473 ts
->base_type
= type
;
475 ts
->temp_allocated
= 1;
476 ts
->temp_local
= temp_local
;
482 #if defined(CONFIG_DEBUG_TCG)
488 TCGv_i32
tcg_temp_new_internal_i32(int temp_local
)
492 idx
= tcg_temp_new_internal(TCG_TYPE_I32
, temp_local
);
493 return MAKE_TCGV_I32(idx
);
496 TCGv_i64
tcg_temp_new_internal_i64(int temp_local
)
500 idx
= tcg_temp_new_internal(TCG_TYPE_I64
, temp_local
);
501 return MAKE_TCGV_I64(idx
);
504 static inline void tcg_temp_free_internal(int idx
)
506 TCGContext
*s
= &tcg_ctx
;
510 #if defined(CONFIG_DEBUG_TCG)
512 if (s
->temps_in_use
< 0) {
513 fprintf(stderr
, "More temporaries freed than allocated!\n");
517 assert(idx
>= s
->nb_globals
&& idx
< s
->nb_temps
);
519 assert(ts
->temp_allocated
!= 0);
520 ts
->temp_allocated
= 0;
524 ts
->next_free_temp
= s
->first_free_temp
[k
];
525 s
->first_free_temp
[k
] = idx
;
528 void tcg_temp_free_i32(TCGv_i32 arg
)
530 tcg_temp_free_internal(GET_TCGV_I32(arg
));
533 void tcg_temp_free_i64(TCGv_i64 arg
)
535 tcg_temp_free_internal(GET_TCGV_I64(arg
));
538 TCGv_i32
tcg_const_i32(int32_t val
)
541 t0
= tcg_temp_new_i32();
542 tcg_gen_movi_i32(t0
, val
);
546 TCGv_i64
tcg_const_i64(int64_t val
)
549 t0
= tcg_temp_new_i64();
550 tcg_gen_movi_i64(t0
, val
);
554 TCGv_i32
tcg_const_local_i32(int32_t val
)
557 t0
= tcg_temp_local_new_i32();
558 tcg_gen_movi_i32(t0
, val
);
562 TCGv_i64
tcg_const_local_i64(int64_t val
)
565 t0
= tcg_temp_local_new_i64();
566 tcg_gen_movi_i64(t0
, val
);
570 #if defined(CONFIG_DEBUG_TCG)
571 void tcg_clear_temp_count(void)
573 TCGContext
*s
= &tcg_ctx
;
577 int tcg_check_temp_count(void)
579 TCGContext
*s
= &tcg_ctx
;
580 if (s
->temps_in_use
) {
581 /* Clear the count so that we don't give another
582 * warning immediately next time around.
591 void tcg_register_helper(void *func
, const char *name
)
593 TCGContext
*s
= &tcg_ctx
;
595 if ((s
->nb_helpers
+ 1) > s
->allocated_helpers
) {
596 n
= s
->allocated_helpers
;
602 s
->helpers
= realloc(s
->helpers
, n
* sizeof(TCGHelperInfo
));
603 s
->allocated_helpers
= n
;
605 s
->helpers
[s
->nb_helpers
].func
= (tcg_target_ulong
)func
;
606 s
->helpers
[s
->nb_helpers
].name
= name
;
610 /* Note: we convert the 64 bit args to 32 bit and do some alignment
611 and endian swap. Maybe it would be better to do the alignment
612 and endian swap in tcg_reg_alloc_call(). */
613 void tcg_gen_callN(TCGContext
*s
, TCGv_ptr func
, unsigned int flags
,
614 int sizemask
, TCGArg ret
, int nargs
, TCGArg
*args
)
621 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
622 for (i
= 0; i
< nargs
; ++i
) {
623 int is_64bit
= sizemask
& (1 << (i
+1)*2);
624 int is_signed
= sizemask
& (2 << (i
+1)*2);
626 TCGv_i64 temp
= tcg_temp_new_i64();
627 TCGv_i64 orig
= MAKE_TCGV_I64(args
[i
]);
629 tcg_gen_ext32s_i64(temp
, orig
);
631 tcg_gen_ext32u_i64(temp
, orig
);
633 args
[i
] = GET_TCGV_I64(temp
);
636 #endif /* TCG_TARGET_EXTEND_ARGS */
638 *gen_opc_ptr
++ = INDEX_op_call
;
639 nparam
= gen_opparam_ptr
++;
640 if (ret
!= TCG_CALL_DUMMY_ARG
) {
641 #if TCG_TARGET_REG_BITS < 64
643 #ifdef TCG_TARGET_WORDS_BIGENDIAN
644 *gen_opparam_ptr
++ = ret
+ 1;
645 *gen_opparam_ptr
++ = ret
;
647 *gen_opparam_ptr
++ = ret
;
648 *gen_opparam_ptr
++ = ret
+ 1;
654 *gen_opparam_ptr
++ = ret
;
661 for (i
= 0; i
< nargs
; i
++) {
662 #if TCG_TARGET_REG_BITS < 64
663 int is_64bit
= sizemask
& (1 << (i
+1)*2);
665 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
666 /* some targets want aligned 64 bit args */
668 *gen_opparam_ptr
++ = TCG_CALL_DUMMY_ARG
;
672 /* If stack grows up, then we will be placing successive
673 arguments at lower addresses, which means we need to
674 reverse the order compared to how we would normally
675 treat either big or little-endian. For those arguments
676 that will wind up in registers, this still works for
677 HPPA (the only current STACK_GROWSUP target) since the
678 argument registers are *also* allocated in decreasing
679 order. If another such target is added, this logic may
680 have to get more complicated to differentiate between
681 stack arguments and register arguments. */
682 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
683 *gen_opparam_ptr
++ = args
[i
] + 1;
684 *gen_opparam_ptr
++ = args
[i
];
686 *gen_opparam_ptr
++ = args
[i
];
687 *gen_opparam_ptr
++ = args
[i
] + 1;
692 #endif /* TCG_TARGET_REG_BITS < 64 */
694 *gen_opparam_ptr
++ = args
[i
];
697 *gen_opparam_ptr
++ = GET_TCGV_PTR(func
);
699 *gen_opparam_ptr
++ = flags
;
701 *nparam
= (nb_rets
<< 16) | (real_args
+ 1);
703 /* total parameters, needed to go backward in the instruction stream */
704 *gen_opparam_ptr
++ = 1 + nb_rets
+ real_args
+ 3;
706 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
707 for (i
= 0; i
< nargs
; ++i
) {
708 int is_64bit
= sizemask
& (1 << (i
+1)*2);
710 TCGv_i64 temp
= MAKE_TCGV_I64(args
[i
]);
711 tcg_temp_free_i64(temp
);
714 #endif /* TCG_TARGET_EXTEND_ARGS */
717 #if TCG_TARGET_REG_BITS == 32
718 void tcg_gen_shifti_i64(TCGv_i64 ret
, TCGv_i64 arg1
,
719 int c
, int right
, int arith
)
722 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
723 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
724 } else if (c
>= 32) {
728 tcg_gen_sari_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
729 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), 31);
731 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
732 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
735 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_LOW(arg1
), c
);
736 tcg_gen_movi_i32(TCGV_LOW(ret
), 0);
741 t0
= tcg_temp_new_i32();
742 t1
= tcg_temp_new_i32();
744 tcg_gen_shli_i32(t0
, TCGV_HIGH(arg1
), 32 - c
);
746 tcg_gen_sari_i32(t1
, TCGV_HIGH(arg1
), c
);
748 tcg_gen_shri_i32(t1
, TCGV_HIGH(arg1
), c
);
749 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), c
);
750 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), t0
);
751 tcg_gen_mov_i32(TCGV_HIGH(ret
), t1
);
753 tcg_gen_shri_i32(t0
, TCGV_LOW(arg1
), 32 - c
);
754 /* Note: ret can be the same as arg1, so we use t1 */
755 tcg_gen_shli_i32(t1
, TCGV_LOW(arg1
), c
);
756 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), c
);
757 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(ret
), t0
);
758 tcg_gen_mov_i32(TCGV_LOW(ret
), t1
);
760 tcg_temp_free_i32(t0
);
761 tcg_temp_free_i32(t1
);
767 static void tcg_reg_alloc_start(TCGContext
*s
)
771 for(i
= 0; i
< s
->nb_globals
; i
++) {
774 ts
->val_type
= TEMP_VAL_REG
;
776 ts
->val_type
= TEMP_VAL_MEM
;
779 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
781 ts
->val_type
= TEMP_VAL_DEAD
;
782 ts
->mem_allocated
= 0;
785 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
786 s
->reg_to_temp
[i
] = -1;
790 static char *tcg_get_arg_str_idx(TCGContext
*s
, char *buf
, int buf_size
,
795 assert(idx
>= 0 && idx
< s
->nb_temps
);
798 if (idx
< s
->nb_globals
) {
799 pstrcpy(buf
, buf_size
, ts
->name
);
802 snprintf(buf
, buf_size
, "loc%d", idx
- s
->nb_globals
);
804 snprintf(buf
, buf_size
, "tmp%d", idx
- s
->nb_globals
);
809 char *tcg_get_arg_str_i32(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i32 arg
)
811 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I32(arg
));
814 char *tcg_get_arg_str_i64(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i64 arg
)
816 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I64(arg
));
819 static int helper_cmp(const void *p1
, const void *p2
)
821 const TCGHelperInfo
*th1
= p1
;
822 const TCGHelperInfo
*th2
= p2
;
823 if (th1
->func
< th2
->func
)
825 else if (th1
->func
== th2
->func
)
831 /* find helper definition (Note: A hash table would be better) */
832 static TCGHelperInfo
*tcg_find_helper(TCGContext
*s
, tcg_target_ulong val
)
838 if (unlikely(!s
->helpers_sorted
)) {
839 qsort(s
->helpers
, s
->nb_helpers
, sizeof(TCGHelperInfo
),
841 s
->helpers_sorted
= 1;
846 m_max
= s
->nb_helpers
- 1;
847 while (m_min
<= m_max
) {
848 m
= (m_min
+ m_max
) >> 1;
862 static const char * const cond_name
[] =
864 [TCG_COND_EQ
] = "eq",
865 [TCG_COND_NE
] = "ne",
866 [TCG_COND_LT
] = "lt",
867 [TCG_COND_GE
] = "ge",
868 [TCG_COND_LE
] = "le",
869 [TCG_COND_GT
] = "gt",
870 [TCG_COND_LTU
] = "ltu",
871 [TCG_COND_GEU
] = "geu",
872 [TCG_COND_LEU
] = "leu",
873 [TCG_COND_GTU
] = "gtu"
876 void tcg_dump_ops(TCGContext
*s
)
878 const uint16_t *opc_ptr
;
882 int i
, k
, nb_oargs
, nb_iargs
, nb_cargs
, first_insn
;
887 opc_ptr
= gen_opc_buf
;
888 args
= gen_opparam_buf
;
889 while (opc_ptr
< gen_opc_ptr
) {
891 def
= &tcg_op_defs
[c
];
892 if (c
== INDEX_op_debug_insn_start
) {
894 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
895 pc
= ((uint64_t)args
[1] << 32) | args
[0];
902 qemu_log(" ---- 0x%" PRIx64
, pc
);
904 nb_oargs
= def
->nb_oargs
;
905 nb_iargs
= def
->nb_iargs
;
906 nb_cargs
= def
->nb_cargs
;
907 } else if (c
== INDEX_op_call
) {
910 /* variable number of arguments */
912 nb_oargs
= arg
>> 16;
913 nb_iargs
= arg
& 0xffff;
914 nb_cargs
= def
->nb_cargs
;
916 qemu_log(" %s ", def
->name
);
920 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
921 args
[nb_oargs
+ nb_iargs
- 1]));
923 qemu_log(",$0x%" TCG_PRIlx
, args
[nb_oargs
+ nb_iargs
]);
925 qemu_log(",$%d", nb_oargs
);
926 for(i
= 0; i
< nb_oargs
; i
++) {
928 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
931 for(i
= 0; i
< (nb_iargs
- 1); i
++) {
933 if (args
[nb_oargs
+ i
] == TCG_CALL_DUMMY_ARG
) {
936 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
937 args
[nb_oargs
+ i
]));
940 } else if (c
== INDEX_op_movi_i32
941 #if TCG_TARGET_REG_BITS == 64
942 || c
== INDEX_op_movi_i64
945 tcg_target_ulong val
;
948 nb_oargs
= def
->nb_oargs
;
949 nb_iargs
= def
->nb_iargs
;
950 nb_cargs
= def
->nb_cargs
;
951 qemu_log(" %s %s,$", def
->name
,
952 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), args
[0]));
954 th
= tcg_find_helper(s
, val
);
956 qemu_log("%s", th
->name
);
958 if (c
== INDEX_op_movi_i32
) {
959 qemu_log("0x%x", (uint32_t)val
);
961 qemu_log("0x%" PRIx64
, (uint64_t)val
);
965 qemu_log(" %s ", def
->name
);
966 if (c
== INDEX_op_nopn
) {
967 /* variable number of arguments */
972 nb_oargs
= def
->nb_oargs
;
973 nb_iargs
= def
->nb_iargs
;
974 nb_cargs
= def
->nb_cargs
;
978 for(i
= 0; i
< nb_oargs
; i
++) {
982 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
985 for(i
= 0; i
< nb_iargs
; i
++) {
989 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
993 case INDEX_op_brcond_i32
:
994 case INDEX_op_setcond_i32
:
995 case INDEX_op_movcond_i32
:
996 #if TCG_TARGET_REG_BITS == 32
997 case INDEX_op_brcond2_i32
:
998 case INDEX_op_setcond2_i32
:
1000 case INDEX_op_brcond_i64
:
1001 case INDEX_op_setcond_i64
:
1002 case INDEX_op_movcond_i64
:
1004 if (args
[k
] < ARRAY_SIZE(cond_name
) && cond_name
[args
[k
]]) {
1005 qemu_log(",%s", cond_name
[args
[k
++]]);
1007 qemu_log(",$0x%" TCG_PRIlx
, args
[k
++]);
1015 for(; i
< nb_cargs
; i
++) {
1020 qemu_log("$0x%" TCG_PRIlx
, arg
);
1024 args
+= nb_iargs
+ nb_oargs
+ nb_cargs
;
1028 /* we give more priority to constraints with less registers */
1029 static int get_constraint_priority(const TCGOpDef
*def
, int k
)
1031 const TCGArgConstraint
*arg_ct
;
1034 arg_ct
= &def
->args_ct
[k
];
1035 if (arg_ct
->ct
& TCG_CT_ALIAS
) {
1036 /* an alias is equivalent to a single register */
1039 if (!(arg_ct
->ct
& TCG_CT_REG
))
1042 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1043 if (tcg_regset_test_reg(arg_ct
->u
.regs
, i
))
1047 return TCG_TARGET_NB_REGS
- n
+ 1;
1050 /* sort from highest priority to lowest */
1051 static void sort_constraints(TCGOpDef
*def
, int start
, int n
)
1053 int i
, j
, p1
, p2
, tmp
;
1055 for(i
= 0; i
< n
; i
++)
1056 def
->sorted_args
[start
+ i
] = start
+ i
;
1059 for(i
= 0; i
< n
- 1; i
++) {
1060 for(j
= i
+ 1; j
< n
; j
++) {
1061 p1
= get_constraint_priority(def
, def
->sorted_args
[start
+ i
]);
1062 p2
= get_constraint_priority(def
, def
->sorted_args
[start
+ j
]);
1064 tmp
= def
->sorted_args
[start
+ i
];
1065 def
->sorted_args
[start
+ i
] = def
->sorted_args
[start
+ j
];
1066 def
->sorted_args
[start
+ j
] = tmp
;
1072 void tcg_add_target_add_op_defs(const TCGTargetOpDef
*tdefs
)
1080 if (tdefs
->op
== (TCGOpcode
)-1)
1083 assert((unsigned)op
< NB_OPS
);
1084 def
= &tcg_op_defs
[op
];
1085 #if defined(CONFIG_DEBUG_TCG)
1086 /* Duplicate entry in op definitions? */
1090 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
1091 for(i
= 0; i
< nb_args
; i
++) {
1092 ct_str
= tdefs
->args_ct_str
[i
];
1093 /* Incomplete TCGTargetOpDef entry? */
1094 assert(ct_str
!= NULL
);
1095 tcg_regset_clear(def
->args_ct
[i
].u
.regs
);
1096 def
->args_ct
[i
].ct
= 0;
1097 if (ct_str
[0] >= '0' && ct_str
[0] <= '9') {
1099 oarg
= ct_str
[0] - '0';
1100 assert(oarg
< def
->nb_oargs
);
1101 assert(def
->args_ct
[oarg
].ct
& TCG_CT_REG
);
1102 /* TCG_CT_ALIAS is for the output arguments. The input
1103 argument is tagged with TCG_CT_IALIAS. */
1104 def
->args_ct
[i
] = def
->args_ct
[oarg
];
1105 def
->args_ct
[oarg
].ct
= TCG_CT_ALIAS
;
1106 def
->args_ct
[oarg
].alias_index
= i
;
1107 def
->args_ct
[i
].ct
|= TCG_CT_IALIAS
;
1108 def
->args_ct
[i
].alias_index
= oarg
;
1111 if (*ct_str
== '\0')
1115 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
1119 if (target_parse_constraint(&def
->args_ct
[i
], &ct_str
) < 0) {
1120 fprintf(stderr
, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1121 ct_str
, i
, def
->name
);
1129 /* TCGTargetOpDef entry with too much information? */
1130 assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
1132 /* sort the constraints (XXX: this is just a heuristic) */
1133 sort_constraints(def
, 0, def
->nb_oargs
);
1134 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
1140 printf("%s: sorted=", def
->name
);
1141 for(i
= 0; i
< def
->nb_oargs
+ def
->nb_iargs
; i
++)
1142 printf(" %d", def
->sorted_args
[i
]);
1149 #if defined(CONFIG_DEBUG_TCG)
1151 for (op
= 0; op
< ARRAY_SIZE(tcg_op_defs
); op
++) {
1152 const TCGOpDef
*def
= &tcg_op_defs
[op
];
1153 if (op
< INDEX_op_call
1154 || op
== INDEX_op_debug_insn_start
1155 || (def
->flags
& TCG_OPF_NOT_PRESENT
)) {
1156 /* Wrong entry in op definitions? */
1158 fprintf(stderr
, "Invalid op definition for %s\n", def
->name
);
1162 /* Missing entry in op definitions? */
1164 fprintf(stderr
, "Missing op definition for %s\n", def
->name
);
1175 #ifdef USE_LIVENESS_ANALYSIS
1177 /* set a nop for an operation using 'nb_args' */
1178 static inline void tcg_set_nop(TCGContext
*s
, uint16_t *opc_ptr
,
1179 TCGArg
*args
, int nb_args
)
1182 *opc_ptr
= INDEX_op_nop
;
1184 *opc_ptr
= INDEX_op_nopn
;
1186 args
[nb_args
- 1] = nb_args
;
1190 /* liveness analysis: end of function: globals are live, temps are
1192 /* XXX: at this stage, not used as there would be little gains because
1193 most TBs end with a conditional jump. */
1194 static inline void tcg_la_func_end(TCGContext
*s
, uint8_t *dead_temps
)
1196 memset(dead_temps
, 0, s
->nb_globals
);
1197 memset(dead_temps
+ s
->nb_globals
, 1, s
->nb_temps
- s
->nb_globals
);
1200 /* liveness analysis: end of basic block: globals are live, temps are
1201 dead, local temps are live. */
1202 static inline void tcg_la_bb_end(TCGContext
*s
, uint8_t *dead_temps
)
1207 memset(dead_temps
, 0, s
->nb_globals
);
1208 ts
= &s
->temps
[s
->nb_globals
];
1209 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
1218 /* Liveness analysis : update the opc_dead_args array to tell if a
1219 given input arguments is dead. Instructions updating dead
1220 temporaries are removed. */
1221 static void tcg_liveness_analysis(TCGContext
*s
)
1223 int i
, op_index
, nb_args
, nb_iargs
, nb_oargs
, arg
, nb_ops
;
1226 const TCGOpDef
*def
;
1227 uint8_t *dead_temps
;
1228 unsigned int dead_args
;
1230 gen_opc_ptr
++; /* skip end */
1232 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1234 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1236 dead_temps
= tcg_malloc(s
->nb_temps
);
1237 memset(dead_temps
, 1, s
->nb_temps
);
1239 args
= gen_opparam_ptr
;
1240 op_index
= nb_ops
- 1;
1241 while (op_index
>= 0) {
1242 op
= gen_opc_buf
[op_index
];
1243 def
= &tcg_op_defs
[op
];
1251 nb_iargs
= args
[0] & 0xffff;
1252 nb_oargs
= args
[0] >> 16;
1254 call_flags
= args
[nb_oargs
+ nb_iargs
];
1256 /* pure functions can be removed if their result is not
1258 if (call_flags
& TCG_CALL_PURE
) {
1259 for(i
= 0; i
< nb_oargs
; i
++) {
1261 if (!dead_temps
[arg
])
1262 goto do_not_remove_call
;
1264 tcg_set_nop(s
, gen_opc_buf
+ op_index
,
1269 /* output args are dead */
1271 for(i
= 0; i
< nb_oargs
; i
++) {
1273 if (dead_temps
[arg
]) {
1274 dead_args
|= (1 << i
);
1276 dead_temps
[arg
] = 1;
1279 if (!(call_flags
& TCG_CALL_CONST
)) {
1280 /* globals are live (they may be used by the call) */
1281 memset(dead_temps
, 0, s
->nb_globals
);
1284 /* input args are live */
1285 for(i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
1287 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1288 if (dead_temps
[arg
]) {
1289 dead_args
|= (1 << i
);
1291 dead_temps
[arg
] = 0;
1294 s
->op_dead_args
[op_index
] = dead_args
;
1299 case INDEX_op_debug_insn_start
:
1300 args
-= def
->nb_args
;
1306 case INDEX_op_discard
:
1308 /* mark the temporary as dead */
1309 dead_temps
[args
[0]] = 1;
1313 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1315 args
-= def
->nb_args
;
1316 nb_iargs
= def
->nb_iargs
;
1317 nb_oargs
= def
->nb_oargs
;
1319 /* Test if the operation can be removed because all
1320 its outputs are dead. We assume that nb_oargs == 0
1321 implies side effects */
1322 if (!(def
->flags
& TCG_OPF_SIDE_EFFECTS
) && nb_oargs
!= 0) {
1323 for(i
= 0; i
< nb_oargs
; i
++) {
1325 if (!dead_temps
[arg
])
1328 tcg_set_nop(s
, gen_opc_buf
+ op_index
, args
, def
->nb_args
);
1329 #ifdef CONFIG_PROFILER
1335 /* output args are dead */
1337 for(i
= 0; i
< nb_oargs
; i
++) {
1339 if (dead_temps
[arg
]) {
1340 dead_args
|= (1 << i
);
1342 dead_temps
[arg
] = 1;
1345 /* if end of basic block, update */
1346 if (def
->flags
& TCG_OPF_BB_END
) {
1347 tcg_la_bb_end(s
, dead_temps
);
1348 } else if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
1349 /* globals are live */
1350 memset(dead_temps
, 0, s
->nb_globals
);
1353 /* input args are live */
1354 for(i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
1356 if (dead_temps
[arg
]) {
1357 dead_args
|= (1 << i
);
1359 dead_temps
[arg
] = 0;
1361 s
->op_dead_args
[op_index
] = dead_args
;
1368 if (args
!= gen_opparam_buf
)
1372 /* dummy liveness analysis */
1373 static void tcg_liveness_analysis(TCGContext
*s
)
1376 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1378 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1379 memset(s
->op_dead_args
, 0, nb_ops
* sizeof(uint16_t));
1384 static void dump_regs(TCGContext
*s
)
1390 for(i
= 0; i
< s
->nb_temps
; i
++) {
1392 printf(" %10s: ", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), i
));
1393 switch(ts
->val_type
) {
1395 printf("%s", tcg_target_reg_names
[ts
->reg
]);
1398 printf("%d(%s)", (int)ts
->mem_offset
, tcg_target_reg_names
[ts
->mem_reg
]);
1400 case TEMP_VAL_CONST
:
1401 printf("$0x%" TCG_PRIlx
, ts
->val
);
1413 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1414 if (s
->reg_to_temp
[i
] >= 0) {
1416 tcg_target_reg_names
[i
],
1417 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), s
->reg_to_temp
[i
]));
1422 static void check_regs(TCGContext
*s
)
1428 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1429 k
= s
->reg_to_temp
[reg
];
1432 if (ts
->val_type
!= TEMP_VAL_REG
||
1434 printf("Inconsistency for register %s:\n",
1435 tcg_target_reg_names
[reg
]);
1440 for(k
= 0; k
< s
->nb_temps
; k
++) {
1442 if (ts
->val_type
== TEMP_VAL_REG
&&
1444 s
->reg_to_temp
[ts
->reg
] != k
) {
1445 printf("Inconsistency for temp %s:\n",
1446 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), k
));
1448 printf("reg state:\n");
1456 static void temp_allocate_frame(TCGContext
*s
, int temp
)
1459 ts
= &s
->temps
[temp
];
1460 #ifndef __sparc_v9__ /* Sparc64 stack is accessed with offset of 2047 */
1461 s
->current_frame_offset
= (s
->current_frame_offset
+
1462 (tcg_target_long
)sizeof(tcg_target_long
) - 1) &
1463 ~(sizeof(tcg_target_long
) - 1);
1465 if (s
->current_frame_offset
+ (tcg_target_long
)sizeof(tcg_target_long
) >
1469 ts
->mem_offset
= s
->current_frame_offset
;
1470 ts
->mem_reg
= s
->frame_reg
;
1471 ts
->mem_allocated
= 1;
1472 s
->current_frame_offset
+= (tcg_target_long
)sizeof(tcg_target_long
);
1475 /* free register 'reg' by spilling the corresponding temporary if necessary */
1476 static void tcg_reg_free(TCGContext
*s
, int reg
)
1481 temp
= s
->reg_to_temp
[reg
];
1483 ts
= &s
->temps
[temp
];
1484 assert(ts
->val_type
== TEMP_VAL_REG
);
1485 if (!ts
->mem_coherent
) {
1486 if (!ts
->mem_allocated
)
1487 temp_allocate_frame(s
, temp
);
1488 tcg_out_st(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1490 ts
->val_type
= TEMP_VAL_MEM
;
1491 s
->reg_to_temp
[reg
] = -1;
1495 /* Allocate a register belonging to reg1 & ~reg2 */
1496 static int tcg_reg_alloc(TCGContext
*s
, TCGRegSet reg1
, TCGRegSet reg2
)
1501 tcg_regset_andnot(reg_ct
, reg1
, reg2
);
1503 /* first try free registers */
1504 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1505 reg
= tcg_target_reg_alloc_order
[i
];
1506 if (tcg_regset_test_reg(reg_ct
, reg
) && s
->reg_to_temp
[reg
] == -1)
1510 /* XXX: do better spill choice */
1511 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1512 reg
= tcg_target_reg_alloc_order
[i
];
1513 if (tcg_regset_test_reg(reg_ct
, reg
)) {
1514 tcg_reg_free(s
, reg
);
1522 /* save a temporary to memory. 'allocated_regs' is used in case a
1523 temporary register needs to be allocated to store a constant. */
1524 static void temp_save(TCGContext
*s
, int temp
, TCGRegSet allocated_regs
)
1529 ts
= &s
->temps
[temp
];
1530 if (!ts
->fixed_reg
) {
1531 switch(ts
->val_type
) {
1533 tcg_reg_free(s
, ts
->reg
);
1536 ts
->val_type
= TEMP_VAL_MEM
;
1538 case TEMP_VAL_CONST
:
1539 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1541 if (!ts
->mem_allocated
)
1542 temp_allocate_frame(s
, temp
);
1543 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1544 tcg_out_st(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1545 ts
->val_type
= TEMP_VAL_MEM
;
1555 /* save globals to their canonical location and assume they can be
1556 modified be the following code. 'allocated_regs' is used in case a
1557 temporary registers needs to be allocated to store a constant. */
1558 static void save_globals(TCGContext
*s
, TCGRegSet allocated_regs
)
1562 for(i
= 0; i
< s
->nb_globals
; i
++) {
1563 temp_save(s
, i
, allocated_regs
);
1567 /* at the end of a basic block, we assume all temporaries are dead and
1568 all globals are stored at their canonical location. */
1569 static void tcg_reg_alloc_bb_end(TCGContext
*s
, TCGRegSet allocated_regs
)
1574 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
1576 if (ts
->temp_local
) {
1577 temp_save(s
, i
, allocated_regs
);
1579 if (ts
->val_type
== TEMP_VAL_REG
) {
1580 s
->reg_to_temp
[ts
->reg
] = -1;
1582 ts
->val_type
= TEMP_VAL_DEAD
;
1586 save_globals(s
, allocated_regs
);
/* True if liveness analysis marked argument 'n' of the current op as dead
   (dead_args is the per-op bitmask read from s->op_dead_args). */
#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1591 static void tcg_reg_alloc_movi(TCGContext
*s
, const TCGArg
*args
)
1594 tcg_target_ulong val
;
1596 ots
= &s
->temps
[args
[0]];
1599 if (ots
->fixed_reg
) {
1600 /* for fixed registers, we do not do any constant
1602 tcg_out_movi(s
, ots
->type
, ots
->reg
, val
);
1604 /* The movi is not explicitly generated here */
1605 if (ots
->val_type
== TEMP_VAL_REG
)
1606 s
->reg_to_temp
[ots
->reg
] = -1;
1607 ots
->val_type
= TEMP_VAL_CONST
;
1612 static void tcg_reg_alloc_mov(TCGContext
*s
, const TCGOpDef
*def
,
1614 unsigned int dead_args
)
1618 const TCGArgConstraint
*arg_ct
;
1620 ots
= &s
->temps
[args
[0]];
1621 ts
= &s
->temps
[args
[1]];
1622 arg_ct
= &def
->args_ct
[0];
1624 /* XXX: always mark arg dead if IS_DEAD_ARG(1) */
1625 if (ts
->val_type
== TEMP_VAL_REG
) {
1626 if (IS_DEAD_ARG(1) && !ts
->fixed_reg
&& !ots
->fixed_reg
) {
1627 /* the mov can be suppressed */
1628 if (ots
->val_type
== TEMP_VAL_REG
)
1629 s
->reg_to_temp
[ots
->reg
] = -1;
1631 s
->reg_to_temp
[reg
] = -1;
1632 ts
->val_type
= TEMP_VAL_DEAD
;
1634 if (ots
->val_type
== TEMP_VAL_REG
) {
1637 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, s
->reserved_regs
);
1639 if (ts
->reg
!= reg
) {
1640 tcg_out_mov(s
, ots
->type
, reg
, ts
->reg
);
1643 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1644 if (ots
->val_type
== TEMP_VAL_REG
) {
1647 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, s
->reserved_regs
);
1649 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1650 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1651 if (ots
->fixed_reg
) {
1653 tcg_out_movi(s
, ots
->type
, reg
, ts
->val
);
1655 /* propagate constant */
1656 if (ots
->val_type
== TEMP_VAL_REG
)
1657 s
->reg_to_temp
[ots
->reg
] = -1;
1658 ots
->val_type
= TEMP_VAL_CONST
;
1665 s
->reg_to_temp
[reg
] = args
[0];
1667 ots
->val_type
= TEMP_VAL_REG
;
1668 ots
->mem_coherent
= 0;
1671 static void tcg_reg_alloc_op(TCGContext
*s
,
1672 const TCGOpDef
*def
, TCGOpcode opc
,
1674 unsigned int dead_args
)
1676 TCGRegSet allocated_regs
;
1677 int i
, k
, nb_iargs
, nb_oargs
, reg
;
1679 const TCGArgConstraint
*arg_ct
;
1681 TCGArg new_args
[TCG_MAX_OP_ARGS
];
1682 int const_args
[TCG_MAX_OP_ARGS
];
1684 nb_oargs
= def
->nb_oargs
;
1685 nb_iargs
= def
->nb_iargs
;
1687 /* copy constants */
1688 memcpy(new_args
+ nb_oargs
+ nb_iargs
,
1689 args
+ nb_oargs
+ nb_iargs
,
1690 sizeof(TCGArg
) * def
->nb_cargs
);
1692 /* satisfy input constraints */
1693 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1694 for(k
= 0; k
< nb_iargs
; k
++) {
1695 i
= def
->sorted_args
[nb_oargs
+ k
];
1697 arg_ct
= &def
->args_ct
[i
];
1698 ts
= &s
->temps
[arg
];
1699 if (ts
->val_type
== TEMP_VAL_MEM
) {
1700 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1701 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1702 ts
->val_type
= TEMP_VAL_REG
;
1704 ts
->mem_coherent
= 1;
1705 s
->reg_to_temp
[reg
] = arg
;
1706 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1707 if (tcg_target_const_match(ts
->val
, arg_ct
)) {
1708 /* constant is OK for instruction */
1710 new_args
[i
] = ts
->val
;
1713 /* need to move to a register */
1714 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1715 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1716 ts
->val_type
= TEMP_VAL_REG
;
1718 ts
->mem_coherent
= 0;
1719 s
->reg_to_temp
[reg
] = arg
;
1722 assert(ts
->val_type
== TEMP_VAL_REG
);
1723 if (arg_ct
->ct
& TCG_CT_IALIAS
) {
1724 if (ts
->fixed_reg
) {
1725 /* if fixed register, we must allocate a new register
1726 if the alias is not the same register */
1727 if (arg
!= args
[arg_ct
->alias_index
])
1728 goto allocate_in_reg
;
1730 /* if the input is aliased to an output and if it is
1731 not dead after the instruction, we must allocate
1732 a new register and move it */
1733 if (!IS_DEAD_ARG(i
)) {
1734 goto allocate_in_reg
;
1739 if (tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1740 /* nothing to do : the constraint is satisfied */
1743 /* allocate a new register matching the constraint
1744 and move the temporary register into it */
1745 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1746 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1750 tcg_regset_set_reg(allocated_regs
, reg
);
1754 if (def
->flags
& TCG_OPF_BB_END
) {
1755 tcg_reg_alloc_bb_end(s
, allocated_regs
);
1757 /* mark dead temporaries and free the associated registers */
1758 for(i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
1760 if (IS_DEAD_ARG(i
)) {
1761 ts
= &s
->temps
[arg
];
1762 if (!ts
->fixed_reg
) {
1763 if (ts
->val_type
== TEMP_VAL_REG
)
1764 s
->reg_to_temp
[ts
->reg
] = -1;
1765 ts
->val_type
= TEMP_VAL_DEAD
;
1770 if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
1771 /* XXX: permit generic clobber register list ? */
1772 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1773 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, reg
)) {
1774 tcg_reg_free(s
, reg
);
1777 /* XXX: for load/store we could do that only for the slow path
1778 (i.e. when a memory callback is called) */
1780 /* store globals and free associated registers (we assume the insn
1781 can modify any global. */
1782 save_globals(s
, allocated_regs
);
1785 /* satisfy the output constraints */
1786 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1787 for(k
= 0; k
< nb_oargs
; k
++) {
1788 i
= def
->sorted_args
[k
];
1790 arg_ct
= &def
->args_ct
[i
];
1791 ts
= &s
->temps
[arg
];
1792 if (arg_ct
->ct
& TCG_CT_ALIAS
) {
1793 reg
= new_args
[arg_ct
->alias_index
];
1795 /* if fixed register, we try to use it */
1797 if (ts
->fixed_reg
&&
1798 tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1801 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1803 tcg_regset_set_reg(allocated_regs
, reg
);
1804 /* if a fixed register is used, then a move will be done afterwards */
1805 if (!ts
->fixed_reg
) {
1806 if (ts
->val_type
== TEMP_VAL_REG
)
1807 s
->reg_to_temp
[ts
->reg
] = -1;
1808 if (IS_DEAD_ARG(i
)) {
1809 ts
->val_type
= TEMP_VAL_DEAD
;
1811 ts
->val_type
= TEMP_VAL_REG
;
1813 /* temp value is modified, so the value kept in memory is
1814 potentially not the same */
1815 ts
->mem_coherent
= 0;
1816 s
->reg_to_temp
[reg
] = arg
;
1824 /* emit instruction */
1825 tcg_out_op(s
, opc
, new_args
, const_args
);
1827 /* move the outputs in the correct register if needed */
1828 for(i
= 0; i
< nb_oargs
; i
++) {
1829 ts
= &s
->temps
[args
[i
]];
1831 if (ts
->fixed_reg
&& ts
->reg
!= reg
) {
1832 tcg_out_mov(s
, ts
->type
, ts
->reg
, reg
);
/* Direction of stack-slot offsets for call arguments: negated on hosts
   whose stack grows upward. */
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
1843 static int tcg_reg_alloc_call(TCGContext
*s
, const TCGOpDef
*def
,
1844 TCGOpcode opc
, const TCGArg
*args
,
1845 unsigned int dead_args
)
1847 int nb_iargs
, nb_oargs
, flags
, nb_regs
, i
, reg
, nb_params
;
1848 TCGArg arg
, func_arg
;
1850 tcg_target_long stack_offset
, call_stack_size
, func_addr
;
1851 int const_func_arg
, allocate_args
;
1852 TCGRegSet allocated_regs
;
1853 const TCGArgConstraint
*arg_ct
;
1857 nb_oargs
= arg
>> 16;
1858 nb_iargs
= arg
& 0xffff;
1859 nb_params
= nb_iargs
- 1;
1861 flags
= args
[nb_oargs
+ nb_iargs
];
1863 nb_regs
= tcg_target_get_call_iarg_regs_count(flags
);
1864 if (nb_regs
> nb_params
)
1865 nb_regs
= nb_params
;
1867 /* assign stack slots first */
1868 call_stack_size
= (nb_params
- nb_regs
) * sizeof(tcg_target_long
);
1869 call_stack_size
= (call_stack_size
+ TCG_TARGET_STACK_ALIGN
- 1) &
1870 ~(TCG_TARGET_STACK_ALIGN
- 1);
1871 allocate_args
= (call_stack_size
> TCG_STATIC_CALL_ARGS_SIZE
);
1872 if (allocate_args
) {
1873 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
1874 preallocate call stack */
1878 stack_offset
= TCG_TARGET_CALL_STACK_OFFSET
;
1879 for(i
= nb_regs
; i
< nb_params
; i
++) {
1880 arg
= args
[nb_oargs
+ i
];
1881 #ifdef TCG_TARGET_STACK_GROWSUP
1882 stack_offset
-= sizeof(tcg_target_long
);
1884 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1885 ts
= &s
->temps
[arg
];
1886 if (ts
->val_type
== TEMP_VAL_REG
) {
1887 tcg_out_st(s
, ts
->type
, ts
->reg
, TCG_REG_CALL_STACK
, stack_offset
);
1888 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1889 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1891 /* XXX: not correct if reading values from the stack */
1892 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1893 tcg_out_st(s
, ts
->type
, reg
, TCG_REG_CALL_STACK
, stack_offset
);
1894 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1895 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1897 /* XXX: sign extend may be needed on some targets */
1898 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1899 tcg_out_st(s
, ts
->type
, reg
, TCG_REG_CALL_STACK
, stack_offset
);
1904 #ifndef TCG_TARGET_STACK_GROWSUP
1905 stack_offset
+= sizeof(tcg_target_long
);
1909 /* assign input registers */
1910 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1911 for(i
= 0; i
< nb_regs
; i
++) {
1912 arg
= args
[nb_oargs
+ i
];
1913 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1914 ts
= &s
->temps
[arg
];
1915 reg
= tcg_target_call_iarg_regs
[i
];
1916 tcg_reg_free(s
, reg
);
1917 if (ts
->val_type
== TEMP_VAL_REG
) {
1918 if (ts
->reg
!= reg
) {
1919 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1921 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1922 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1923 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1924 /* XXX: sign extend ? */
1925 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1929 tcg_regset_set_reg(allocated_regs
, reg
);
1933 /* assign function address */
1934 func_arg
= args
[nb_oargs
+ nb_iargs
- 1];
1935 arg_ct
= &def
->args_ct
[0];
1936 ts
= &s
->temps
[func_arg
];
1937 func_addr
= ts
->val
;
1939 if (ts
->val_type
== TEMP_VAL_MEM
) {
1940 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1941 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1943 tcg_regset_set_reg(allocated_regs
, reg
);
1944 } else if (ts
->val_type
== TEMP_VAL_REG
) {
1946 if (!tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1947 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1948 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1951 tcg_regset_set_reg(allocated_regs
, reg
);
1952 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1953 if (tcg_target_const_match(func_addr
, arg_ct
)) {
1955 func_arg
= func_addr
;
1957 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1958 tcg_out_movi(s
, ts
->type
, reg
, func_addr
);
1960 tcg_regset_set_reg(allocated_regs
, reg
);
1967 /* mark dead temporaries and free the associated registers */
1968 for(i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
1970 if (IS_DEAD_ARG(i
)) {
1971 ts
= &s
->temps
[arg
];
1972 if (!ts
->fixed_reg
) {
1973 if (ts
->val_type
== TEMP_VAL_REG
)
1974 s
->reg_to_temp
[ts
->reg
] = -1;
1975 ts
->val_type
= TEMP_VAL_DEAD
;
1980 /* clobber call registers */
1981 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1982 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, reg
)) {
1983 tcg_reg_free(s
, reg
);
1987 /* store globals and free associated registers (we assume the call
1988 can modify any global. */
1989 if (!(flags
& TCG_CALL_CONST
)) {
1990 save_globals(s
, allocated_regs
);
1993 tcg_out_op(s
, opc
, &func_arg
, &const_func_arg
);
1995 /* assign output registers and emit moves if needed */
1996 for(i
= 0; i
< nb_oargs
; i
++) {
1998 ts
= &s
->temps
[arg
];
1999 reg
= tcg_target_call_oarg_regs
[i
];
2000 assert(s
->reg_to_temp
[reg
] == -1);
2001 if (ts
->fixed_reg
) {
2002 if (ts
->reg
!= reg
) {
2003 tcg_out_mov(s
, ts
->type
, ts
->reg
, reg
);
2006 if (ts
->val_type
== TEMP_VAL_REG
)
2007 s
->reg_to_temp
[ts
->reg
] = -1;
2008 if (IS_DEAD_ARG(i
)) {
2009 ts
->val_type
= TEMP_VAL_DEAD
;
2011 ts
->val_type
= TEMP_VAL_REG
;
2013 ts
->mem_coherent
= 0;
2014 s
->reg_to_temp
[reg
] = arg
;
2019 return nb_iargs
+ nb_oargs
+ def
->nb_cargs
+ 1;
#ifdef CONFIG_PROFILER

/* Per-opcode execution counters, incremented in tcg_gen_code_common. */
static int64_t tcg_table_op_count[NB_OPS];

/* Dump the per-opcode counters to /tmp/op.log for offline inspection.
   NOTE(review): reconstructed from a lossy extraction; the fopen result is
   not checked here, matching the original best-effort debugging aid. */
static void dump_op_count(void)
{
    int i;
    FILE *f;
    f = fopen("/tmp/op.log", "w");
    for(i = INDEX_op_end; i < NB_OPS; i++) {
        fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
    }
    fclose(f);
}
#endif
2039 static inline int tcg_gen_code_common(TCGContext
*s
, uint8_t *gen_code_buf
,
2044 const TCGOpDef
*def
;
2045 unsigned int dead_args
;
2049 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
2056 #ifdef CONFIG_PROFILER
2057 s
->opt_time
-= profile_getclock();
2060 #ifdef USE_TCG_OPTIMIZATIONS
2062 tcg_optimize(s
, gen_opc_ptr
, gen_opparam_buf
, tcg_op_defs
);
2065 #ifdef CONFIG_PROFILER
2066 s
->opt_time
+= profile_getclock();
2067 s
->la_time
-= profile_getclock();
2070 tcg_liveness_analysis(s
);
2072 #ifdef CONFIG_PROFILER
2073 s
->la_time
+= profile_getclock();
2077 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT
))) {
2078 qemu_log("OP after optimization and liveness analysis:\n");
2084 tcg_reg_alloc_start(s
);
2086 s
->code_buf
= gen_code_buf
;
2087 s
->code_ptr
= gen_code_buf
;
2089 args
= gen_opparam_buf
;
2093 opc
= gen_opc_buf
[op_index
];
2094 #ifdef CONFIG_PROFILER
2095 tcg_table_op_count
[opc
]++;
2097 def
= &tcg_op_defs
[opc
];
2099 printf("%s: %d %d %d\n", def
->name
,
2100 def
->nb_oargs
, def
->nb_iargs
, def
->nb_cargs
);
2104 case INDEX_op_mov_i32
:
2105 #if TCG_TARGET_REG_BITS == 64
2106 case INDEX_op_mov_i64
:
2108 dead_args
= s
->op_dead_args
[op_index
];
2109 tcg_reg_alloc_mov(s
, def
, args
, dead_args
);
2111 case INDEX_op_movi_i32
:
2112 #if TCG_TARGET_REG_BITS == 64
2113 case INDEX_op_movi_i64
:
2115 tcg_reg_alloc_movi(s
, args
);
2117 case INDEX_op_debug_insn_start
:
2118 /* debug instruction */
2128 case INDEX_op_discard
:
2131 ts
= &s
->temps
[args
[0]];
2132 /* mark the temporary as dead */
2133 if (!ts
->fixed_reg
) {
2134 if (ts
->val_type
== TEMP_VAL_REG
)
2135 s
->reg_to_temp
[ts
->reg
] = -1;
2136 ts
->val_type
= TEMP_VAL_DEAD
;
2140 case INDEX_op_set_label
:
2141 tcg_reg_alloc_bb_end(s
, s
->reserved_regs
);
2142 tcg_out_label(s
, args
[0], s
->code_ptr
);
2145 dead_args
= s
->op_dead_args
[op_index
];
2146 args
+= tcg_reg_alloc_call(s
, def
, opc
, args
, dead_args
);
2151 /* Sanity check that we've not introduced any unhandled opcodes. */
2152 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2155 /* Note: in order to speed up the code, it would be much
2156 faster to have specialized register allocator functions for
2157 some common argument patterns */
2158 dead_args
= s
->op_dead_args
[op_index
];
2159 tcg_reg_alloc_op(s
, def
, opc
, args
, dead_args
);
2162 args
+= def
->nb_args
;
2164 if (search_pc
>= 0 && search_pc
< s
->code_ptr
- gen_code_buf
) {
2176 int tcg_gen_code(TCGContext
*s
, uint8_t *gen_code_buf
)
2178 #ifdef CONFIG_PROFILER
2181 n
= (gen_opc_ptr
- gen_opc_buf
);
2183 if (n
> s
->op_count_max
)
2184 s
->op_count_max
= n
;
2186 s
->temp_count
+= s
->nb_temps
;
2187 if (s
->nb_temps
> s
->temp_count_max
)
2188 s
->temp_count_max
= s
->nb_temps
;
2192 tcg_gen_code_common(s
, gen_code_buf
, -1);
2194 /* flush instruction cache */
2195 flush_icache_range((tcg_target_ulong
)gen_code_buf
,
2196 (tcg_target_ulong
)s
->code_ptr
);
2198 return s
->code_ptr
- gen_code_buf
;
2201 /* Return the index of the micro operation such as the pc after is <
2202 offset bytes from the start of the TB. The contents of gen_code_buf must
2203 not be changed, though writing the same values is ok.
2204 Return -1 if not found. */
2205 int tcg_gen_code_search_pc(TCGContext
*s
, uint8_t *gen_code_buf
, long offset
)
2207 return tcg_gen_code_common(s
, gen_code_buf
, offset
);
2210 #ifdef CONFIG_PROFILER
2211 void tcg_dump_info(FILE *f
, fprintf_function cpu_fprintf
)
2213 TCGContext
*s
= &tcg_ctx
;
2216 tot
= s
->interm_time
+ s
->code_time
;
2217 cpu_fprintf(f
, "JIT cycles %" PRId64
" (%0.3f s at 2.4 GHz)\n",
2219 cpu_fprintf(f
, "translated TBs %" PRId64
" (aborted=%" PRId64
" %0.1f%%)\n",
2221 s
->tb_count1
- s
->tb_count
,
2222 s
->tb_count1
? (double)(s
->tb_count1
- s
->tb_count
) / s
->tb_count1
* 100.0 : 0);
2223 cpu_fprintf(f
, "avg ops/TB %0.1f max=%d\n",
2224 s
->tb_count
? (double)s
->op_count
/ s
->tb_count
: 0, s
->op_count_max
);
2225 cpu_fprintf(f
, "deleted ops/TB %0.2f\n",
2227 (double)s
->del_op_count
/ s
->tb_count
: 0);
2228 cpu_fprintf(f
, "avg temps/TB %0.2f max=%d\n",
2230 (double)s
->temp_count
/ s
->tb_count
: 0,
2233 cpu_fprintf(f
, "cycles/op %0.1f\n",
2234 s
->op_count
? (double)tot
/ s
->op_count
: 0);
2235 cpu_fprintf(f
, "cycles/in byte %0.1f\n",
2236 s
->code_in_len
? (double)tot
/ s
->code_in_len
: 0);
2237 cpu_fprintf(f
, "cycles/out byte %0.1f\n",
2238 s
->code_out_len
? (double)tot
/ s
->code_out_len
: 0);
2241 cpu_fprintf(f
, " gen_interm time %0.1f%%\n",
2242 (double)s
->interm_time
/ tot
* 100.0);
2243 cpu_fprintf(f
, " gen_code time %0.1f%%\n",
2244 (double)s
->code_time
/ tot
* 100.0);
2245 cpu_fprintf(f
, "optim./code time %0.1f%%\n",
2246 (double)s
->opt_time
/ (s
->code_time
? s
->code_time
: 1)
2248 cpu_fprintf(f
, "liveness/code time %0.1f%%\n",
2249 (double)s
->la_time
/ (s
->code_time
? s
->code_time
: 1) * 100.0);
2250 cpu_fprintf(f
, "cpu_restore count %" PRId64
"\n",
2252 cpu_fprintf(f
, " avg cycles %0.1f\n",
2253 s
->restore_count
? (double)s
->restore_time
/ s
->restore_count
: 0);
2258 void tcg_dump_info(FILE *f
, fprintf_function cpu_fprintf
)
2260 cpu_fprintf(f
, "[TCG profiler not compiled]\n");
2264 #ifdef ELF_HOST_MACHINE
2265 /* In order to use this feature, the backend needs to do three things:
2267 (1) Define ELF_HOST_MACHINE to indicate both what value to
2268 put into the ELF image and to indicate support for the feature.
2270 (2) Define tcg_register_jit. This should create a buffer containing
2271 the contents of a .debug_frame section that describes the post-
2272 prologue unwind info for the tcg machine.
2274 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2277 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2284 struct jit_code_entry
{
2285 struct jit_code_entry
*next_entry
;
2286 struct jit_code_entry
*prev_entry
;
2287 const void *symfile_addr
;
2288 uint64_t symfile_size
;
2291 struct jit_descriptor
{
2293 uint32_t action_flag
;
2294 struct jit_code_entry
*relevant_entry
;
2295 struct jit_code_entry
*first_entry
;
2298 void __jit_debug_register_code(void) __attribute__((noinline
));
2299 void __jit_debug_register_code(void)
2304 /* Must statically initialize the version, because GDB may check
2305 the version before we can set it. */
2306 struct jit_descriptor __jit_debug_descriptor
= { 1, 0, 0, 0 };
2308 /* End GDB interface. */
/* Return the offset of 'str' within the NUL-separated string table
   'strtab' (which begins with an empty string at offset 0).
   The caller must guarantee the string is present: there is no
   terminating sentinel check, so a missing string would run past the
   table. */
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
2322 static void tcg_register_jit_int(void *buf_ptr
, size_t buf_size
,
2323 void *debug_frame
, size_t debug_frame_size
)
2325 struct __attribute__((packed
)) DebugInfo
{
2332 uintptr_t cu_low_pc
;
2333 uintptr_t cu_high_pc
;
2336 uintptr_t fn_low_pc
;
2337 uintptr_t fn_high_pc
;
2346 struct DebugInfo di
;
2351 struct ElfImage
*img
;
2353 static const struct ElfImage img_template
= {
2355 .e_ident
[EI_MAG0
] = ELFMAG0
,
2356 .e_ident
[EI_MAG1
] = ELFMAG1
,
2357 .e_ident
[EI_MAG2
] = ELFMAG2
,
2358 .e_ident
[EI_MAG3
] = ELFMAG3
,
2359 .e_ident
[EI_CLASS
] = ELF_CLASS
,
2360 .e_ident
[EI_DATA
] = ELF_DATA
,
2361 .e_ident
[EI_VERSION
] = EV_CURRENT
,
2363 .e_machine
= ELF_HOST_MACHINE
,
2364 .e_version
= EV_CURRENT
,
2365 .e_phoff
= offsetof(struct ElfImage
, phdr
),
2366 .e_shoff
= offsetof(struct ElfImage
, shdr
),
2367 .e_ehsize
= sizeof(ElfW(Shdr
)),
2368 .e_phentsize
= sizeof(ElfW(Phdr
)),
2370 .e_shentsize
= sizeof(ElfW(Shdr
)),
2371 .e_shnum
= ARRAY_SIZE(img
->shdr
),
2372 .e_shstrndx
= ARRAY_SIZE(img
->shdr
) - 1,
2373 #ifdef ELF_HOST_FLAGS
2374 .e_flags
= ELF_HOST_FLAGS
,
2377 .e_ident
[EI_OSABI
] = ELF_OSABI
,
2385 [0] = { .sh_type
= SHT_NULL
},
2386 /* Trick: The contents of code_gen_buffer are not present in
2387 this fake ELF file; that got allocated elsewhere. Therefore
2388 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2389 will not look for contents. We can record any address. */
2391 .sh_type
= SHT_NOBITS
,
2392 .sh_flags
= SHF_EXECINSTR
| SHF_ALLOC
,
2394 [2] = { /* .debug_info */
2395 .sh_type
= SHT_PROGBITS
,
2396 .sh_offset
= offsetof(struct ElfImage
, di
),
2397 .sh_size
= sizeof(struct DebugInfo
),
2399 [3] = { /* .debug_abbrev */
2400 .sh_type
= SHT_PROGBITS
,
2401 .sh_offset
= offsetof(struct ElfImage
, da
),
2402 .sh_size
= sizeof(img
->da
),
2404 [4] = { /* .debug_frame */
2405 .sh_type
= SHT_PROGBITS
,
2406 .sh_offset
= sizeof(struct ElfImage
),
2408 [5] = { /* .symtab */
2409 .sh_type
= SHT_SYMTAB
,
2410 .sh_offset
= offsetof(struct ElfImage
, sym
),
2411 .sh_size
= sizeof(img
->sym
),
2413 .sh_link
= ARRAY_SIZE(img
->shdr
) - 1,
2414 .sh_entsize
= sizeof(ElfW(Sym
)),
2416 [6] = { /* .strtab */
2417 .sh_type
= SHT_STRTAB
,
2418 .sh_offset
= offsetof(struct ElfImage
, str
),
2419 .sh_size
= sizeof(img
->str
),
2423 [1] = { /* code_gen_buffer */
2424 .st_info
= ELF_ST_INFO(STB_GLOBAL
, STT_FUNC
),
2429 .len
= sizeof(struct DebugInfo
) - 4,
2431 .ptr_size
= sizeof(void *),
2433 .cu_lang
= 0x8001, /* DW_LANG_Mips_Assembler */
2435 .fn_name
= "code_gen_buffer"
2438 1, /* abbrev number (the cu) */
2439 0x11, 1, /* DW_TAG_compile_unit, has children */
2440 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2441 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2442 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2443 0, 0, /* end of abbrev */
2444 2, /* abbrev number (the fn) */
2445 0x2e, 0, /* DW_TAG_subprogram, no children */
2446 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2447 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2448 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2449 0, 0, /* end of abbrev */
2450 0 /* no more abbrev */
2452 .str
= "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2453 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2456 /* We only need a single jit entry; statically allocate it. */
2457 static struct jit_code_entry one_entry
;
2459 uintptr_t buf
= (uintptr_t)buf_ptr
;
2460 size_t img_size
= sizeof(struct ElfImage
) + debug_frame_size
;
2462 img
= g_malloc(img_size
);
2463 *img
= img_template
;
2464 memcpy(img
+ 1, debug_frame
, debug_frame_size
);
2466 img
->phdr
.p_vaddr
= buf
;
2467 img
->phdr
.p_paddr
= buf
;
2468 img
->phdr
.p_memsz
= buf_size
;
2470 img
->shdr
[1].sh_name
= find_string(img
->str
, ".text");
2471 img
->shdr
[1].sh_addr
= buf
;
2472 img
->shdr
[1].sh_size
= buf_size
;
2474 img
->shdr
[2].sh_name
= find_string(img
->str
, ".debug_info");
2475 img
->shdr
[3].sh_name
= find_string(img
->str
, ".debug_abbrev");
2477 img
->shdr
[4].sh_name
= find_string(img
->str
, ".debug_frame");
2478 img
->shdr
[4].sh_size
= debug_frame_size
;
2480 img
->shdr
[5].sh_name
= find_string(img
->str
, ".symtab");
2481 img
->shdr
[6].sh_name
= find_string(img
->str
, ".strtab");
2483 img
->sym
[1].st_name
= find_string(img
->str
, "code_gen_buffer");
2484 img
->sym
[1].st_value
= buf
;
2485 img
->sym
[1].st_size
= buf_size
;
2487 img
->di
.cu_low_pc
= buf
;
2488 img
->di
.cu_high_pc
= buf_size
;
2489 img
->di
.fn_low_pc
= buf
;
2490 img
->di
.fn_high_pc
= buf_size
;
2493 /* Enable this block to be able to debug the ELF image file creation.
2494 One can use readelf, objdump, or other inspection utilities. */
2496 FILE *f
= fopen("/tmp/qemu.jit", "w+b");
2498 if (fwrite(img
, img_size
, 1, f
) != img_size
) {
2499 /* Avoid stupid unused return value warning for fwrite. */
2506 one_entry
.symfile_addr
= img
;
2507 one_entry
.symfile_size
= img_size
;
2509 __jit_debug_descriptor
.action_flag
= JIT_REGISTER_FN
;
2510 __jit_debug_descriptor
.relevant_entry
= &one_entry
;
2511 __jit_debug_descriptor
.first_entry
= &one_entry
;
2512 __jit_debug_register_code();
2515 /* No support for the feature. Provide the entry point expected by exec.c,
2516 and implement the internal function we declared earlier. */
/* No ELF_HOST_MACHINE support: accept and discard the debug frame. */
static void tcg_register_jit_int(void *buf, size_t size,
                                 void *debug_frame, size_t debug_frame_size)
{
}
/* Entry point expected by exec.c; no-op without ELF_HOST_MACHINE. */
void tcg_register_jit(void *buf, size_t buf_size)
{
}
2526 #endif /* ELF_HOST_MACHINE */