2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
31 /* Define to dump the ELF file used to communicate with GDB. */
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
44 /* Note: the long term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
47 #define NO_CPU_IO_DEFS
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
55 # define ELF_CLASS ELFCLASS32
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
60 # define ELF_DATA ELFDATA2LSB
65 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
66 #error GUEST_BASE not supported on this host.
69 /* Forward declarations for functions declared in tcg-target.c and used here. */
70 static void tcg_target_init(TCGContext
*s
);
71 static void tcg_target_qemu_prologue(TCGContext
*s
);
72 static void patch_reloc(uint8_t *code_ptr
, int type
,
73 tcg_target_long value
, tcg_target_long addend
);
75 static void tcg_register_jit_int(void *buf
, size_t size
,
76 void *debug_frame
, size_t debug_frame_size
)
77 __attribute__((unused
));
79 /* Forward declarations for functions declared and used in tcg-target.c. */
80 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
);
81 static void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg1
,
82 tcg_target_long arg2
);
83 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg
);
84 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
85 TCGReg ret
, tcg_target_long arg
);
86 static void tcg_out_op(TCGContext
*s
, TCGOpcode opc
, const TCGArg
*args
,
87 const int *const_args
);
88 static void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
, TCGReg arg1
,
89 tcg_target_long arg2
);
90 static int tcg_target_const_match(tcg_target_long val
,
91 const TCGArgConstraint
*arg_ct
);
93 TCGOpDef tcg_op_defs
[] = {
94 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
98 const size_t tcg_op_defs_max
= ARRAY_SIZE(tcg_op_defs
);
100 static TCGRegSet tcg_target_available_regs
[2];
101 static TCGRegSet tcg_target_call_clobber_regs
;
103 /* XXX: move that inside the context */
104 uint16_t *gen_opc_ptr
;
105 TCGArg
*gen_opparam_ptr
;
107 static inline void tcg_out8(TCGContext
*s
, uint8_t v
)
112 static inline void tcg_out16(TCGContext
*s
, uint16_t v
)
114 *(uint16_t *)s
->code_ptr
= v
;
118 static inline void tcg_out32(TCGContext
*s
, uint32_t v
)
120 *(uint32_t *)s
->code_ptr
= v
;
124 /* label relocation processing */
126 static void tcg_out_reloc(TCGContext
*s
, uint8_t *code_ptr
, int type
,
127 int label_index
, long addend
)
132 l
= &s
->labels
[label_index
];
134 /* FIXME: This may break relocations on RISC targets that
135 modify instruction fields in place. The caller may not have
136 written the initial value. */
137 patch_reloc(code_ptr
, type
, l
->u
.value
, addend
);
139 /* add a new relocation entry */
140 r
= tcg_malloc(sizeof(TCGRelocation
));
144 r
->next
= l
->u
.first_reloc
;
145 l
->u
.first_reloc
= r
;
149 static void tcg_out_label(TCGContext
*s
, int label_index
, void *ptr
)
153 tcg_target_long value
= (tcg_target_long
)ptr
;
155 l
= &s
->labels
[label_index
];
158 r
= l
->u
.first_reloc
;
160 patch_reloc(r
->ptr
, r
->type
, value
, r
->addend
);
167 int gen_new_label(void)
169 TCGContext
*s
= &tcg_ctx
;
173 if (s
->nb_labels
>= TCG_MAX_LABELS
)
175 idx
= s
->nb_labels
++;
178 l
->u
.first_reloc
= NULL
;
182 #include "tcg-target.c"
184 /* pool based memory allocation */
185 void *tcg_malloc_internal(TCGContext
*s
, int size
)
190 if (size
> TCG_POOL_CHUNK_SIZE
) {
191 /* big malloc: insert a new pool (XXX: could optimize) */
192 p
= g_malloc(sizeof(TCGPool
) + size
);
194 p
->next
= s
->pool_first_large
;
195 s
->pool_first_large
= p
;
206 pool_size
= TCG_POOL_CHUNK_SIZE
;
207 p
= g_malloc(sizeof(TCGPool
) + pool_size
);
211 s
->pool_current
->next
= p
;
220 s
->pool_cur
= p
->data
+ size
;
221 s
->pool_end
= p
->data
+ p
->size
;
225 void tcg_pool_reset(TCGContext
*s
)
228 for (p
= s
->pool_first_large
; p
; p
= t
) {
232 s
->pool_first_large
= NULL
;
233 s
->pool_cur
= s
->pool_end
= NULL
;
234 s
->pool_current
= NULL
;
237 void tcg_context_init(TCGContext
*s
)
239 int op
, total_args
, n
;
241 TCGArgConstraint
*args_ct
;
244 memset(s
, 0, sizeof(*s
));
245 s
->temps
= s
->static_temps
;
248 /* Count total number of arguments and allocate the corresponding
251 for(op
= 0; op
< NB_OPS
; op
++) {
252 def
= &tcg_op_defs
[op
];
253 n
= def
->nb_iargs
+ def
->nb_oargs
;
257 args_ct
= g_malloc(sizeof(TCGArgConstraint
) * total_args
);
258 sorted_args
= g_malloc(sizeof(int) * total_args
);
260 for(op
= 0; op
< NB_OPS
; op
++) {
261 def
= &tcg_op_defs
[op
];
262 def
->args_ct
= args_ct
;
263 def
->sorted_args
= sorted_args
;
264 n
= def
->nb_iargs
+ def
->nb_oargs
;
272 void tcg_prologue_init(TCGContext
*s
)
274 /* init global prologue and epilogue */
275 s
->code_buf
= code_gen_prologue
;
276 s
->code_ptr
= s
->code_buf
;
277 tcg_target_qemu_prologue(s
);
278 flush_icache_range((tcg_target_ulong
)s
->code_buf
,
279 (tcg_target_ulong
)s
->code_ptr
);
282 void tcg_set_frame(TCGContext
*s
, int reg
,
283 tcg_target_long start
, tcg_target_long size
)
285 s
->frame_start
= start
;
286 s
->frame_end
= start
+ size
;
290 void tcg_func_start(TCGContext
*s
)
294 s
->nb_temps
= s
->nb_globals
;
295 for(i
= 0; i
< (TCG_TYPE_COUNT
* 2); i
++)
296 s
->first_free_temp
[i
] = -1;
297 s
->labels
= tcg_malloc(sizeof(TCGLabel
) * TCG_MAX_LABELS
);
299 s
->current_frame_offset
= s
->frame_start
;
301 #ifdef CONFIG_DEBUG_TCG
302 s
->goto_tb_issue_mask
= 0;
305 gen_opc_ptr
= gen_opc_buf
;
306 gen_opparam_ptr
= gen_opparam_buf
;
309 static inline void tcg_temp_alloc(TCGContext
*s
, int n
)
311 if (n
> TCG_MAX_TEMPS
)
315 static inline int tcg_global_reg_new_internal(TCGType type
, int reg
,
318 TCGContext
*s
= &tcg_ctx
;
322 #if TCG_TARGET_REG_BITS == 32
323 if (type
!= TCG_TYPE_I32
)
326 if (tcg_regset_test_reg(s
->reserved_regs
, reg
))
329 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
330 ts
= &s
->temps
[s
->nb_globals
];
331 ts
->base_type
= type
;
337 tcg_regset_set_reg(s
->reserved_regs
, reg
);
341 TCGv_i32
tcg_global_reg_new_i32(int reg
, const char *name
)
345 idx
= tcg_global_reg_new_internal(TCG_TYPE_I32
, reg
, name
);
346 return MAKE_TCGV_I32(idx
);
349 TCGv_i64
tcg_global_reg_new_i64(int reg
, const char *name
)
353 idx
= tcg_global_reg_new_internal(TCG_TYPE_I64
, reg
, name
);
354 return MAKE_TCGV_I64(idx
);
357 static inline int tcg_global_mem_new_internal(TCGType type
, int reg
,
358 tcg_target_long offset
,
361 TCGContext
*s
= &tcg_ctx
;
366 #if TCG_TARGET_REG_BITS == 32
367 if (type
== TCG_TYPE_I64
) {
369 tcg_temp_alloc(s
, s
->nb_globals
+ 2);
370 ts
= &s
->temps
[s
->nb_globals
];
371 ts
->base_type
= type
;
372 ts
->type
= TCG_TYPE_I32
;
374 ts
->mem_allocated
= 1;
376 #ifdef TCG_TARGET_WORDS_BIGENDIAN
377 ts
->mem_offset
= offset
+ 4;
379 ts
->mem_offset
= offset
;
381 pstrcpy(buf
, sizeof(buf
), name
);
382 pstrcat(buf
, sizeof(buf
), "_0");
383 ts
->name
= strdup(buf
);
386 ts
->base_type
= type
;
387 ts
->type
= TCG_TYPE_I32
;
389 ts
->mem_allocated
= 1;
391 #ifdef TCG_TARGET_WORDS_BIGENDIAN
392 ts
->mem_offset
= offset
;
394 ts
->mem_offset
= offset
+ 4;
396 pstrcpy(buf
, sizeof(buf
), name
);
397 pstrcat(buf
, sizeof(buf
), "_1");
398 ts
->name
= strdup(buf
);
404 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
405 ts
= &s
->temps
[s
->nb_globals
];
406 ts
->base_type
= type
;
409 ts
->mem_allocated
= 1;
411 ts
->mem_offset
= offset
;
418 TCGv_i32
tcg_global_mem_new_i32(int reg
, tcg_target_long offset
,
423 idx
= tcg_global_mem_new_internal(TCG_TYPE_I32
, reg
, offset
, name
);
424 return MAKE_TCGV_I32(idx
);
427 TCGv_i64
tcg_global_mem_new_i64(int reg
, tcg_target_long offset
,
432 idx
= tcg_global_mem_new_internal(TCG_TYPE_I64
, reg
, offset
, name
);
433 return MAKE_TCGV_I64(idx
);
436 static inline int tcg_temp_new_internal(TCGType type
, int temp_local
)
438 TCGContext
*s
= &tcg_ctx
;
445 idx
= s
->first_free_temp
[k
];
447 /* There is already an available temp with the
450 s
->first_free_temp
[k
] = ts
->next_free_temp
;
451 ts
->temp_allocated
= 1;
452 assert(ts
->temp_local
== temp_local
);
455 #if TCG_TARGET_REG_BITS == 32
456 if (type
== TCG_TYPE_I64
) {
457 tcg_temp_alloc(s
, s
->nb_temps
+ 2);
458 ts
= &s
->temps
[s
->nb_temps
];
459 ts
->base_type
= type
;
460 ts
->type
= TCG_TYPE_I32
;
461 ts
->temp_allocated
= 1;
462 ts
->temp_local
= temp_local
;
465 ts
->base_type
= TCG_TYPE_I32
;
466 ts
->type
= TCG_TYPE_I32
;
467 ts
->temp_allocated
= 1;
468 ts
->temp_local
= temp_local
;
474 tcg_temp_alloc(s
, s
->nb_temps
+ 1);
475 ts
= &s
->temps
[s
->nb_temps
];
476 ts
->base_type
= type
;
478 ts
->temp_allocated
= 1;
479 ts
->temp_local
= temp_local
;
485 #if defined(CONFIG_DEBUG_TCG)
491 TCGv_i32
tcg_temp_new_internal_i32(int temp_local
)
495 idx
= tcg_temp_new_internal(TCG_TYPE_I32
, temp_local
);
496 return MAKE_TCGV_I32(idx
);
499 TCGv_i64
tcg_temp_new_internal_i64(int temp_local
)
503 idx
= tcg_temp_new_internal(TCG_TYPE_I64
, temp_local
);
504 return MAKE_TCGV_I64(idx
);
507 static inline void tcg_temp_free_internal(int idx
)
509 TCGContext
*s
= &tcg_ctx
;
513 #if defined(CONFIG_DEBUG_TCG)
515 if (s
->temps_in_use
< 0) {
516 fprintf(stderr
, "More temporaries freed than allocated!\n");
520 assert(idx
>= s
->nb_globals
&& idx
< s
->nb_temps
);
522 assert(ts
->temp_allocated
!= 0);
523 ts
->temp_allocated
= 0;
527 ts
->next_free_temp
= s
->first_free_temp
[k
];
528 s
->first_free_temp
[k
] = idx
;
531 void tcg_temp_free_i32(TCGv_i32 arg
)
533 tcg_temp_free_internal(GET_TCGV_I32(arg
));
536 void tcg_temp_free_i64(TCGv_i64 arg
)
538 tcg_temp_free_internal(GET_TCGV_I64(arg
));
541 TCGv_i32
tcg_const_i32(int32_t val
)
544 t0
= tcg_temp_new_i32();
545 tcg_gen_movi_i32(t0
, val
);
549 TCGv_i64
tcg_const_i64(int64_t val
)
552 t0
= tcg_temp_new_i64();
553 tcg_gen_movi_i64(t0
, val
);
557 TCGv_i32
tcg_const_local_i32(int32_t val
)
560 t0
= tcg_temp_local_new_i32();
561 tcg_gen_movi_i32(t0
, val
);
565 TCGv_i64
tcg_const_local_i64(int64_t val
)
568 t0
= tcg_temp_local_new_i64();
569 tcg_gen_movi_i64(t0
, val
);
573 #if defined(CONFIG_DEBUG_TCG)
574 void tcg_clear_temp_count(void)
576 TCGContext
*s
= &tcg_ctx
;
580 int tcg_check_temp_count(void)
582 TCGContext
*s
= &tcg_ctx
;
583 if (s
->temps_in_use
) {
584 /* Clear the count so that we don't give another
585 * warning immediately next time around.
594 void tcg_register_helper(void *func
, const char *name
)
596 TCGContext
*s
= &tcg_ctx
;
598 if ((s
->nb_helpers
+ 1) > s
->allocated_helpers
) {
599 n
= s
->allocated_helpers
;
605 s
->helpers
= realloc(s
->helpers
, n
* sizeof(TCGHelperInfo
));
606 s
->allocated_helpers
= n
;
608 s
->helpers
[s
->nb_helpers
].func
= (tcg_target_ulong
)func
;
609 s
->helpers
[s
->nb_helpers
].name
= name
;
613 /* Note: we convert the 64 bit args to 32 bit and do some alignment
614 and endian swap. Maybe it would be better to do the alignment
615 and endian swap in tcg_reg_alloc_call(). */
616 void tcg_gen_callN(TCGContext
*s
, TCGv_ptr func
, unsigned int flags
,
617 int sizemask
, TCGArg ret
, int nargs
, TCGArg
*args
)
624 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
625 for (i
= 0; i
< nargs
; ++i
) {
626 int is_64bit
= sizemask
& (1 << (i
+1)*2);
627 int is_signed
= sizemask
& (2 << (i
+1)*2);
629 TCGv_i64 temp
= tcg_temp_new_i64();
630 TCGv_i64 orig
= MAKE_TCGV_I64(args
[i
]);
632 tcg_gen_ext32s_i64(temp
, orig
);
634 tcg_gen_ext32u_i64(temp
, orig
);
636 args
[i
] = GET_TCGV_I64(temp
);
639 #endif /* TCG_TARGET_EXTEND_ARGS */
641 *gen_opc_ptr
++ = INDEX_op_call
;
642 nparam
= gen_opparam_ptr
++;
643 if (ret
!= TCG_CALL_DUMMY_ARG
) {
644 #if TCG_TARGET_REG_BITS < 64
646 #ifdef TCG_TARGET_WORDS_BIGENDIAN
647 *gen_opparam_ptr
++ = ret
+ 1;
648 *gen_opparam_ptr
++ = ret
;
650 *gen_opparam_ptr
++ = ret
;
651 *gen_opparam_ptr
++ = ret
+ 1;
657 *gen_opparam_ptr
++ = ret
;
664 for (i
= 0; i
< nargs
; i
++) {
665 #if TCG_TARGET_REG_BITS < 64
666 int is_64bit
= sizemask
& (1 << (i
+1)*2);
668 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
669 /* some targets want aligned 64 bit args */
671 *gen_opparam_ptr
++ = TCG_CALL_DUMMY_ARG
;
675 /* If stack grows up, then we will be placing successive
676 arguments at lower addresses, which means we need to
677 reverse the order compared to how we would normally
678 treat either big or little-endian. For those arguments
679 that will wind up in registers, this still works for
680 HPPA (the only current STACK_GROWSUP target) since the
681 argument registers are *also* allocated in decreasing
682 order. If another such target is added, this logic may
683 have to get more complicated to differentiate between
684 stack arguments and register arguments. */
685 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
686 *gen_opparam_ptr
++ = args
[i
] + 1;
687 *gen_opparam_ptr
++ = args
[i
];
689 *gen_opparam_ptr
++ = args
[i
];
690 *gen_opparam_ptr
++ = args
[i
] + 1;
695 #endif /* TCG_TARGET_REG_BITS < 64 */
697 *gen_opparam_ptr
++ = args
[i
];
700 *gen_opparam_ptr
++ = GET_TCGV_PTR(func
);
702 *gen_opparam_ptr
++ = flags
;
704 *nparam
= (nb_rets
<< 16) | (real_args
+ 1);
706 /* total parameters, needed to go backward in the instruction stream */
707 *gen_opparam_ptr
++ = 1 + nb_rets
+ real_args
+ 3;
709 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
710 for (i
= 0; i
< nargs
; ++i
) {
711 int is_64bit
= sizemask
& (1 << (i
+1)*2);
713 TCGv_i64 temp
= MAKE_TCGV_I64(args
[i
]);
714 tcg_temp_free_i64(temp
);
717 #endif /* TCG_TARGET_EXTEND_ARGS */
720 #if TCG_TARGET_REG_BITS == 32
721 void tcg_gen_shifti_i64(TCGv_i64 ret
, TCGv_i64 arg1
,
722 int c
, int right
, int arith
)
725 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
726 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
727 } else if (c
>= 32) {
731 tcg_gen_sari_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
732 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), 31);
734 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
735 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
738 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_LOW(arg1
), c
);
739 tcg_gen_movi_i32(TCGV_LOW(ret
), 0);
744 t0
= tcg_temp_new_i32();
745 t1
= tcg_temp_new_i32();
747 tcg_gen_shli_i32(t0
, TCGV_HIGH(arg1
), 32 - c
);
749 tcg_gen_sari_i32(t1
, TCGV_HIGH(arg1
), c
);
751 tcg_gen_shri_i32(t1
, TCGV_HIGH(arg1
), c
);
752 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), c
);
753 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), t0
);
754 tcg_gen_mov_i32(TCGV_HIGH(ret
), t1
);
756 tcg_gen_shri_i32(t0
, TCGV_LOW(arg1
), 32 - c
);
757 /* Note: ret can be the same as arg1, so we use t1 */
758 tcg_gen_shli_i32(t1
, TCGV_LOW(arg1
), c
);
759 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), c
);
760 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(ret
), t0
);
761 tcg_gen_mov_i32(TCGV_LOW(ret
), t1
);
763 tcg_temp_free_i32(t0
);
764 tcg_temp_free_i32(t1
);
770 static void tcg_reg_alloc_start(TCGContext
*s
)
774 for(i
= 0; i
< s
->nb_globals
; i
++) {
777 ts
->val_type
= TEMP_VAL_REG
;
779 ts
->val_type
= TEMP_VAL_MEM
;
782 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
784 ts
->val_type
= TEMP_VAL_DEAD
;
785 ts
->mem_allocated
= 0;
788 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
789 s
->reg_to_temp
[i
] = -1;
793 static char *tcg_get_arg_str_idx(TCGContext
*s
, char *buf
, int buf_size
,
798 assert(idx
>= 0 && idx
< s
->nb_temps
);
801 if (idx
< s
->nb_globals
) {
802 pstrcpy(buf
, buf_size
, ts
->name
);
805 snprintf(buf
, buf_size
, "loc%d", idx
- s
->nb_globals
);
807 snprintf(buf
, buf_size
, "tmp%d", idx
- s
->nb_globals
);
812 char *tcg_get_arg_str_i32(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i32 arg
)
814 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I32(arg
));
817 char *tcg_get_arg_str_i64(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i64 arg
)
819 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I64(arg
));
822 static int helper_cmp(const void *p1
, const void *p2
)
824 const TCGHelperInfo
*th1
= p1
;
825 const TCGHelperInfo
*th2
= p2
;
826 if (th1
->func
< th2
->func
)
828 else if (th1
->func
== th2
->func
)
834 /* find helper definition (Note: A hash table would be better) */
835 static TCGHelperInfo
*tcg_find_helper(TCGContext
*s
, tcg_target_ulong val
)
841 if (unlikely(!s
->helpers_sorted
)) {
842 qsort(s
->helpers
, s
->nb_helpers
, sizeof(TCGHelperInfo
),
844 s
->helpers_sorted
= 1;
849 m_max
= s
->nb_helpers
- 1;
850 while (m_min
<= m_max
) {
851 m
= (m_min
+ m_max
) >> 1;
865 static const char * const cond_name
[] =
867 [TCG_COND_NEVER
] = "never",
868 [TCG_COND_ALWAYS
] = "always",
869 [TCG_COND_EQ
] = "eq",
870 [TCG_COND_NE
] = "ne",
871 [TCG_COND_LT
] = "lt",
872 [TCG_COND_GE
] = "ge",
873 [TCG_COND_LE
] = "le",
874 [TCG_COND_GT
] = "gt",
875 [TCG_COND_LTU
] = "ltu",
876 [TCG_COND_GEU
] = "geu",
877 [TCG_COND_LEU
] = "leu",
878 [TCG_COND_GTU
] = "gtu"
881 void tcg_dump_ops(TCGContext
*s
)
883 const uint16_t *opc_ptr
;
887 int i
, k
, nb_oargs
, nb_iargs
, nb_cargs
, first_insn
;
892 opc_ptr
= gen_opc_buf
;
893 args
= gen_opparam_buf
;
894 while (opc_ptr
< gen_opc_ptr
) {
896 def
= &tcg_op_defs
[c
];
897 if (c
== INDEX_op_debug_insn_start
) {
899 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
900 pc
= ((uint64_t)args
[1] << 32) | args
[0];
907 qemu_log(" ---- 0x%" PRIx64
, pc
);
909 nb_oargs
= def
->nb_oargs
;
910 nb_iargs
= def
->nb_iargs
;
911 nb_cargs
= def
->nb_cargs
;
912 } else if (c
== INDEX_op_call
) {
915 /* variable number of arguments */
917 nb_oargs
= arg
>> 16;
918 nb_iargs
= arg
& 0xffff;
919 nb_cargs
= def
->nb_cargs
;
921 qemu_log(" %s ", def
->name
);
925 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
926 args
[nb_oargs
+ nb_iargs
- 1]));
928 qemu_log(",$0x%" TCG_PRIlx
, args
[nb_oargs
+ nb_iargs
]);
930 qemu_log(",$%d", nb_oargs
);
931 for(i
= 0; i
< nb_oargs
; i
++) {
933 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
936 for(i
= 0; i
< (nb_iargs
- 1); i
++) {
938 if (args
[nb_oargs
+ i
] == TCG_CALL_DUMMY_ARG
) {
941 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
942 args
[nb_oargs
+ i
]));
945 } else if (c
== INDEX_op_movi_i32
|| c
== INDEX_op_movi_i64
) {
946 tcg_target_ulong val
;
949 nb_oargs
= def
->nb_oargs
;
950 nb_iargs
= def
->nb_iargs
;
951 nb_cargs
= def
->nb_cargs
;
952 qemu_log(" %s %s,$", def
->name
,
953 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), args
[0]));
955 th
= tcg_find_helper(s
, val
);
957 qemu_log("%s", th
->name
);
959 if (c
== INDEX_op_movi_i32
) {
960 qemu_log("0x%x", (uint32_t)val
);
962 qemu_log("0x%" PRIx64
, (uint64_t)val
);
966 qemu_log(" %s ", def
->name
);
967 if (c
== INDEX_op_nopn
) {
968 /* variable number of arguments */
973 nb_oargs
= def
->nb_oargs
;
974 nb_iargs
= def
->nb_iargs
;
975 nb_cargs
= def
->nb_cargs
;
979 for(i
= 0; i
< nb_oargs
; i
++) {
983 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
986 for(i
= 0; i
< nb_iargs
; i
++) {
990 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
994 case INDEX_op_brcond_i32
:
995 case INDEX_op_setcond_i32
:
996 case INDEX_op_movcond_i32
:
997 case INDEX_op_brcond2_i32
:
998 case INDEX_op_setcond2_i32
:
999 case INDEX_op_brcond_i64
:
1000 case INDEX_op_setcond_i64
:
1001 case INDEX_op_movcond_i64
:
1002 if (args
[k
] < ARRAY_SIZE(cond_name
) && cond_name
[args
[k
]]) {
1003 qemu_log(",%s", cond_name
[args
[k
++]]);
1005 qemu_log(",$0x%" TCG_PRIlx
, args
[k
++]);
1013 for(; i
< nb_cargs
; i
++) {
1018 qemu_log("$0x%" TCG_PRIlx
, arg
);
1022 args
+= nb_iargs
+ nb_oargs
+ nb_cargs
;
1026 /* we give more priority to constraints with less registers */
1027 static int get_constraint_priority(const TCGOpDef
*def
, int k
)
1029 const TCGArgConstraint
*arg_ct
;
1032 arg_ct
= &def
->args_ct
[k
];
1033 if (arg_ct
->ct
& TCG_CT_ALIAS
) {
1034 /* an alias is equivalent to a single register */
1037 if (!(arg_ct
->ct
& TCG_CT_REG
))
1040 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1041 if (tcg_regset_test_reg(arg_ct
->u
.regs
, i
))
1045 return TCG_TARGET_NB_REGS
- n
+ 1;
1048 /* sort from highest priority to lowest */
1049 static void sort_constraints(TCGOpDef
*def
, int start
, int n
)
1051 int i
, j
, p1
, p2
, tmp
;
1053 for(i
= 0; i
< n
; i
++)
1054 def
->sorted_args
[start
+ i
] = start
+ i
;
1057 for(i
= 0; i
< n
- 1; i
++) {
1058 for(j
= i
+ 1; j
< n
; j
++) {
1059 p1
= get_constraint_priority(def
, def
->sorted_args
[start
+ i
]);
1060 p2
= get_constraint_priority(def
, def
->sorted_args
[start
+ j
]);
1062 tmp
= def
->sorted_args
[start
+ i
];
1063 def
->sorted_args
[start
+ i
] = def
->sorted_args
[start
+ j
];
1064 def
->sorted_args
[start
+ j
] = tmp
;
1070 void tcg_add_target_add_op_defs(const TCGTargetOpDef
*tdefs
)
1078 if (tdefs
->op
== (TCGOpcode
)-1)
1081 assert((unsigned)op
< NB_OPS
);
1082 def
= &tcg_op_defs
[op
];
1083 #if defined(CONFIG_DEBUG_TCG)
1084 /* Duplicate entry in op definitions? */
1088 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
1089 for(i
= 0; i
< nb_args
; i
++) {
1090 ct_str
= tdefs
->args_ct_str
[i
];
1091 /* Incomplete TCGTargetOpDef entry? */
1092 assert(ct_str
!= NULL
);
1093 tcg_regset_clear(def
->args_ct
[i
].u
.regs
);
1094 def
->args_ct
[i
].ct
= 0;
1095 if (ct_str
[0] >= '0' && ct_str
[0] <= '9') {
1097 oarg
= ct_str
[0] - '0';
1098 assert(oarg
< def
->nb_oargs
);
1099 assert(def
->args_ct
[oarg
].ct
& TCG_CT_REG
);
1100 /* TCG_CT_ALIAS is for the output arguments. The input
1101 argument is tagged with TCG_CT_IALIAS. */
1102 def
->args_ct
[i
] = def
->args_ct
[oarg
];
1103 def
->args_ct
[oarg
].ct
= TCG_CT_ALIAS
;
1104 def
->args_ct
[oarg
].alias_index
= i
;
1105 def
->args_ct
[i
].ct
|= TCG_CT_IALIAS
;
1106 def
->args_ct
[i
].alias_index
= oarg
;
1109 if (*ct_str
== '\0')
1113 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
1117 if (target_parse_constraint(&def
->args_ct
[i
], &ct_str
) < 0) {
1118 fprintf(stderr
, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1119 ct_str
, i
, def
->name
);
1127 /* TCGTargetOpDef entry with too much information? */
1128 assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
1130 /* sort the constraints (XXX: this is just a heuristic) */
1131 sort_constraints(def
, 0, def
->nb_oargs
);
1132 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
1138 printf("%s: sorted=", def
->name
);
1139 for(i
= 0; i
< def
->nb_oargs
+ def
->nb_iargs
; i
++)
1140 printf(" %d", def
->sorted_args
[i
]);
1147 #if defined(CONFIG_DEBUG_TCG)
1149 for (op
= 0; op
< ARRAY_SIZE(tcg_op_defs
); op
++) {
1150 const TCGOpDef
*def
= &tcg_op_defs
[op
];
1151 if (op
< INDEX_op_call
1152 || op
== INDEX_op_debug_insn_start
1153 || (def
->flags
& TCG_OPF_NOT_PRESENT
)) {
1154 /* Wrong entry in op definitions? */
1156 fprintf(stderr
, "Invalid op definition for %s\n", def
->name
);
1160 /* Missing entry in op definitions? */
1162 fprintf(stderr
, "Missing op definition for %s\n", def
->name
);
1173 #ifdef USE_LIVENESS_ANALYSIS
1175 /* set a nop for an operation using 'nb_args' */
1176 static inline void tcg_set_nop(TCGContext
*s
, uint16_t *opc_ptr
,
1177 TCGArg
*args
, int nb_args
)
1180 *opc_ptr
= INDEX_op_nop
;
1182 *opc_ptr
= INDEX_op_nopn
;
1184 args
[nb_args
- 1] = nb_args
;
1188 /* liveness analysis: end of function: globals are live, temps are
1190 /* XXX: at this stage, not used as there would be little gains because
1191 most TBs end with a conditional jump. */
1192 static inline void tcg_la_func_end(TCGContext
*s
, uint8_t *dead_temps
)
1194 memset(dead_temps
, 0, s
->nb_globals
);
1195 memset(dead_temps
+ s
->nb_globals
, 1, s
->nb_temps
- s
->nb_globals
);
1198 /* liveness analysis: end of basic block: globals are live, temps are
1199 dead, local temps are live. */
1200 static inline void tcg_la_bb_end(TCGContext
*s
, uint8_t *dead_temps
)
1205 memset(dead_temps
, 0, s
->nb_globals
);
1206 ts
= &s
->temps
[s
->nb_globals
];
1207 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
1216 /* Liveness analysis : update the opc_dead_args array to tell if a
1217 given input arguments is dead. Instructions updating dead
1218 temporaries are removed. */
1219 static void tcg_liveness_analysis(TCGContext
*s
)
1221 int i
, op_index
, nb_args
, nb_iargs
, nb_oargs
, arg
, nb_ops
;
1224 const TCGOpDef
*def
;
1225 uint8_t *dead_temps
;
1226 unsigned int dead_args
;
1228 gen_opc_ptr
++; /* skip end */
1230 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1232 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1234 dead_temps
= tcg_malloc(s
->nb_temps
);
1235 memset(dead_temps
, 1, s
->nb_temps
);
1237 args
= gen_opparam_ptr
;
1238 op_index
= nb_ops
- 1;
1239 while (op_index
>= 0) {
1240 op
= gen_opc_buf
[op_index
];
1241 def
= &tcg_op_defs
[op
];
1249 nb_iargs
= args
[0] & 0xffff;
1250 nb_oargs
= args
[0] >> 16;
1252 call_flags
= args
[nb_oargs
+ nb_iargs
];
1254 /* pure functions can be removed if their result is not
1256 if (call_flags
& TCG_CALL_PURE
) {
1257 for(i
= 0; i
< nb_oargs
; i
++) {
1259 if (!dead_temps
[arg
])
1260 goto do_not_remove_call
;
1262 tcg_set_nop(s
, gen_opc_buf
+ op_index
,
1267 /* output args are dead */
1269 for(i
= 0; i
< nb_oargs
; i
++) {
1271 if (dead_temps
[arg
]) {
1272 dead_args
|= (1 << i
);
1274 dead_temps
[arg
] = 1;
1277 if (!(call_flags
& TCG_CALL_CONST
)) {
1278 /* globals are live (they may be used by the call) */
1279 memset(dead_temps
, 0, s
->nb_globals
);
1282 /* input args are live */
1283 for(i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
1285 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1286 if (dead_temps
[arg
]) {
1287 dead_args
|= (1 << i
);
1289 dead_temps
[arg
] = 0;
1292 s
->op_dead_args
[op_index
] = dead_args
;
1297 case INDEX_op_debug_insn_start
:
1298 args
-= def
->nb_args
;
1304 case INDEX_op_discard
:
1306 /* mark the temporary as dead */
1307 dead_temps
[args
[0]] = 1;
1311 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1313 args
-= def
->nb_args
;
1314 nb_iargs
= def
->nb_iargs
;
1315 nb_oargs
= def
->nb_oargs
;
1317 /* Test if the operation can be removed because all
1318 its outputs are dead. We assume that nb_oargs == 0
1319 implies side effects */
1320 if (!(def
->flags
& TCG_OPF_SIDE_EFFECTS
) && nb_oargs
!= 0) {
1321 for(i
= 0; i
< nb_oargs
; i
++) {
1323 if (!dead_temps
[arg
])
1326 tcg_set_nop(s
, gen_opc_buf
+ op_index
, args
, def
->nb_args
);
1327 #ifdef CONFIG_PROFILER
1333 /* output args are dead */
1335 for(i
= 0; i
< nb_oargs
; i
++) {
1337 if (dead_temps
[arg
]) {
1338 dead_args
|= (1 << i
);
1340 dead_temps
[arg
] = 1;
1343 /* if end of basic block, update */
1344 if (def
->flags
& TCG_OPF_BB_END
) {
1345 tcg_la_bb_end(s
, dead_temps
);
1346 } else if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
1347 /* globals are live */
1348 memset(dead_temps
, 0, s
->nb_globals
);
1351 /* input args are live */
1352 for(i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
1354 if (dead_temps
[arg
]) {
1355 dead_args
|= (1 << i
);
1357 dead_temps
[arg
] = 0;
1359 s
->op_dead_args
[op_index
] = dead_args
;
1366 if (args
!= gen_opparam_buf
)
1370 /* dummy liveness analysis */
1371 static void tcg_liveness_analysis(TCGContext
*s
)
1374 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1376 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1377 memset(s
->op_dead_args
, 0, nb_ops
* sizeof(uint16_t));
1382 static void dump_regs(TCGContext
*s
)
1388 for(i
= 0; i
< s
->nb_temps
; i
++) {
1390 printf(" %10s: ", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), i
));
1391 switch(ts
->val_type
) {
1393 printf("%s", tcg_target_reg_names
[ts
->reg
]);
1396 printf("%d(%s)", (int)ts
->mem_offset
, tcg_target_reg_names
[ts
->mem_reg
]);
1398 case TEMP_VAL_CONST
:
1399 printf("$0x%" TCG_PRIlx
, ts
->val
);
1411 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1412 if (s
->reg_to_temp
[i
] >= 0) {
1414 tcg_target_reg_names
[i
],
1415 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), s
->reg_to_temp
[i
]));
1420 static void check_regs(TCGContext
*s
)
1426 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1427 k
= s
->reg_to_temp
[reg
];
1430 if (ts
->val_type
!= TEMP_VAL_REG
||
1432 printf("Inconsistency for register %s:\n",
1433 tcg_target_reg_names
[reg
]);
1438 for(k
= 0; k
< s
->nb_temps
; k
++) {
1440 if (ts
->val_type
== TEMP_VAL_REG
&&
1442 s
->reg_to_temp
[ts
->reg
] != k
) {
1443 printf("Inconsistency for temp %s:\n",
1444 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), k
));
1446 printf("reg state:\n");
1454 static void temp_allocate_frame(TCGContext
*s
, int temp
)
1457 ts
= &s
->temps
[temp
];
1458 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1459 /* Sparc64 stack is accessed with offset of 2047 */
1460 s
->current_frame_offset
= (s
->current_frame_offset
+
1461 (tcg_target_long
)sizeof(tcg_target_long
) - 1) &
1462 ~(sizeof(tcg_target_long
) - 1);
1464 if (s
->current_frame_offset
+ (tcg_target_long
)sizeof(tcg_target_long
) >
1468 ts
->mem_offset
= s
->current_frame_offset
;
1469 ts
->mem_reg
= s
->frame_reg
;
1470 ts
->mem_allocated
= 1;
1471 s
->current_frame_offset
+= (tcg_target_long
)sizeof(tcg_target_long
);
1474 /* free register 'reg' by spilling the corresponding temporary if necessary */
1475 static void tcg_reg_free(TCGContext
*s
, int reg
)
1480 temp
= s
->reg_to_temp
[reg
];
1482 ts
= &s
->temps
[temp
];
1483 assert(ts
->val_type
== TEMP_VAL_REG
);
1484 if (!ts
->mem_coherent
) {
1485 if (!ts
->mem_allocated
)
1486 temp_allocate_frame(s
, temp
);
1487 tcg_out_st(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1489 ts
->val_type
= TEMP_VAL_MEM
;
1490 s
->reg_to_temp
[reg
] = -1;
1494 /* Allocate a register belonging to reg1 & ~reg2 */
1495 static int tcg_reg_alloc(TCGContext
*s
, TCGRegSet reg1
, TCGRegSet reg2
)
1500 tcg_regset_andnot(reg_ct
, reg1
, reg2
);
1502 /* first try free registers */
1503 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1504 reg
= tcg_target_reg_alloc_order
[i
];
1505 if (tcg_regset_test_reg(reg_ct
, reg
) && s
->reg_to_temp
[reg
] == -1)
1509 /* XXX: do better spill choice */
1510 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1511 reg
= tcg_target_reg_alloc_order
[i
];
1512 if (tcg_regset_test_reg(reg_ct
, reg
)) {
1513 tcg_reg_free(s
, reg
);
/* save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
{
    TCGTemp *ts;
    int reg;

    ts = &s->temps[temp];
    /* fixed-register temps permanently live in their register and are
       never saved to a stack slot */
    if (!ts->fixed_reg) {
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            /* spilling the register also writes the value back */
            tcg_reg_free(s, ts->reg);
            break;
        case TEMP_VAL_DEAD:
            /* nothing to store; the canonical location is now memory */
            ts->val_type = TEMP_VAL_MEM;
            break;
        case TEMP_VAL_CONST:
            /* a constant must be materialized in a scratch register
               before it can be stored to the temp's stack slot */
            reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                allocated_regs);
            if (!ts->mem_allocated)
                temp_allocate_frame(s, temp);
            tcg_out_movi(s, ts->type, reg, ts->val);
            tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
            ts->val_type = TEMP_VAL_MEM;
            break;
        case TEMP_VAL_MEM:
            /* already in its canonical location */
            break;
        default:
            tcg_abort();
        }
    }
}
/* save globals to their canonical location and assume they can be
   modified be the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    /* globals occupy temp indices [0, nb_globals) */
    for(i = 0; i < s->nb_globals; i++) {
        temp_save(s, i, allocated_regs);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    TCGTemp *ts;
    int i;

    /* non-global temps occupy indices [nb_globals, nb_temps) */
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        if (ts->temp_local) {
            /* local temps survive across basic blocks: store them */
            temp_save(s, i, allocated_regs);
        } else {
            /* plain temps die at the block boundary: just release
               any register they hold */
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = -1;
            }
            ts->val_type = TEMP_VAL_DEAD;
        }
    }

    save_globals(s, allocated_regs);
}
1588 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
/* Allocate/record the destination of a movi op: args[0] is the output
   temp index, args[1] the immediate value. */
static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args)
{
    TCGTemp *ots;
    tcg_target_ulong val;

    ots = &s->temps[args[0]];
    val = args[1];

    if (ots->fixed_reg) {
        /* for fixed registers, we do not do any constant
           propagation */
        tcg_out_movi(s, ots->type, ots->reg, val);
    } else {
        /* The movi is not explicitly generated here */
        /* drop any register the output previously occupied and record
           the value as a constant for later propagation */
        if (ots->val_type == TEMP_VAL_REG)
            s->reg_to_temp[ots->reg] = -1;
        ots->val_type = TEMP_VAL_CONST;
        ots->val = val;
    }
}
/* Register allocation for a mov op: args[0] = output temp index,
   args[1] = input temp index.  dead_args is the liveness bitmap for
   this op (bit n set => argument n is dead after the op). */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args,
                              unsigned int dead_args)
{
    TCGTemp *ts, *ots;
    int reg;
    const TCGArgConstraint *arg_ct;

    ots = &s->temps[args[0]];
    ts = &s->temps[args[1]];
    arg_ct = &def->args_ct[0];

    /* XXX: always mark arg dead if IS_DEAD_ARG(1) */
    if (ts->val_type == TEMP_VAL_REG) {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            /* the output simply takes over the input's register */
            if (ots->val_type == TEMP_VAL_REG)
                s->reg_to_temp[ots->reg] = -1;
            reg = ts->reg;
            s->reg_to_temp[reg] = -1;
            ts->val_type = TEMP_VAL_DEAD;
        } else {
            if (ots->val_type == TEMP_VAL_REG) {
                /* reuse the register the output already owns */
                reg = ots->reg;
            } else {
                reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
            }
            if (ts->reg != reg) {
                tcg_out_mov(s, ots->type, reg, ts->reg);
            }
        }
    } else if (ts->val_type == TEMP_VAL_MEM) {
        if (ots->val_type == TEMP_VAL_REG) {
            reg = ots->reg;
        } else {
            reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
        }
        /* load the input directly into the output's register */
        tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        if (ots->fixed_reg) {
            /* fixed outputs get the value materialized immediately */
            reg = ots->reg;
            tcg_out_movi(s, ots->type, reg, ts->val);
        } else {
            /* propagate constant */
            /* no code emitted: the output just becomes the constant */
            if (ots->val_type == TEMP_VAL_REG)
                s->reg_to_temp[ots->reg] = -1;
            ots->val_type = TEMP_VAL_CONST;
            ots->val = ts->val;
            return;
        }
    } else {
        tcg_abort();
    }
    /* common tail: the output now lives in 'reg' and the memory copy
       (if any) is stale */
    s->reg_to_temp[reg] = args[0];
    ots->reg = reg;
    ots->val_type = TEMP_VAL_REG;
    ots->mem_coherent = 0;
}
/* Generic register allocation for one TCG op: satisfy the input
   constraints, free dead temps, satisfy the output constraints, emit
   the target instruction, then fix up fixed-register outputs.
   NOTE(review): several interior lines were lost in extraction; the
   structure below follows the visible control flow — verify against
   the original file. */
static void tcg_reg_alloc_op(TCGContext *s,
                             const TCGOpDef *def, TCGOpcode opc,
                             const TCGArg *args,
                             unsigned int dead_args)
{
    TCGRegSet allocated_regs;
    int i, k, nb_iargs, nb_oargs, reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    /* constant (immediate) args follow the outputs and inputs and are
       passed through to the backend unchanged */
    memcpy(new_args + nb_oargs + nb_iargs,
           args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    /* satisfy input constraints */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    /* sorted_args visits the most constrained arguments first */
    for(k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = args[i];
        arg_ct = &def->args_ct[i];
        ts = &s->temps[arg];
        if (ts->val_type == TEMP_VAL_MEM) {
            /* reload from the stack slot into an acceptable register */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 1;
            s->reg_to_temp[reg] = arg;
        } else if (ts->val_type == TEMP_VAL_CONST) {
            if (tcg_target_const_match(ts->val, arg_ct)) {
                /* constant is OK for instruction */
                const_args[i] = 1;
                new_args[i] = ts->val;
                goto iarg_end;
            } else {
                /* need to move to a register */
                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
                tcg_out_movi(s, ts->type, reg, ts->val);
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = arg;
            }
        }
        assert(ts->val_type == TEMP_VAL_REG);
        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(allocated_regs, reg);
    iarg_end: ;
    }

    if (def->flags & TCG_OPF_BB_END) {
        /* branch-like ops end the basic block: flush everything */
        tcg_reg_alloc_bb_end(s, allocated_regs);
    } else {
        /* mark dead temporaries and free the associated registers */
        for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            arg = args[i];
            if (IS_DEAD_ARG(i)) {
                ts = &s->temps[arg];
                if (!ts->fixed_reg) {
                    if (ts->val_type == TEMP_VAL_REG)
                        s->reg_to_temp[ts->reg] = -1;
                    ts->val_type = TEMP_VAL_DEAD;
                }
            }
        }

        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
                    tcg_reg_free(s, reg);
                }
            }
            /* XXX: for load/store we could do that only for the slow path
               (i.e. when a memory callback is called) */

            /* store globals and free associated registers (we assume the insn
               can modify any global. */
            save_globals(s, allocated_regs);
        }

        /* satisfy the output constraints */
        tcg_regset_set(allocated_regs, s->reserved_regs);
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = args[i];
            arg_ct = &def->args_ct[i];
            ts = &s->temps[arg];
            if (arg_ct->ct & TCG_CT_ALIAS) {
                /* aliased output reuses the matching input's register */
                reg = new_args[arg_ct->alias_index];
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            }
            tcg_regset_set_reg(allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG)
                    s->reg_to_temp[ts->reg] = -1;
                if (IS_DEAD_ARG(i)) {
                    ts->val_type = TEMP_VAL_DEAD;
                } else {
                    ts->val_type = TEMP_VAL_REG;
                    ts->reg = reg;
                    /* temp value is modified, so the value kept in memory is
                       potentially not the same */
                    ts->mem_coherent = 0;
                    s->reg_to_temp[reg] = arg;
                }
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    tcg_out_op(s, opc, new_args, const_args);

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = &s->temps[args[i]];
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
    }
}
1836 #ifdef TCG_TARGET_STACK_GROWSUP
1837 #define STACK_DIR(x) (-(x))
1839 #define STACK_DIR(x) (x)
/* Register allocation for a call op.  Lays out stack arguments, loads
   register arguments into the target calling convention's registers,
   resolves the function address operand, clobbers call-clobbered
   registers, emits the call, then binds the outputs to the return
   registers.  Returns the number of TCGArg words consumed.
   NOTE(review): interior lines were lost in extraction; reconstructed
   from the visible control flow — verify against the original file. */
static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
                              TCGOpcode opc, const TCGArg *args,
                              unsigned int dead_args)
{
    int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
    TCGArg arg, func_arg;
    TCGTemp *ts;
    tcg_target_long stack_offset, call_stack_size, func_addr;
    int const_func_arg, allocate_args;
    TCGRegSet allocated_regs;
    const TCGArgConstraint *arg_ct;

    /* first TCGArg packs the output/input argument counts */
    arg = *args++;
    nb_oargs = arg >> 16;
    nb_iargs = arg & 0xffff;
    /* the last input is the function pointer itself */
    nb_params = nb_iargs - 1;

    flags = args[nb_oargs + nb_iargs];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_params)
        nb_regs = nb_params;

    /* assign stack slots first */
    call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    /* store the arguments that do not fit in registers onto the
       (pre-reserved) call stack area */
    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for(i = nb_regs; i < nb_params; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            if (ts->val_type == TEMP_VAL_REG) {
                tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_MEM) {
                reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: not correct if reading values from the stack */
                tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
                tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_CONST) {
                reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: sign extend may be needed on some targets */
                tcg_out_movi(s, ts->type, reg, ts->val);
                tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
            } else {
                tcg_abort();
            }
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            /* evict whatever currently occupies the ABI register */
            tcg_reg_free(s, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else if (ts->val_type == TEMP_VAL_MEM) {
                tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
            } else if (ts->val_type == TEMP_VAL_CONST) {
                /* XXX: sign extend ? */
                tcg_out_movi(s, ts->type, reg, ts->val);
            } else {
                tcg_abort();
            }
            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* assign function address */
    func_arg = args[nb_oargs + nb_iargs - 1];
    arg_ct = &def->args_ct[0];
    ts = &s->temps[func_arg];
    func_addr = ts->val;
    const_func_arg = 0;
    if (ts->val_type == TEMP_VAL_MEM) {
        reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
        tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
        func_arg = reg;
        tcg_regset_set_reg(allocated_regs, reg);
    } else if (ts->val_type == TEMP_VAL_REG) {
        reg = ts->reg;
        if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* move into a register the backend accepts for calls */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        func_arg = reg;
        tcg_regset_set_reg(allocated_regs, reg);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        if (tcg_target_const_match(func_addr, arg_ct)) {
            /* the backend can call an immediate address directly */
            const_func_arg = 1;
            func_arg = func_addr;
        } else {
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_movi(s, ts->type, reg, func_addr);
            func_arg = reg;
            tcg_regset_set_reg(allocated_regs, reg);
        }
    } else {
        tcg_abort();
    }

    /* mark dead temporaries and free the associated registers */
    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        arg = args[i];
        if (IS_DEAD_ARG(i)) {
            ts = &s->temps[arg];
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG)
                    s->reg_to_temp[ts->reg] = -1;
                ts->val_type = TEMP_VAL_DEAD;
            }
        }
    }

    /* clobber call registers */
    for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
            tcg_reg_free(s, reg);
        }
    }

    /* store globals and free associated registers (we assume the call
       can modify any global. */
    if (!(flags & TCG_CALL_CONST)) {
        save_globals(s, allocated_regs);
    }

    tcg_out_op(s, opc, &func_arg, &const_func_arg);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        assert(s->reg_to_temp[reg] == -1);
        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG)
                s->reg_to_temp[ts->reg] = -1;
            if (IS_DEAD_ARG(i)) {
                ts->val_type = TEMP_VAL_DEAD;
            } else {
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = arg;
            }
        }
    }

    /* total number of TCGArg words consumed by this call op */
    return nb_iargs + nb_oargs + def->nb_cargs + 1;
}
2021 #ifdef CONFIG_PROFILER
2023 static int64_t tcg_table_op_count
[NB_OPS
];
2025 static void dump_op_count(void)
2029 f
= fopen("/tmp/op.log", "w");
2030 for(i
= INDEX_op_end
; i
< NB_OPS
; i
++) {
2031 fprintf(f
, "%s %" PRId64
"\n", tcg_op_defs
[i
].name
, tcg_table_op_count
[i
]);
/* Core code-generation loop: run the optimizer and liveness analysis
   over the intermediate op stream, then walk the ops performing
   register allocation and emitting host code into gen_code_buf.
   If search_pc >= 0, stop and return the index of the op whose emitted
   code covers that host-code offset; otherwise return -1 at the end.
   NOTE(review): interior lines were lost in extraction; reconstructed
   from the visible control flow — verify against the original file. */
static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
                                      long search_pc)
{
    TCGOpcode opc;
    int op_index;
    const TCGOpDef *def;
    unsigned int dead_args;
    const TCGArg *args;

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

#ifdef CONFIG_PROFILER
    s->opt_time -= profile_getclock();
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    gen_opparam_ptr =
        tcg_optimize(s, gen_opc_ptr, gen_opparam_buf, tcg_op_defs);
#endif

#ifdef CONFIG_PROFILER
    s->opt_time += profile_getclock();
    s->la_time -= profile_getclock();
#endif

#ifdef USE_LIVENESS_ANALYSIS
    tcg_liveness_analysis(s);
#endif

#ifdef CONFIG_PROFILER
    s->la_time += profile_getclock();
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = gen_code_buf;
    s->code_ptr = gen_code_buf;

    args = gen_opparam_buf;
    op_index = 0;

    for(;;) {
        opc = gen_opc_buf[op_index];
#ifdef CONFIG_PROFILER
        tcg_table_op_count[opc]++;
#endif
        def = &tcg_op_defs[opc];
#if 0
        printf("%s: %d %d %d\n", def->name,
               def->nb_oargs, def->nb_iargs, def->nb_cargs);
#endif
        switch(opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
            /* moves get a specialized allocator */
            dead_args = s->op_dead_args[op_index];
            tcg_reg_alloc_mov(s, def, args, dead_args);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
            tcg_reg_alloc_movi(s, args);
            break;
        case INDEX_op_debug_insn_start:
            /* debug instruction */
            break;
        case INDEX_op_nop:
        case INDEX_op_nop1:
        case INDEX_op_nop2:
        case INDEX_op_nop3:
            break;
        case INDEX_op_nopn:
            /* variable-size nop: args[0] holds its own arg count */
            args += args[0];
            goto next;
        case INDEX_op_discard:
            {
                TCGTemp *ts;
                ts = &s->temps[args[0]];
                /* mark the temporary as dead */
                if (!ts->fixed_reg) {
                    if (ts->val_type == TEMP_VAL_REG)
                        s->reg_to_temp[ts->reg] = -1;
                    ts->val_type = TEMP_VAL_DEAD;
                }
            }
            break;
        case INDEX_op_set_label:
            /* a label ends the basic block */
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, args[0], s->code_ptr);
            break;
        case INDEX_op_call:
            dead_args = s->op_dead_args[op_index];
            /* calls consume a variable number of args */
            args += tcg_reg_alloc_call(s, def, opc, args, dead_args);
            goto next;
        case INDEX_op_end:
            goto the_end;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            if (def->flags & TCG_OPF_NOT_PRESENT) {
                tcg_abort();
            }
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            dead_args = s->op_dead_args[op_index];
            tcg_reg_alloc_op(s, def, opc, args, dead_args);
            break;
        }
        args += def->nb_args;
    next:
        if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
            return op_index;
        }
        op_index++;
#ifndef NDEBUG
        check_regs(s);
#endif
    }
 the_end:
    return -1;
}
/* Translate the current op stream into host code in gen_code_buf and
   return the number of host-code bytes generated. */
int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
{
#ifdef CONFIG_PROFILER
    {
        int n;
        /* number of ops in this translation block */
        n = (gen_opc_ptr - gen_opc_buf);
        s->op_count += n;
        if (n > s->op_count_max)
            s->op_count_max = n;

        s->temp_count += s->nb_temps;
        if (s->nb_temps > s->temp_count_max)
            s->temp_count_max = s->nb_temps;
    }
#endif

    /* -1: no search_pc, generate to the end */
    tcg_gen_code_common(s, gen_code_buf, -1);

    /* flush instruction cache */
    flush_icache_range((tcg_target_ulong)gen_code_buf,
                       (tcg_target_ulong)s->code_ptr);

    return s->code_ptr - gen_code_buf;
}
/* Return the index of the micro operation such as the pc after is <
   offset bytes from the start of the TB. The contents of gen_code_buf must
   not be changed, though writing the same values is ok.
   Return -1 if not found. */
int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
{
    /* re-run generation, stopping at the op covering 'offset' */
    return tcg_gen_code_common(s, gen_code_buf, offset);
}
2205 #ifdef CONFIG_PROFILER
/* Print accumulated JIT profiling statistics (profiler builds).
   NOTE(review): several argument lines and the exact column padding of
   the format strings were lost in extraction; reconstructed from the
   visible fragments — verify spacing against the original file. */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tot;

    tot = s->interm_time + s->code_time;
    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                s->tb_count,
                s->tb_count1 - s->tb_count,
                s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                s->tb_count ?
                (double)s->del_op_count / s->tb_count : 0);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                s->tb_count ?
                (double)s->temp_count / s->tb_count : 0,
                s->temp_count_max);

    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    /* avoid division by zero in the percentage lines below */
    if (tot == 0)
        tot = 1;
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);

    dump_op_count();
}
/* Non-profiler stub: same interface as the CONFIG_PROFILER variant,
   but only reports that the profiler is unavailable. */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
2259 #ifdef ELF_HOST_MACHINE
2260 /* In order to use this feature, the backend needs to do three things:
2262 (1) Define ELF_HOST_MACHINE to indicate both what value to
2263 put into the ELF image and to indicate support for the feature.
2265 (2) Define tcg_register_jit. This should create a buffer containing
2266 the contents of a .debug_frame section that describes the post-
2267 prologue unwind info for the tcg machine.
2269 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
/* These declarations implement GDB's in-process JIT debugging
   interface: GDB looks up __jit_debug_descriptor by name and sets a
   breakpoint on __jit_debug_register_code.  The layouts are dictated
   by the GDB documentation and must not be altered. */

/* NOTE(review): this enum was dropped by the extraction; reconstructed
   per the GDB JIT interface docs (JIT_REGISTER_FN is used below in
   tcg_register_jit_int) — confirm against the original file. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    /* NOTE(review): 'version' field reconstructed (dropped line); the
       { 1, 0, 0, 0 } initializer below implies it precedes action_flag */
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

/* must stay out-of-line so GDB can breakpoint it */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2303 /* End GDB interface. */
/* Look up 'str' in an ELF-style string table and return its byte
   offset.  Offset 0 holds the leading NUL, so real entries start at
   offset 1.  The caller guarantees the string is present; an absent
   string would scan past the table (same contract as before). */
static int find_string(const char *strtab, const char *str)
{
    const char *cur;

    for (cur = strtab + 1; strcmp(cur, str) != 0; cur += strlen(cur) + 1) {
        continue;
    }
    return cur - strtab;
}
/* Build an in-memory fake ELF image (headers, minimal DWARF .debug_info/
   .debug_abbrev, the caller-provided .debug_frame, a symtab naming
   code_gen_buffer) describing the JIT code buffer, and hand it to GDB
   via the __jit_debug_* interface.
   NOTE(review): parts of the DebugInfo/ElfImage struct definitions and
   some initializer lines were dropped by the extraction; reconstructed
   from the visible fields — verify against the original file. */
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 void *debug_frame, size_t debug_frame_size)
{
    /* Hand-rolled DWARF .debug_info section: one compile unit DIE and
       one subprogram DIE covering the whole code buffer. */
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    /* The whole image is one contiguous allocation; section offsets are
       computed with offsetof below, and .debug_frame is appended after. */
    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            /* last section is .strtab, which doubles as shstrtab */
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                /* appended immediately after the fixed-size image */
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;

    img = g_malloc(img_size);
    *img = img_template;
    /* append the backend-provided .debug_frame after the template */
    memcpy(img + 1, debug_frame, debug_frame_size);

    /* patch in the actual address/size of the code buffer */
    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    /* publish the entry and notify GDB */
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
/* No support for the feature. Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(void *buf, size_t size,
                                 void *debug_frame, size_t debug_frame_size)
{
    /* intentionally empty: no ELF_HOST_MACHINE support on this host */
}

void tcg_register_jit(void *buf, size_t buf_size)
{
    /* intentionally empty: no ELF_HOST_MACHINE support on this host */
}
2521 #endif /* ELF_HOST_MACHINE */