1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
28
29 #include "config.h"
30
31 /* Define to dump the ELF file used to communicate with GDB. */
32 #undef DEBUG_JIT
33
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
38
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
43
44 /* Note: the long-term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
46 instructions */
47 #define NO_CPU_IO_DEFS
48 #include "cpu.h"
49
50 #include "tcg-op.h"
51
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
54 #else
55 # define ELF_CLASS ELFCLASS32
56 #endif
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
59 #else
60 # define ELF_DATA ELFDATA2LSB
61 #endif
62
63 #include "elf.h"
64
65 /* Forward declarations for functions declared in tcg-target.c and used here. */
66 static void tcg_target_init(TCGContext *s);
67 static void tcg_target_qemu_prologue(TCGContext *s);
68 static void patch_reloc(uint8_t *code_ptr, int type,
69 tcg_target_long value, tcg_target_long addend);
70
71 static void tcg_register_jit_int(void *buf, size_t size,
72 void *debug_frame, size_t debug_frame_size)
73 __attribute__((unused));
74
75 /* Forward declarations for functions declared and used in tcg-target.c. */
76 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
77 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
78 tcg_target_long arg2);
79 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
80 static void tcg_out_movi(TCGContext *s, TCGType type,
81 TCGReg ret, tcg_target_long arg);
82 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
83 const int *const_args);
84 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
85 tcg_target_long arg2);
86 static int tcg_target_const_match(tcg_target_long val,
87 const TCGArgConstraint *arg_ct);
88
89 TCGOpDef tcg_op_defs[] = {
90 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
91 #include "tcg-opc.h"
92 #undef DEF
93 };
94 const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
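/* For illustration only: an entry in tcg-opc.h has the form
       DEF(add_i32, 1, 2, 0, 0)
   (one output, two inputs, no constant arguments, no flags), which the
   DEF macro above expands to
       { "add_i32", 1, 2, 0, 3, 0 },
   The authoritative opcode list and argument counts live in tcg-opc.h;
   this entry is just an example. */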
95
96 static TCGRegSet tcg_target_available_regs[2];
97 static TCGRegSet tcg_target_call_clobber_regs;
98
99 /* XXX: move these inside the context */
100 uint16_t *gen_opc_ptr;
101 TCGArg *gen_opparam_ptr;
102
103 static inline void tcg_out8(TCGContext *s, uint8_t v)
104 {
105 *s->code_ptr++ = v;
106 }
107
108 static inline void tcg_out16(TCGContext *s, uint16_t v)
109 {
110 *(uint16_t *)s->code_ptr = v;
111 s->code_ptr += 2;
112 }
113
114 static inline void tcg_out32(TCGContext *s, uint32_t v)
115 {
116 *(uint32_t *)s->code_ptr = v;
117 s->code_ptr += 4;
118 }
119
120 /* label relocation processing */
121
122 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
123 int label_index, long addend)
124 {
125 TCGLabel *l;
126 TCGRelocation *r;
127
128 l = &s->labels[label_index];
129 if (l->has_value) {
130 /* FIXME: This may break relocations on RISC targets that
131 modify instruction fields in place. The caller may not have
132 written the initial value. */
133 patch_reloc(code_ptr, type, l->u.value, addend);
134 } else {
135 /* add a new relocation entry */
136 r = tcg_malloc(sizeof(TCGRelocation));
137 r->type = type;
138 r->ptr = code_ptr;
139 r->addend = addend;
140 r->next = l->u.first_reloc;
141 l->u.first_reloc = r;
142 }
143 }
144
145 static void tcg_out_label(TCGContext *s, int label_index, void *ptr)
146 {
147 TCGLabel *l;
148 TCGRelocation *r;
149 tcg_target_long value = (tcg_target_long)ptr;
150
151 l = &s->labels[label_index];
152 if (l->has_value)
153 tcg_abort();
154 r = l->u.first_reloc;
155 while (r != NULL) {
156 patch_reloc(r->ptr, r->type, value, r->addend);
157 r = r->next;
158 }
159 l->has_value = 1;
160 l->u.value = value;
161 }
162
163 int gen_new_label(void)
164 {
165 TCGContext *s = &tcg_ctx;
166 int idx;
167 TCGLabel *l;
168
169 if (s->nb_labels >= TCG_MAX_LABELS)
170 tcg_abort();
171 idx = s->nb_labels++;
172 l = &s->labels[idx];
173 l->has_value = 0;
174 l->u.first_reloc = NULL;
175 return idx;
176 }
177
178 #include "tcg-target.c"
179
180 /* pool based memory allocation */
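/* Allocation strategy: requests of at most TCG_POOL_CHUNK_SIZE bytes are
   carved sequentially out of a chain of fixed-size chunks that is kept
   and reused across tcg_pool_reset() calls; larger requests get a
   dedicated chunk on the pool_first_large list and are freed at the
   next reset. */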
181 void *tcg_malloc_internal(TCGContext *s, int size)
182 {
183 TCGPool *p;
184 int pool_size;
185
186 if (size > TCG_POOL_CHUNK_SIZE) {
187 /* big malloc: insert a new pool (XXX: could optimize) */
188 p = g_malloc(sizeof(TCGPool) + size);
189 p->size = size;
190 p->next = s->pool_first_large;
191 s->pool_first_large = p;
192 return p->data;
193 } else {
194 p = s->pool_current;
195 if (!p) {
196 p = s->pool_first;
197 if (!p)
198 goto new_pool;
199 } else {
200 if (!p->next) {
201 new_pool:
202 pool_size = TCG_POOL_CHUNK_SIZE;
203 p = g_malloc(sizeof(TCGPool) + pool_size);
204 p->size = pool_size;
205 p->next = NULL;
206 if (s->pool_current)
207 s->pool_current->next = p;
208 else
209 s->pool_first = p;
210 } else {
211 p = p->next;
212 }
213 }
214 }
215 s->pool_current = p;
216 s->pool_cur = p->data + size;
217 s->pool_end = p->data + p->size;
218 return p->data;
219 }
220
221 void tcg_pool_reset(TCGContext *s)
222 {
223 TCGPool *p, *t;
224 for (p = s->pool_first_large; p; p = t) {
225 t = p->next;
226 g_free(p);
227 }
228 s->pool_first_large = NULL;
229 s->pool_cur = s->pool_end = NULL;
230 s->pool_current = NULL;
231 }
232
233 void tcg_context_init(TCGContext *s)
234 {
235 int op, total_args, n;
236 TCGOpDef *def;
237 TCGArgConstraint *args_ct;
238 int *sorted_args;
239
240 memset(s, 0, sizeof(*s));
241 s->nb_globals = 0;
242
243 /* Count total number of arguments and allocate the corresponding
244 space */
245 total_args = 0;
246 for(op = 0; op < NB_OPS; op++) {
247 def = &tcg_op_defs[op];
248 n = def->nb_iargs + def->nb_oargs;
249 total_args += n;
250 }
251
252 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
253 sorted_args = g_malloc(sizeof(int) * total_args);
254
255 for(op = 0; op < NB_OPS; op++) {
256 def = &tcg_op_defs[op];
257 def->args_ct = args_ct;
258 def->sorted_args = sorted_args;
259 n = def->nb_iargs + def->nb_oargs;
260 sorted_args += n;
261 args_ct += n;
262 }
263
264 tcg_target_init(s);
265 }
266
267 void tcg_prologue_init(TCGContext *s)
268 {
269 /* init global prologue and epilogue */
270 s->code_buf = code_gen_prologue;
271 s->code_ptr = s->code_buf;
272 tcg_target_qemu_prologue(s);
273 flush_icache_range((tcg_target_ulong)s->code_buf,
274 (tcg_target_ulong)s->code_ptr);
275 }
276
277 void tcg_set_frame(TCGContext *s, int reg,
278 tcg_target_long start, tcg_target_long size)
279 {
280 s->frame_start = start;
281 s->frame_end = start + size;
282 s->frame_reg = reg;
283 }
284
285 void tcg_func_start(TCGContext *s)
286 {
287 int i;
288 tcg_pool_reset(s);
289 s->nb_temps = s->nb_globals;
290 for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
291 s->first_free_temp[i] = -1;
292 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
293 s->nb_labels = 0;
294 s->current_frame_offset = s->frame_start;
295
296 #ifdef CONFIG_DEBUG_TCG
297 s->goto_tb_issue_mask = 0;
298 #endif
299
300 gen_opc_ptr = gen_opc_buf;
301 gen_opparam_ptr = gen_opparam_buf;
302 }
303
304 static inline void tcg_temp_alloc(TCGContext *s, int n)
305 {
306 if (n > TCG_MAX_TEMPS)
307 tcg_abort();
308 }
309
310 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
311 const char *name)
312 {
313 TCGContext *s = &tcg_ctx;
314 TCGTemp *ts;
315 int idx;
316
317 #if TCG_TARGET_REG_BITS == 32
318 if (type != TCG_TYPE_I32)
319 tcg_abort();
320 #endif
321 if (tcg_regset_test_reg(s->reserved_regs, reg))
322 tcg_abort();
323 idx = s->nb_globals;
324 tcg_temp_alloc(s, s->nb_globals + 1);
325 ts = &s->temps[s->nb_globals];
326 ts->base_type = type;
327 ts->type = type;
328 ts->fixed_reg = 1;
329 ts->reg = reg;
330 ts->name = name;
331 s->nb_globals++;
332 tcg_regset_set_reg(s->reserved_regs, reg);
333 return idx;
334 }
335
336 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
337 {
338 int idx;
339
340 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
341 return MAKE_TCGV_I32(idx);
342 }
343
344 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
345 {
346 int idx;
347
348 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
349 return MAKE_TCGV_I64(idx);
350 }
351
352 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
353 tcg_target_long offset,
354 const char *name)
355 {
356 TCGContext *s = &tcg_ctx;
357 TCGTemp *ts;
358 int idx;
359
360 idx = s->nb_globals;
361 #if TCG_TARGET_REG_BITS == 32
362 if (type == TCG_TYPE_I64) {
363 char buf[64];
364 tcg_temp_alloc(s, s->nb_globals + 2);
365 ts = &s->temps[s->nb_globals];
366 ts->base_type = type;
367 ts->type = TCG_TYPE_I32;
368 ts->fixed_reg = 0;
369 ts->mem_allocated = 1;
370 ts->mem_reg = reg;
371 #ifdef TCG_TARGET_WORDS_BIGENDIAN
372 ts->mem_offset = offset + 4;
373 #else
374 ts->mem_offset = offset;
375 #endif
376 pstrcpy(buf, sizeof(buf), name);
377 pstrcat(buf, sizeof(buf), "_0");
378 ts->name = strdup(buf);
379 ts++;
380
381 ts->base_type = type;
382 ts->type = TCG_TYPE_I32;
383 ts->fixed_reg = 0;
384 ts->mem_allocated = 1;
385 ts->mem_reg = reg;
386 #ifdef TCG_TARGET_WORDS_BIGENDIAN
387 ts->mem_offset = offset;
388 #else
389 ts->mem_offset = offset + 4;
390 #endif
391 pstrcpy(buf, sizeof(buf), name);
392 pstrcat(buf, sizeof(buf), "_1");
393 ts->name = strdup(buf);
394
395 s->nb_globals += 2;
396 } else
397 #endif
398 {
399 tcg_temp_alloc(s, s->nb_globals + 1);
400 ts = &s->temps[s->nb_globals];
401 ts->base_type = type;
402 ts->type = type;
403 ts->fixed_reg = 0;
404 ts->mem_allocated = 1;
405 ts->mem_reg = reg;
406 ts->mem_offset = offset;
407 ts->name = name;
408 s->nb_globals++;
409 }
410 return idx;
411 }
412
413 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
414 const char *name)
415 {
416 int idx;
417
418 idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
419 return MAKE_TCGV_I32(idx);
420 }
421
422 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
423 const char *name)
424 {
425 int idx;
426
427 idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
428 return MAKE_TCGV_I64(idx);
429 }
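/* Illustrative use only: a target front end typically creates its guest
   register globals with something like
       cpu_foo = tcg_global_mem_new_i32(TCG_AREG0,
                                        offsetof(CPUArchState, foo),
                                        "foo");
   where TCG_AREG0 holds the env pointer; 'cpu_foo' and the 'foo' field
   are hypothetical names, not part of this file. */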
430
431 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
432 {
433 TCGContext *s = &tcg_ctx;
434 TCGTemp *ts;
435 int idx, k;
436
437 k = type;
438 if (temp_local)
439 k += TCG_TYPE_COUNT;
440 idx = s->first_free_temp[k];
441 if (idx != -1) {
442 /* There is already an available temp with the
443 right type */
444 ts = &s->temps[idx];
445 s->first_free_temp[k] = ts->next_free_temp;
446 ts->temp_allocated = 1;
447 assert(ts->temp_local == temp_local);
448 } else {
449 idx = s->nb_temps;
450 #if TCG_TARGET_REG_BITS == 32
451 if (type == TCG_TYPE_I64) {
452 tcg_temp_alloc(s, s->nb_temps + 2);
453 ts = &s->temps[s->nb_temps];
454 ts->base_type = type;
455 ts->type = TCG_TYPE_I32;
456 ts->temp_allocated = 1;
457 ts->temp_local = temp_local;
458 ts->name = NULL;
459 ts++;
460 ts->base_type = TCG_TYPE_I32;
461 ts->type = TCG_TYPE_I32;
462 ts->temp_allocated = 1;
463 ts->temp_local = temp_local;
464 ts->name = NULL;
465 s->nb_temps += 2;
466 } else
467 #endif
468 {
469 tcg_temp_alloc(s, s->nb_temps + 1);
470 ts = &s->temps[s->nb_temps];
471 ts->base_type = type;
472 ts->type = type;
473 ts->temp_allocated = 1;
474 ts->temp_local = temp_local;
475 ts->name = NULL;
476 s->nb_temps++;
477 }
478 }
479
480 #if defined(CONFIG_DEBUG_TCG)
481 s->temps_in_use++;
482 #endif
483 return idx;
484 }
485
486 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
487 {
488 int idx;
489
490 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
491 return MAKE_TCGV_I32(idx);
492 }
493
494 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
495 {
496 int idx;
497
498 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
499 return MAKE_TCGV_I64(idx);
500 }
501
502 static inline void tcg_temp_free_internal(int idx)
503 {
504 TCGContext *s = &tcg_ctx;
505 TCGTemp *ts;
506 int k;
507
508 #if defined(CONFIG_DEBUG_TCG)
509 s->temps_in_use--;
510 if (s->temps_in_use < 0) {
511 fprintf(stderr, "More temporaries freed than allocated!\n");
512 }
513 #endif
514
515 assert(idx >= s->nb_globals && idx < s->nb_temps);
516 ts = &s->temps[idx];
517 assert(ts->temp_allocated != 0);
518 ts->temp_allocated = 0;
519 k = ts->base_type;
520 if (ts->temp_local)
521 k += TCG_TYPE_COUNT;
522 ts->next_free_temp = s->first_free_temp[k];
523 s->first_free_temp[k] = idx;
524 }
525
526 void tcg_temp_free_i32(TCGv_i32 arg)
527 {
528 tcg_temp_free_internal(GET_TCGV_I32(arg));
529 }
530
531 void tcg_temp_free_i64(TCGv_i64 arg)
532 {
533 tcg_temp_free_internal(GET_TCGV_I64(arg));
534 }
535
536 TCGv_i32 tcg_const_i32(int32_t val)
537 {
538 TCGv_i32 t0;
539 t0 = tcg_temp_new_i32();
540 tcg_gen_movi_i32(t0, val);
541 return t0;
542 }
543
544 TCGv_i64 tcg_const_i64(int64_t val)
545 {
546 TCGv_i64 t0;
547 t0 = tcg_temp_new_i64();
548 tcg_gen_movi_i64(t0, val);
549 return t0;
550 }
551
552 TCGv_i32 tcg_const_local_i32(int32_t val)
553 {
554 TCGv_i32 t0;
555 t0 = tcg_temp_local_new_i32();
556 tcg_gen_movi_i32(t0, val);
557 return t0;
558 }
559
560 TCGv_i64 tcg_const_local_i64(int64_t val)
561 {
562 TCGv_i64 t0;
563 t0 = tcg_temp_local_new_i64();
564 tcg_gen_movi_i64(t0, val);
565 return t0;
566 }
567
568 #if defined(CONFIG_DEBUG_TCG)
569 void tcg_clear_temp_count(void)
570 {
571 TCGContext *s = &tcg_ctx;
572 s->temps_in_use = 0;
573 }
574
575 int tcg_check_temp_count(void)
576 {
577 TCGContext *s = &tcg_ctx;
578 if (s->temps_in_use) {
579 /* Clear the count so that we don't give another
580 * warning immediately next time around.
581 */
582 s->temps_in_use = 0;
583 return 1;
584 }
585 return 0;
586 }
587 #endif
588
589 void tcg_register_helper(void *func, const char *name)
590 {
591 TCGContext *s = &tcg_ctx;
592 int n;
593 if ((s->nb_helpers + 1) > s->allocated_helpers) {
594 n = s->allocated_helpers;
595 if (n == 0) {
596 n = 4;
597 } else {
598 n *= 2;
599 }
600 s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
601 s->allocated_helpers = n;
602 }
603 s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
604 s->helpers[s->nb_helpers].name = name;
605 s->nb_helpers++;
606 }
607
608 /* Note: we split the 64 bit args into 32 bit halves and do some alignment
609 and endian swap. Maybe it would be better to do the alignment
610 and endian swap in tcg_reg_alloc_call(). */
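/* The 'sizemask' argument packs width/signedness information: bit 0 is
   set when the return value is 64 bit, and for argument i bit 2*(i+1)
   marks a 64 bit argument while bit 2*(i+1)+1 marks a signed one, as
   tested below. */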
611 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
612 int sizemask, TCGArg ret, int nargs, TCGArg *args)
613 {
614 int i;
615 int real_args;
616 int nb_rets;
617 TCGArg *nparam;
618
619 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
620 for (i = 0; i < nargs; ++i) {
621 int is_64bit = sizemask & (1 << (i+1)*2);
622 int is_signed = sizemask & (2 << (i+1)*2);
623 if (!is_64bit) {
624 TCGv_i64 temp = tcg_temp_new_i64();
625 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
626 if (is_signed) {
627 tcg_gen_ext32s_i64(temp, orig);
628 } else {
629 tcg_gen_ext32u_i64(temp, orig);
630 }
631 args[i] = GET_TCGV_I64(temp);
632 }
633 }
634 #endif /* TCG_TARGET_EXTEND_ARGS */
635
636 *gen_opc_ptr++ = INDEX_op_call;
637 nparam = gen_opparam_ptr++;
638 if (ret != TCG_CALL_DUMMY_ARG) {
639 #if TCG_TARGET_REG_BITS < 64
640 if (sizemask & 1) {
641 #ifdef TCG_TARGET_WORDS_BIGENDIAN
642 *gen_opparam_ptr++ = ret + 1;
643 *gen_opparam_ptr++ = ret;
644 #else
645 *gen_opparam_ptr++ = ret;
646 *gen_opparam_ptr++ = ret + 1;
647 #endif
648 nb_rets = 2;
649 } else
650 #endif
651 {
652 *gen_opparam_ptr++ = ret;
653 nb_rets = 1;
654 }
655 } else {
656 nb_rets = 0;
657 }
658 real_args = 0;
659 for (i = 0; i < nargs; i++) {
660 #if TCG_TARGET_REG_BITS < 64
661 int is_64bit = sizemask & (1 << (i+1)*2);
662 if (is_64bit) {
663 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
664 /* some targets want aligned 64 bit args */
665 if (real_args & 1) {
666 *gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
667 real_args++;
668 }
669 #endif
670 /* If stack grows up, then we will be placing successive
671 arguments at lower addresses, which means we need to
672 reverse the order compared to how we would normally
673 treat either big or little-endian. For those arguments
674 that will wind up in registers, this still works for
675 HPPA (the only current STACK_GROWSUP target) since the
676 argument registers are *also* allocated in decreasing
677 order. If another such target is added, this logic may
678 have to get more complicated to differentiate between
679 stack arguments and register arguments. */
680 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
681 *gen_opparam_ptr++ = args[i] + 1;
682 *gen_opparam_ptr++ = args[i];
683 #else
684 *gen_opparam_ptr++ = args[i];
685 *gen_opparam_ptr++ = args[i] + 1;
686 #endif
687 real_args += 2;
688 continue;
689 }
690 #endif /* TCG_TARGET_REG_BITS < 64 */
691
692 *gen_opparam_ptr++ = args[i];
693 real_args++;
694 }
695 *gen_opparam_ptr++ = GET_TCGV_PTR(func);
696
697 *gen_opparam_ptr++ = flags;
698
699 *nparam = (nb_rets << 16) | (real_args + 1);
700
701 /* total parameters, needed to go backward in the instruction stream */
702 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
703
704 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
705 for (i = 0; i < nargs; ++i) {
706 int is_64bit = sizemask & (1 << (i+1)*2);
707 if (!is_64bit) {
708 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
709 tcg_temp_free_i64(temp);
710 }
711 }
712 #endif /* TCG_TARGET_EXTEND_ARGS */
713 }
714
715 #if TCG_TARGET_REG_BITS == 32
716 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
717 int c, int right, int arith)
718 {
719 if (c == 0) {
720 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
721 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
722 } else if (c >= 32) {
723 c -= 32;
724 if (right) {
725 if (arith) {
726 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
727 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
728 } else {
729 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
730 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
731 }
732 } else {
733 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
734 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
735 }
736 } else {
737 TCGv_i32 t0, t1;
738
739 t0 = tcg_temp_new_i32();
740 t1 = tcg_temp_new_i32();
741 if (right) {
742 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
743 if (arith)
744 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
745 else
746 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
747 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
748 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
749 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
750 } else {
751 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
752 /* Note: ret can be the same as arg1, so we use t1 */
753 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
754 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
755 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
756 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
757 }
758 tcg_temp_free_i32(t0);
759 tcg_temp_free_i32(t1);
760 }
761 }
762 #endif
763
764
765 static void tcg_reg_alloc_start(TCGContext *s)
766 {
767 int i;
768 TCGTemp *ts;
769 for(i = 0; i < s->nb_globals; i++) {
770 ts = &s->temps[i];
771 if (ts->fixed_reg) {
772 ts->val_type = TEMP_VAL_REG;
773 } else {
774 ts->val_type = TEMP_VAL_MEM;
775 }
776 }
777 for(i = s->nb_globals; i < s->nb_temps; i++) {
778 ts = &s->temps[i];
779 ts->val_type = TEMP_VAL_DEAD;
780 ts->mem_allocated = 0;
781 ts->fixed_reg = 0;
782 }
783 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
784 s->reg_to_temp[i] = -1;
785 }
786 }
787
788 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
789 int idx)
790 {
791 TCGTemp *ts;
792
793 assert(idx >= 0 && idx < s->nb_temps);
794 ts = &s->temps[idx];
795 assert(ts);
796 if (idx < s->nb_globals) {
797 pstrcpy(buf, buf_size, ts->name);
798 } else {
799 if (ts->temp_local)
800 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
801 else
802 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
803 }
804 return buf;
805 }
806
807 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
808 {
809 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
810 }
811
812 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
813 {
814 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
815 }
816
817 static int helper_cmp(const void *p1, const void *p2)
818 {
819 const TCGHelperInfo *th1 = p1;
820 const TCGHelperInfo *th2 = p2;
821 if (th1->func < th2->func)
822 return -1;
823 else if (th1->func == th2->func)
824 return 0;
825 else
826 return 1;
827 }
828
829 /* find helper definition (Note: A hash table would be better) */
830 static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
831 {
832 int m, m_min, m_max;
833 TCGHelperInfo *th;
834 tcg_target_ulong v;
835
836 if (unlikely(!s->helpers_sorted)) {
837 qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
838 helper_cmp);
839 s->helpers_sorted = 1;
840 }
841
842 /* binary search */
843 m_min = 0;
844 m_max = s->nb_helpers - 1;
845 while (m_min <= m_max) {
846 m = (m_min + m_max) >> 1;
847 th = &s->helpers[m];
848 v = th->func;
849 if (v == val)
850 return th;
851 else if (val < v) {
852 m_max = m - 1;
853 } else {
854 m_min = m + 1;
855 }
856 }
857 return NULL;
858 }
859
860 static const char * const cond_name[] =
861 {
862 [TCG_COND_NEVER] = "never",
863 [TCG_COND_ALWAYS] = "always",
864 [TCG_COND_EQ] = "eq",
865 [TCG_COND_NE] = "ne",
866 [TCG_COND_LT] = "lt",
867 [TCG_COND_GE] = "ge",
868 [TCG_COND_LE] = "le",
869 [TCG_COND_GT] = "gt",
870 [TCG_COND_LTU] = "ltu",
871 [TCG_COND_GEU] = "geu",
872 [TCG_COND_LEU] = "leu",
873 [TCG_COND_GTU] = "gtu"
874 };
875
876 void tcg_dump_ops(TCGContext *s)
877 {
878 const uint16_t *opc_ptr;
879 const TCGArg *args;
880 TCGArg arg;
881 TCGOpcode c;
882 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
883 const TCGOpDef *def;
884 char buf[128];
885
886 first_insn = 1;
887 opc_ptr = gen_opc_buf;
888 args = gen_opparam_buf;
889 while (opc_ptr < gen_opc_ptr) {
890 c = *opc_ptr++;
891 def = &tcg_op_defs[c];
892 if (c == INDEX_op_debug_insn_start) {
893 uint64_t pc;
894 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
895 pc = ((uint64_t)args[1] << 32) | args[0];
896 #else
897 pc = args[0];
898 #endif
899 if (!first_insn) {
900 qemu_log("\n");
901 }
902 qemu_log(" ---- 0x%" PRIx64, pc);
903 first_insn = 0;
904 nb_oargs = def->nb_oargs;
905 nb_iargs = def->nb_iargs;
906 nb_cargs = def->nb_cargs;
907 } else if (c == INDEX_op_call) {
908 TCGArg arg;
909
910 /* variable number of arguments */
911 arg = *args++;
912 nb_oargs = arg >> 16;
913 nb_iargs = arg & 0xffff;
914 nb_cargs = def->nb_cargs;
915
916 qemu_log(" %s ", def->name);
917
918 /* function name */
919 qemu_log("%s",
920 tcg_get_arg_str_idx(s, buf, sizeof(buf),
921 args[nb_oargs + nb_iargs - 1]));
922 /* flags */
923 qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]);
924 /* nb out args */
925 qemu_log(",$%d", nb_oargs);
926 for(i = 0; i < nb_oargs; i++) {
927 qemu_log(",");
928 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
929 args[i]));
930 }
931 for(i = 0; i < (nb_iargs - 1); i++) {
932 qemu_log(",");
933 if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
934 qemu_log("<dummy>");
935 } else {
936 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
937 args[nb_oargs + i]));
938 }
939 }
940 } else if (c == INDEX_op_movi_i32 || c == INDEX_op_movi_i64) {
941 tcg_target_ulong val;
942 TCGHelperInfo *th;
943
944 nb_oargs = def->nb_oargs;
945 nb_iargs = def->nb_iargs;
946 nb_cargs = def->nb_cargs;
947 qemu_log(" %s %s,$", def->name,
948 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
949 val = args[1];
950 th = tcg_find_helper(s, val);
951 if (th) {
952 qemu_log("%s", th->name);
953 } else {
954 if (c == INDEX_op_movi_i32) {
955 qemu_log("0x%x", (uint32_t)val);
956 } else {
957 qemu_log("0x%" PRIx64 , (uint64_t)val);
958 }
959 }
960 } else {
961 qemu_log(" %s ", def->name);
962 if (c == INDEX_op_nopn) {
963 /* variable number of arguments */
964 nb_cargs = *args;
965 nb_oargs = 0;
966 nb_iargs = 0;
967 } else {
968 nb_oargs = def->nb_oargs;
969 nb_iargs = def->nb_iargs;
970 nb_cargs = def->nb_cargs;
971 }
972
973 k = 0;
974 for(i = 0; i < nb_oargs; i++) {
975 if (k != 0) {
976 qemu_log(",");
977 }
978 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
979 args[k++]));
980 }
981 for(i = 0; i < nb_iargs; i++) {
982 if (k != 0) {
983 qemu_log(",");
984 }
985 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
986 args[k++]));
987 }
988 switch (c) {
989 case INDEX_op_brcond_i32:
990 case INDEX_op_setcond_i32:
991 case INDEX_op_movcond_i32:
992 case INDEX_op_brcond2_i32:
993 case INDEX_op_setcond2_i32:
994 case INDEX_op_brcond_i64:
995 case INDEX_op_setcond_i64:
996 case INDEX_op_movcond_i64:
997 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
998 qemu_log(",%s", cond_name[args[k++]]);
999 } else {
1000 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1001 }
1002 i = 1;
1003 break;
1004 default:
1005 i = 0;
1006 break;
1007 }
1008 for(; i < nb_cargs; i++) {
1009 if (k != 0) {
1010 qemu_log(",");
1011 }
1012 arg = args[k++];
1013 qemu_log("$0x%" TCG_PRIlx, arg);
1014 }
1015 }
1016 qemu_log("\n");
1017 args += nb_iargs + nb_oargs + nb_cargs;
1018 }
1019 }
1020
1021 /* we give more priority to constraints that allow fewer registers */
1022 static int get_constraint_priority(const TCGOpDef *def, int k)
1023 {
1024 const TCGArgConstraint *arg_ct;
1025
1026 int i, n;
1027 arg_ct = &def->args_ct[k];
1028 if (arg_ct->ct & TCG_CT_ALIAS) {
1029 /* an alias is equivalent to a single register */
1030 n = 1;
1031 } else {
1032 if (!(arg_ct->ct & TCG_CT_REG))
1033 return 0;
1034 n = 0;
1035 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1036 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1037 n++;
1038 }
1039 }
1040 return TCG_TARGET_NB_REGS - n + 1;
1041 }
1042
1043 /* sort from highest priority to lowest */
1044 static void sort_constraints(TCGOpDef *def, int start, int n)
1045 {
1046 int i, j, p1, p2, tmp;
1047
1048 for(i = 0; i < n; i++)
1049 def->sorted_args[start + i] = start + i;
1050 if (n <= 1)
1051 return;
1052 for(i = 0; i < n - 1; i++) {
1053 for(j = i + 1; j < n; j++) {
1054 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1055 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1056 if (p1 < p2) {
1057 tmp = def->sorted_args[start + i];
1058 def->sorted_args[start + i] = def->sorted_args[start + j];
1059 def->sorted_args[start + j] = tmp;
1060 }
1061 }
1062 }
1063 }
1064
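/* Register the operand constraints supplied by tcg-target.c.  Each
   args_ct_str[] string is parsed below: a digit aliases the input to
   the numbered output argument, 'i' allows an immediate constant, and
   any other letter is handed to target_parse_constraint(). */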
1065 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1066 {
1067 TCGOpcode op;
1068 TCGOpDef *def;
1069 const char *ct_str;
1070 int i, nb_args;
1071
1072 for(;;) {
1073 if (tdefs->op == (TCGOpcode)-1)
1074 break;
1075 op = tdefs->op;
1076 assert((unsigned)op < NB_OPS);
1077 def = &tcg_op_defs[op];
1078 #if defined(CONFIG_DEBUG_TCG)
1079 /* Duplicate entry in op definitions? */
1080 assert(!def->used);
1081 def->used = 1;
1082 #endif
1083 nb_args = def->nb_iargs + def->nb_oargs;
1084 for(i = 0; i < nb_args; i++) {
1085 ct_str = tdefs->args_ct_str[i];
1086 /* Incomplete TCGTargetOpDef entry? */
1087 assert(ct_str != NULL);
1088 tcg_regset_clear(def->args_ct[i].u.regs);
1089 def->args_ct[i].ct = 0;
1090 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1091 int oarg;
1092 oarg = ct_str[0] - '0';
1093 assert(oarg < def->nb_oargs);
1094 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1095 /* TCG_CT_ALIAS is for the output arguments. The input
1096 argument is tagged with TCG_CT_IALIAS. */
1097 def->args_ct[i] = def->args_ct[oarg];
1098 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1099 def->args_ct[oarg].alias_index = i;
1100 def->args_ct[i].ct |= TCG_CT_IALIAS;
1101 def->args_ct[i].alias_index = oarg;
1102 } else {
1103 for(;;) {
1104 if (*ct_str == '\0')
1105 break;
1106 switch(*ct_str) {
1107 case 'i':
1108 def->args_ct[i].ct |= TCG_CT_CONST;
1109 ct_str++;
1110 break;
1111 default:
1112 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1113 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1114 ct_str, i, def->name);
1115 exit(1);
1116 }
1117 }
1118 }
1119 }
1120 }
1121
1122 /* TCGTargetOpDef entry with too much information? */
1123 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1124
1125 /* sort the constraints (XXX: this is just a heuristic) */
1126 sort_constraints(def, 0, def->nb_oargs);
1127 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1128
1129 #if 0
1130 {
1131 int i;
1132
1133 printf("%s: sorted=", def->name);
1134 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1135 printf(" %d", def->sorted_args[i]);
1136 printf("\n");
1137 }
1138 #endif
1139 tdefs++;
1140 }
1141
1142 #if defined(CONFIG_DEBUG_TCG)
1143 i = 0;
1144 for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
1145 const TCGOpDef *def = &tcg_op_defs[op];
1146 if (op < INDEX_op_call
1147 || op == INDEX_op_debug_insn_start
1148 || (def->flags & TCG_OPF_NOT_PRESENT)) {
1149 /* Wrong entry in op definitions? */
1150 if (def->used) {
1151 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1152 i = 1;
1153 }
1154 } else {
1155 /* Missing entry in op definitions? */
1156 if (!def->used) {
1157 fprintf(stderr, "Missing op definition for %s\n", def->name);
1158 i = 1;
1159 }
1160 }
1161 }
1162 if (i == 1) {
1163 tcg_abort();
1164 }
1165 #endif
1166 }
1167
1168 #ifdef USE_LIVENESS_ANALYSIS
1169
1170 /* replace an operation taking 'nb_args' arguments with a nop */
1171 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1172 TCGArg *args, int nb_args)
1173 {
1174 if (nb_args == 0) {
1175 *opc_ptr = INDEX_op_nop;
1176 } else {
1177 *opc_ptr = INDEX_op_nopn;
1178 args[0] = nb_args;
1179 args[nb_args - 1] = nb_args;
1180 }
1181 }
1182
1183 /* liveness analysis: end of function: all temps are dead, and globals
1184 should be in memory. */
1185 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1186 uint8_t *mem_temps)
1187 {
1188 memset(dead_temps, 1, s->nb_temps);
1189 memset(mem_temps, 1, s->nb_globals);
1190 memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1191 }
1192
1193 /* liveness analysis: end of basic block: all temps are dead, globals
1194 and local temps should be in memory. */
1195 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1196 uint8_t *mem_temps)
1197 {
1198 int i;
1199
1200 memset(dead_temps, 1, s->nb_temps);
1201 memset(mem_temps, 1, s->nb_globals);
1202 for(i = s->nb_globals; i < s->nb_temps; i++) {
1203 mem_temps[i] = s->temps[i].temp_local;
1204 }
1205 }
1206
1207 /* Liveness analysis: update the op_dead_args array to tell whether a
1208    given input argument is dead. Instructions updating dead
1209 temporaries are removed. */
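/* The pass below walks the opcode stream backwards from the final
   op_end, so while an op is being processed dead_temps[] and mem_temps[]
   describe the state *after* that op. */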
1210 static void tcg_liveness_analysis(TCGContext *s)
1211 {
1212 int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
1213 TCGOpcode op;
1214 TCGArg *args;
1215 const TCGOpDef *def;
1216 uint8_t *dead_temps, *mem_temps;
1217 uint16_t dead_args;
1218 uint8_t sync_args;
1219
1220 gen_opc_ptr++; /* skip end */
1221
1222 nb_ops = gen_opc_ptr - gen_opc_buf;
1223
1224 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1225 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1226
1227 dead_temps = tcg_malloc(s->nb_temps);
1228 mem_temps = tcg_malloc(s->nb_temps);
1229 tcg_la_func_end(s, dead_temps, mem_temps);
1230
1231 args = gen_opparam_ptr;
1232 op_index = nb_ops - 1;
1233 while (op_index >= 0) {
1234 op = gen_opc_buf[op_index];
1235 def = &tcg_op_defs[op];
1236 switch(op) {
1237 case INDEX_op_call:
1238 {
1239 int call_flags;
1240
1241 nb_args = args[-1];
1242 args -= nb_args;
1243 nb_iargs = args[0] & 0xffff;
1244 nb_oargs = args[0] >> 16;
1245 args++;
1246 call_flags = args[nb_oargs + nb_iargs];
1247
1248 /* pure functions can be removed if their result is not
1249 used */
1250 if (call_flags & TCG_CALL_PURE) {
1251 for(i = 0; i < nb_oargs; i++) {
1252 arg = args[i];
1253 if (!dead_temps[arg] || mem_temps[arg]) {
1254 goto do_not_remove_call;
1255 }
1256 }
1257 tcg_set_nop(s, gen_opc_buf + op_index,
1258 args - 1, nb_args);
1259 } else {
1260 do_not_remove_call:
1261
1262 /* output args are dead */
1263 dead_args = 0;
1264 sync_args = 0;
1265 for(i = 0; i < nb_oargs; i++) {
1266 arg = args[i];
1267 if (dead_temps[arg]) {
1268 dead_args |= (1 << i);
1269 }
1270 if (mem_temps[arg]) {
1271 sync_args |= (1 << i);
1272 }
1273 dead_temps[arg] = 1;
1274 mem_temps[arg] = 0;
1275 }
1276
1277 if (!(call_flags & TCG_CALL_CONST)) {
1278 /* globals should go back to memory */
1279 memset(dead_temps, 1, s->nb_globals);
1280 memset(mem_temps, 1, s->nb_globals);
1281 }
1282
1283 /* input args are live */
1284 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1285 arg = args[i];
1286 if (arg != TCG_CALL_DUMMY_ARG) {
1287 if (dead_temps[arg]) {
1288 dead_args |= (1 << i);
1289 }
1290 dead_temps[arg] = 0;
1291 }
1292 }
1293 s->op_dead_args[op_index] = dead_args;
1294 s->op_sync_args[op_index] = sync_args;
1295 }
1296 args--;
1297 }
1298 break;
1299 case INDEX_op_debug_insn_start:
1300 args -= def->nb_args;
1301 break;
1302 case INDEX_op_nopn:
1303 nb_args = args[-1];
1304 args -= nb_args;
1305 break;
1306 case INDEX_op_discard:
1307 args--;
1308 /* mark the temporary as dead */
1309 dead_temps[args[0]] = 1;
1310 mem_temps[args[0]] = 0;
1311 break;
1312 case INDEX_op_end:
1313 break;
1314
1315 case INDEX_op_add2_i32:
1316 case INDEX_op_sub2_i32:
1317 args -= 6;
1318 nb_iargs = 4;
1319 nb_oargs = 2;
1320 /* Test if the high part of the operation is dead, but not
1321 the low part. The result can be optimized to a simple
1322    add or sub. This happens often for x86_64 guests when the
1323    CPU mode is set to 32 bit. */
1324 if (dead_temps[args[1]]) {
1325 if (dead_temps[args[0]]) {
1326 goto do_remove;
1327 }
1328 /* Create the single operation plus nop. */
1329 if (op == INDEX_op_add2_i32) {
1330 op = INDEX_op_add_i32;
1331 } else {
1332 op = INDEX_op_sub_i32;
1333 }
1334 gen_opc_buf[op_index] = op;
1335 args[1] = args[2];
1336 args[2] = args[4];
1337 assert(gen_opc_buf[op_index + 1] == INDEX_op_nop);
1338 tcg_set_nop(s, gen_opc_buf + op_index + 1, args + 3, 3);
1339 /* Fall through and mark the single-word operation live. */
1340 nb_iargs = 2;
1341 nb_oargs = 1;
1342 }
1343 goto do_not_remove;
1344
1345 case INDEX_op_mulu2_i32:
1346 args -= 4;
1347 nb_iargs = 2;
1348 nb_oargs = 2;
1349 /* Likewise, test whether the high part of the operation is dead. */
1350 if (dead_temps[args[1]]) {
1351 if (dead_temps[args[0]]) {
1352 goto do_remove;
1353 }
1354 gen_opc_buf[op_index] = op = INDEX_op_mul_i32;
1355 args[1] = args[2];
1356 args[2] = args[3];
1357 assert(gen_opc_buf[op_index + 1] == INDEX_op_nop);
1358 tcg_set_nop(s, gen_opc_buf + op_index + 1, args + 3, 1);
1359 /* Fall through and mark the single-word operation live. */
1360 nb_oargs = 1;
1361 }
1362 goto do_not_remove;
1363
1364 default:
1365 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1366 args -= def->nb_args;
1367 nb_iargs = def->nb_iargs;
1368 nb_oargs = def->nb_oargs;
1369
1370 /* Test if the operation can be removed because all
1371 its outputs are dead. We assume that nb_oargs == 0
1372 implies side effects */
1373 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1374 for(i = 0; i < nb_oargs; i++) {
1375 arg = args[i];
1376 if (!dead_temps[arg] || mem_temps[arg]) {
1377 goto do_not_remove;
1378 }
1379 }
1380 do_remove:
1381 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
1382 #ifdef CONFIG_PROFILER
1383 s->del_op_count++;
1384 #endif
1385 } else {
1386 do_not_remove:
1387
1388 /* output args are dead */
1389 dead_args = 0;
1390 sync_args = 0;
1391 for(i = 0; i < nb_oargs; i++) {
1392 arg = args[i];
1393 if (dead_temps[arg]) {
1394 dead_args |= (1 << i);
1395 }
1396 if (mem_temps[arg]) {
1397 sync_args |= (1 << i);
1398 }
1399 dead_temps[arg] = 1;
1400 mem_temps[arg] = 0;
1401 }
1402
1403 /* if end of basic block, update */
1404 if (def->flags & TCG_OPF_BB_END) {
1405 tcg_la_bb_end(s, dead_temps, mem_temps);
1406 } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
1407 /* globals should go back to memory */
1408 memset(dead_temps, 1, s->nb_globals);
1409 memset(mem_temps, 1, s->nb_globals);
1410 }
1411
1412 /* input args are live */
1413 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1414 arg = args[i];
1415 if (dead_temps[arg]) {
1416 dead_args |= (1 << i);
1417 }
1418 dead_temps[arg] = 0;
1419 }
1420 s->op_dead_args[op_index] = dead_args;
1421 s->op_sync_args[op_index] = sync_args;
1422 }
1423 break;
1424 }
1425 op_index--;
1426 }
1427
1428 if (args != gen_opparam_buf)
1429 tcg_abort();
1430 }
1431 #else
1432 /* dummy liveness analysis */
1433 static void tcg_liveness_analysis(TCGContext *s)
1434 {
1435 int nb_ops;
1436 nb_ops = gen_opc_ptr - gen_opc_buf;
1437
1438 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1439 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1440 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1441 memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1442 }
1443 #endif
1444
1445 #ifndef NDEBUG
1446 static void dump_regs(TCGContext *s)
1447 {
1448 TCGTemp *ts;
1449 int i;
1450 char buf[64];
1451
1452 for(i = 0; i < s->nb_temps; i++) {
1453 ts = &s->temps[i];
1454 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1455 switch(ts->val_type) {
1456 case TEMP_VAL_REG:
1457 printf("%s", tcg_target_reg_names[ts->reg]);
1458 break;
1459 case TEMP_VAL_MEM:
1460 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1461 break;
1462 case TEMP_VAL_CONST:
1463 printf("$0x%" TCG_PRIlx, ts->val);
1464 break;
1465 case TEMP_VAL_DEAD:
1466 printf("D");
1467 break;
1468 default:
1469 printf("???");
1470 break;
1471 }
1472 printf("\n");
1473 }
1474
1475 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1476 if (s->reg_to_temp[i] >= 0) {
1477 printf("%s: %s\n",
1478 tcg_target_reg_names[i],
1479 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1480 }
1481 }
1482 }
1483
1484 static void check_regs(TCGContext *s)
1485 {
1486 int reg, k;
1487 TCGTemp *ts;
1488 char buf[64];
1489
1490 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1491 k = s->reg_to_temp[reg];
1492 if (k >= 0) {
1493 ts = &s->temps[k];
1494 if (ts->val_type != TEMP_VAL_REG ||
1495 ts->reg != reg) {
1496 printf("Inconsistency for register %s:\n",
1497 tcg_target_reg_names[reg]);
1498 goto fail;
1499 }
1500 }
1501 }
1502 for(k = 0; k < s->nb_temps; k++) {
1503 ts = &s->temps[k];
1504 if (ts->val_type == TEMP_VAL_REG &&
1505 !ts->fixed_reg &&
1506 s->reg_to_temp[ts->reg] != k) {
1507 printf("Inconsistency for temp %s:\n",
1508 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1509 fail:
1510 printf("reg state:\n");
1511 dump_regs(s);
1512 tcg_abort();
1513 }
1514 }
1515 }
1516 #endif
1517
1518 static void temp_allocate_frame(TCGContext *s, int temp)
1519 {
1520 TCGTemp *ts;
1521 ts = &s->temps[temp];
1522 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1523 /* Sparc64 stack is accessed with offset of 2047 */
1524 s->current_frame_offset = (s->current_frame_offset +
1525 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1526 ~(sizeof(tcg_target_long) - 1);
1527 #endif
1528 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1529 s->frame_end) {
1530 tcg_abort();
1531 }
1532 ts->mem_offset = s->current_frame_offset;
1533 ts->mem_reg = s->frame_reg;
1534 ts->mem_allocated = 1;
1535 s->current_frame_offset += (tcg_target_long)sizeof(tcg_target_long);
1536 }
1537
1538 /* sync register 'reg' by saving it to the corresponding temporary */
1539 static inline void tcg_reg_sync(TCGContext *s, int reg)
1540 {
1541 TCGTemp *ts;
1542 int temp;
1543
1544 temp = s->reg_to_temp[reg];
1545 ts = &s->temps[temp];
1546 assert(ts->val_type == TEMP_VAL_REG);
1547 if (!ts->mem_coherent && !ts->fixed_reg) {
1548 if (!ts->mem_allocated) {
1549 temp_allocate_frame(s, temp);
1550 }
1551 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1552 }
1553 ts->mem_coherent = 1;
1554 }
1555
1556 /* free register 'reg' by spilling the corresponding temporary if necessary */
1557 static void tcg_reg_free(TCGContext *s, int reg)
1558 {
1559 int temp;
1560
1561 temp = s->reg_to_temp[reg];
1562 if (temp != -1) {
1563 tcg_reg_sync(s, reg);
1564 s->temps[temp].val_type = TEMP_VAL_MEM;
1565 s->reg_to_temp[reg] = -1;
1566 }
1567 }
1568
1569 /* Allocate a register belonging to reg1 & ~reg2 */
1570 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1571 {
1572 int i, reg;
1573 TCGRegSet reg_ct;
1574
1575 tcg_regset_andnot(reg_ct, reg1, reg2);
1576
1577 /* first try free registers */
1578 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1579 reg = tcg_target_reg_alloc_order[i];
1580 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1581 return reg;
1582 }
1583
1584 /* XXX: do better spill choice */
1585 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1586 reg = tcg_target_reg_alloc_order[i];
1587 if (tcg_regset_test_reg(reg_ct, reg)) {
1588 tcg_reg_free(s, reg);
1589 return reg;
1590 }
1591 }
1592
1593 tcg_abort();
1594 }
1595
1596 /* mark a temporary as dead. */
1597 static inline void temp_dead(TCGContext *s, int temp)
1598 {
1599 TCGTemp *ts;
1600
1601 ts = &s->temps[temp];
1602 if (!ts->fixed_reg) {
1603 if (ts->val_type == TEMP_VAL_REG) {
1604 s->reg_to_temp[ts->reg] = -1;
1605 }
1606 if (temp < s->nb_globals || (ts->temp_local && ts->mem_allocated)) {
1607 ts->val_type = TEMP_VAL_MEM;
1608 } else {
1609 ts->val_type = TEMP_VAL_DEAD;
1610 }
1611 }
1612 }
1613
1614 /* sync a temporary to memory. 'allocated_regs' is used in case a
1615    temporary register needs to be allocated to store a constant. */
1616 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1617 {
1618 TCGTemp *ts;
1619
1620 ts = &s->temps[temp];
1621 if (!ts->fixed_reg) {
1622 switch(ts->val_type) {
1623 case TEMP_VAL_CONST:
1624 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1625 allocated_regs);
1626 ts->val_type = TEMP_VAL_REG;
1627 s->reg_to_temp[ts->reg] = temp;
1628 ts->mem_coherent = 0;
1629 tcg_out_movi(s, ts->type, ts->reg, ts->val);
1630 /* fallthrough*/
1631 case TEMP_VAL_REG:
1632 tcg_reg_sync(s, ts->reg);
1633 break;
1634 case TEMP_VAL_DEAD:
1635 case TEMP_VAL_MEM:
1636 break;
1637 default:
1638 tcg_abort();
1639 }
1640 }
1641 }
1642
1643 /* save a temporary to memory. 'allocated_regs' is used in case a
1644    temporary register needs to be allocated to store a constant. */
1645 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1646 {
1647 temp_sync(s, temp, allocated_regs);
1648 temp_dead(s, temp);
1649 }
1650
1651 /* save globals to their canonical location and assume they can be
1652    modified by the following code. 'allocated_regs' is used in case a
1653    temporary register needs to be allocated to store a constant. */
1654 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1655 {
1656 int i;
1657
1658 for(i = 0; i < s->nb_globals; i++) {
1659 temp_save(s, i, allocated_regs);
1660 }
1661 }
1662
1663 /* at the end of a basic block, we assume all temporaries are dead and
1664 all globals are stored at their canonical location. */
1665 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1666 {
1667 TCGTemp *ts;
1668 int i;
1669
1670 for(i = s->nb_globals; i < s->nb_temps; i++) {
1671 ts = &s->temps[i];
1672 if (ts->temp_local) {
1673 temp_save(s, i, allocated_regs);
1674 } else {
1675 temp_dead(s, i);
1676 }
1677 }
1678
1679 save_globals(s, allocated_regs);
1680 }
1681
1682 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1683 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
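/* Both masks come from the liveness pass: bit n of dead_args is set
   when argument n of the current op is not used afterwards, and bit n
   of sync_args is set when output n must also be written back to its
   canonical memory location. */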
1684
1685 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1686 uint16_t dead_args, uint8_t sync_args)
1687 {
1688 TCGTemp *ots;
1689 tcg_target_ulong val;
1690
1691 ots = &s->temps[args[0]];
1692 val = args[1];
1693
1694 if (ots->fixed_reg) {
1695 /* for fixed registers, we do not do any constant
1696 propagation */
1697 tcg_out_movi(s, ots->type, ots->reg, val);
1698 } else {
1699 /* The movi is not explicitly generated here */
1700 if (ots->val_type == TEMP_VAL_REG)
1701 s->reg_to_temp[ots->reg] = -1;
1702 ots->val_type = TEMP_VAL_CONST;
1703 ots->val = val;
1704 }
1705 if (NEED_SYNC_ARG(0)) {
1706 temp_sync(s, args[0], s->reserved_regs);
1707 }
1708 if (IS_DEAD_ARG(0)) {
1709 temp_dead(s, args[0]);
1710 }
1711 }
1712
1713 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1714 const TCGArg *args, uint16_t dead_args,
1715 uint8_t sync_args)
1716 {
1717 TCGRegSet allocated_regs;
1718 TCGTemp *ts, *ots;
1719 const TCGArgConstraint *arg_ct, *oarg_ct;
1720
1721 tcg_regset_set(allocated_regs, s->reserved_regs);
1722 ots = &s->temps[args[0]];
1723 ts = &s->temps[args[1]];
1724 oarg_ct = &def->args_ct[0];
1725 arg_ct = &def->args_ct[1];
1726
1727 /* If the source value is not in a register, and we're going to be
1728 forced to have it in a register in order to perform the copy,
1729 then copy the SOURCE value into its own register first. That way
1730 we don't have to reload SOURCE the next time it is used. */
1731 if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1732 || ts->val_type == TEMP_VAL_MEM) {
1733 ts->reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1734 if (ts->val_type == TEMP_VAL_MEM) {
1735 tcg_out_ld(s, ts->type, ts->reg, ts->mem_reg, ts->mem_offset);
1736 ts->mem_coherent = 1;
1737 } else if (ts->val_type == TEMP_VAL_CONST) {
1738 tcg_out_movi(s, ts->type, ts->reg, ts->val);
1739 }
1740 s->reg_to_temp[ts->reg] = args[1];
1741 ts->val_type = TEMP_VAL_REG;
1742 }
1743
1744 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1745 /* mov to a non-saved dead register makes no sense (even with
1746 liveness analysis disabled). */
1747 assert(NEED_SYNC_ARG(0));
1748 /* The code above should have moved the temp to a register. */
1749 assert(ts->val_type == TEMP_VAL_REG);
1750 if (!ots->mem_allocated) {
1751 temp_allocate_frame(s, args[0]);
1752 }
1753 tcg_out_st(s, ots->type, ts->reg, ots->mem_reg, ots->mem_offset);
1754 if (IS_DEAD_ARG(1)) {
1755 temp_dead(s, args[1]);
1756 }
1757 temp_dead(s, args[0]);
1758 } else if (ts->val_type == TEMP_VAL_CONST) {
1759 /* propagate constant */
1760 if (ots->val_type == TEMP_VAL_REG) {
1761 s->reg_to_temp[ots->reg] = -1;
1762 }
1763 ots->val_type = TEMP_VAL_CONST;
1764 ots->val = ts->val;
1765 } else {
1766 /* The code in the first if block should have moved the
1767 temp to a register. */
1768 assert(ts->val_type == TEMP_VAL_REG);
1769 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1770 /* the mov can be suppressed */
1771 if (ots->val_type == TEMP_VAL_REG) {
1772 s->reg_to_temp[ots->reg] = -1;
1773 }
1774 ots->reg = ts->reg;
1775 temp_dead(s, args[1]);
1776 } else {
1777 if (ots->val_type != TEMP_VAL_REG) {
1778 /* When allocating a new register, make sure to not spill the
1779 input one. */
1780 tcg_regset_set_reg(allocated_regs, ts->reg);
1781 ots->reg = tcg_reg_alloc(s, oarg_ct->u.regs, allocated_regs);
1782 }
1783 tcg_out_mov(s, ots->type, ots->reg, ts->reg);
1784 }
1785 ots->val_type = TEMP_VAL_REG;
1786 ots->mem_coherent = 0;
1787 s->reg_to_temp[ots->reg] = args[0];
1788 if (NEED_SYNC_ARG(0)) {
1789 tcg_reg_sync(s, ots->reg);
1790 }
1791 }
1792 }
1793
1794 static void tcg_reg_alloc_op(TCGContext *s,
1795 const TCGOpDef *def, TCGOpcode opc,
1796 const TCGArg *args, uint16_t dead_args,
1797 uint8_t sync_args)
1798 {
1799 TCGRegSet allocated_regs;
1800 int i, k, nb_iargs, nb_oargs, reg;
1801 TCGArg arg;
1802 const TCGArgConstraint *arg_ct;
1803 TCGTemp *ts;
1804 TCGArg new_args[TCG_MAX_OP_ARGS];
1805 int const_args[TCG_MAX_OP_ARGS];
1806
1807 nb_oargs = def->nb_oargs;
1808 nb_iargs = def->nb_iargs;
1809
1810 /* copy constants */
1811 memcpy(new_args + nb_oargs + nb_iargs,
1812 args + nb_oargs + nb_iargs,
1813 sizeof(TCGArg) * def->nb_cargs);
1814
1815 /* satisfy input constraints */
1816 tcg_regset_set(allocated_regs, s->reserved_regs);
1817 for(k = 0; k < nb_iargs; k++) {
1818 i = def->sorted_args[nb_oargs + k];
1819 arg = args[i];
1820 arg_ct = &def->args_ct[i];
1821 ts = &s->temps[arg];
1822 if (ts->val_type == TEMP_VAL_MEM) {
1823 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1824 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1825 ts->val_type = TEMP_VAL_REG;
1826 ts->reg = reg;
1827 ts->mem_coherent = 1;
1828 s->reg_to_temp[reg] = arg;
1829 } else if (ts->val_type == TEMP_VAL_CONST) {
1830 if (tcg_target_const_match(ts->val, arg_ct)) {
1831 /* constant is OK for instruction */
1832 const_args[i] = 1;
1833 new_args[i] = ts->val;
1834 goto iarg_end;
1835 } else {
1836 /* need to move to a register */
1837 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1838 tcg_out_movi(s, ts->type, reg, ts->val);
1839 ts->val_type = TEMP_VAL_REG;
1840 ts->reg = reg;
1841 ts->mem_coherent = 0;
1842 s->reg_to_temp[reg] = arg;
1843 }
1844 }
1845 assert(ts->val_type == TEMP_VAL_REG);
1846 if (arg_ct->ct & TCG_CT_IALIAS) {
1847 if (ts->fixed_reg) {
1848 /* if fixed register, we must allocate a new register
1849 if the alias is not the same register */
1850 if (arg != args[arg_ct->alias_index])
1851 goto allocate_in_reg;
1852 } else {
1853 /* if the input is aliased to an output and if it is
1854 not dead after the instruction, we must allocate
1855 a new register and move it */
1856 if (!IS_DEAD_ARG(i)) {
1857 goto allocate_in_reg;
1858 }
1859 }
1860 }
1861 reg = ts->reg;
1862 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1863 /* nothing to do: the constraint is satisfied */
1864 } else {
1865 allocate_in_reg:
1866 /* allocate a new register matching the constraint
1867 and move the temporary register into it */
1868 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1869 tcg_out_mov(s, ts->type, reg, ts->reg);
1870 }
1871 new_args[i] = reg;
1872 const_args[i] = 0;
1873 tcg_regset_set_reg(allocated_regs, reg);
1874 iarg_end: ;
1875 }
1876
1877 /* mark dead temporaries and free the associated registers */
1878 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1879 if (IS_DEAD_ARG(i)) {
1880 temp_dead(s, args[i]);
1881 }
1882 }
1883
1884 if (def->flags & TCG_OPF_BB_END) {
1885 tcg_reg_alloc_bb_end(s, allocated_regs);
1886 } else {
1887 if (def->flags & TCG_OPF_CALL_CLOBBER) {
1888 /* XXX: permit generic clobber register list ? */
1889 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1890 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1891 tcg_reg_free(s, reg);
1892 }
1893 }
1894 /* XXX: for load/store we could do that only for the slow path
1895 (i.e. when a memory callback is called) */
1896
1897 /* store globals and free associated registers (we assume the insn
1898    can modify any global). */
1899 save_globals(s, allocated_regs);
1900 }
1901
1902 /* satisfy the output constraints */
1903 tcg_regset_set(allocated_regs, s->reserved_regs);
1904 for(k = 0; k < nb_oargs; k++) {
1905 i = def->sorted_args[k];
1906 arg = args[i];
1907 arg_ct = &def->args_ct[i];
1908 ts = &s->temps[arg];
1909 if (arg_ct->ct & TCG_CT_ALIAS) {
1910 reg = new_args[arg_ct->alias_index];
1911 } else {
1912 /* if fixed register, we try to use it */
1913 reg = ts->reg;
1914 if (ts->fixed_reg &&
1915 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1916 goto oarg_end;
1917 }
1918 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1919 }
1920 tcg_regset_set_reg(allocated_regs, reg);
1921 /* if a fixed register is used, then a move will be done afterwards */
1922 if (!ts->fixed_reg) {
1923 if (ts->val_type == TEMP_VAL_REG) {
1924 s->reg_to_temp[ts->reg] = -1;
1925 }
1926 ts->val_type = TEMP_VAL_REG;
1927 ts->reg = reg;
1928 /* temp value is modified, so the value kept in memory is
1929 potentially not the same */
1930 ts->mem_coherent = 0;
1931 s->reg_to_temp[reg] = arg;
1932 }
1933 oarg_end:
1934 new_args[i] = reg;
1935 }
1936 }
1937
1938 /* emit instruction */
1939 tcg_out_op(s, opc, new_args, const_args);
1940
1941 /* move the outputs in the correct register if needed */
1942 for(i = 0; i < nb_oargs; i++) {
1943 ts = &s->temps[args[i]];
1944 reg = new_args[i];
1945 if (ts->fixed_reg && ts->reg != reg) {
1946 tcg_out_mov(s, ts->type, ts->reg, reg);
1947 }
1948 if (NEED_SYNC_ARG(i)) {
1949 tcg_reg_sync(s, reg);
1950 }
1951 if (IS_DEAD_ARG(i)) {
1952 temp_dead(s, args[i]);
1953 }
1954 }
1955 }
1956
1957 #ifdef TCG_TARGET_STACK_GROWSUP
1958 #define STACK_DIR(x) (-(x))
1959 #else
1960 #define STACK_DIR(x) (x)
1961 #endif
1962
1963 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
1964 TCGOpcode opc, const TCGArg *args,
1965 uint16_t dead_args, uint8_t sync_args)
1966 {
1967 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
1968 TCGArg arg, func_arg;
1969 TCGTemp *ts;
1970 tcg_target_long stack_offset, call_stack_size, func_addr;
1971 int const_func_arg, allocate_args;
1972 TCGRegSet allocated_regs;
1973 const TCGArgConstraint *arg_ct;
1974
1975 arg = *args++;
1976
1977 nb_oargs = arg >> 16;
1978 nb_iargs = arg & 0xffff;
1979 nb_params = nb_iargs - 1;
1980
1981 flags = args[nb_oargs + nb_iargs];
1982
1983 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
1984 if (nb_regs > nb_params)
1985 nb_regs = nb_params;
1986
1987 /* assign stack slots first */
1988 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
1989 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
1990 ~(TCG_TARGET_STACK_ALIGN - 1);
1991 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
1992 if (allocate_args) {
1993 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
1994 preallocate call stack */
1995 tcg_abort();
1996 }
1997
1998 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
1999 for(i = nb_regs; i < nb_params; i++) {
2000 arg = args[nb_oargs + i];
2001 #ifdef TCG_TARGET_STACK_GROWSUP
2002 stack_offset -= sizeof(tcg_target_long);
2003 #endif
2004 if (arg != TCG_CALL_DUMMY_ARG) {
2005 ts = &s->temps[arg];
2006 if (ts->val_type == TEMP_VAL_REG) {
2007 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2008 } else if (ts->val_type == TEMP_VAL_MEM) {
2009 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2010 s->reserved_regs);
2011 /* XXX: not correct if reading values from the stack */
2012 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2013 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2014 } else if (ts->val_type == TEMP_VAL_CONST) {
2015 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2016 s->reserved_regs);
2017 /* XXX: sign extend may be needed on some targets */
2018 tcg_out_movi(s, ts->type, reg, ts->val);
2019 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2020 } else {
2021 tcg_abort();
2022 }
2023 }
2024 #ifndef TCG_TARGET_STACK_GROWSUP
2025 stack_offset += sizeof(tcg_target_long);
2026 #endif
2027 }
2028
2029 /* assign input registers */
2030 tcg_regset_set(allocated_regs, s->reserved_regs);
2031 for(i = 0; i < nb_regs; i++) {
2032 arg = args[nb_oargs + i];
2033 if (arg != TCG_CALL_DUMMY_ARG) {
2034 ts = &s->temps[arg];
2035 reg = tcg_target_call_iarg_regs[i];
2036 tcg_reg_free(s, reg);
2037 if (ts->val_type == TEMP_VAL_REG) {
2038 if (ts->reg != reg) {
2039 tcg_out_mov(s, ts->type, reg, ts->reg);
2040 }
2041 } else if (ts->val_type == TEMP_VAL_MEM) {
2042 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2043 } else if (ts->val_type == TEMP_VAL_CONST) {
2044 /* XXX: sign extend ? */
2045 tcg_out_movi(s, ts->type, reg, ts->val);
2046 } else {
2047 tcg_abort();
2048 }
2049 tcg_regset_set_reg(allocated_regs, reg);
2050 }
2051 }
2052
2053 /* assign function address */
2054 func_arg = args[nb_oargs + nb_iargs - 1];
2055 arg_ct = &def->args_ct[0];
2056 ts = &s->temps[func_arg];
2057 func_addr = ts->val;
2058 const_func_arg = 0;
2059 if (ts->val_type == TEMP_VAL_MEM) {
2060 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2061 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2062 func_arg = reg;
2063 tcg_regset_set_reg(allocated_regs, reg);
2064 } else if (ts->val_type == TEMP_VAL_REG) {
2065 reg = ts->reg;
2066 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2067 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2068 tcg_out_mov(s, ts->type, reg, ts->reg);
2069 }
2070 func_arg = reg;
2071 tcg_regset_set_reg(allocated_regs, reg);
2072 } else if (ts->val_type == TEMP_VAL_CONST) {
2073 if (tcg_target_const_match(func_addr, arg_ct)) {
2074 const_func_arg = 1;
2075 func_arg = func_addr;
2076 } else {
2077 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2078 tcg_out_movi(s, ts->type, reg, func_addr);
2079 func_arg = reg;
2080 tcg_regset_set_reg(allocated_regs, reg);
2081 }
2082 } else {
2083 tcg_abort();
2084 }
2085
2086
2087 /* mark dead temporaries and free the associated registers */
2088 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2089 if (IS_DEAD_ARG(i)) {
2090 temp_dead(s, args[i]);
2091 }
2092 }
2093
2094 /* clobber call registers */
2095 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2096 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2097 tcg_reg_free(s, reg);
2098 }
2099 }
2100
2101 /* store globals and free associated registers (we assume the call
2102 can modify any global). */
2103 if (!(flags & TCG_CALL_CONST)) {
2104 save_globals(s, allocated_regs);
2105 }
2106
2107 tcg_out_op(s, opc, &func_arg, &const_func_arg);
2108
2109 /* assign output registers and emit moves if needed */
2110 for(i = 0; i < nb_oargs; i++) {
2111 arg = args[i];
2112 ts = &s->temps[arg];
2113 reg = tcg_target_call_oarg_regs[i];
2114 assert(s->reg_to_temp[reg] == -1);
2115 if (ts->fixed_reg) {
2116 if (ts->reg != reg) {
2117 tcg_out_mov(s, ts->type, ts->reg, reg);
2118 }
2119 } else {
2120 if (ts->val_type == TEMP_VAL_REG) {
2121 s->reg_to_temp[ts->reg] = -1;
2122 }
2123 ts->val_type = TEMP_VAL_REG;
2124 ts->reg = reg;
2125 ts->mem_coherent = 0;
2126 s->reg_to_temp[reg] = arg;
2127 if (NEED_SYNC_ARG(i)) {
2128 tcg_reg_sync(s, reg);
2129 }
2130 if (IS_DEAD_ARG(i)) {
2131 temp_dead(s, args[i]);
2132 }
2133 }
2134 }
2135
2136 return nb_iargs + nb_oargs + def->nb_cargs + 1;
2137 }
2138
2139 #ifdef CONFIG_PROFILER
2140
2141 static int64_t tcg_table_op_count[NB_OPS];
2142
2143 static void dump_op_count(void)
2144 {
2145 int i;
2146 FILE *f;
2147 f = fopen("/tmp/op.log", "w");
2148 for(i = INDEX_op_end; i < NB_OPS; i++) {
2149 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
2150 }
2151 fclose(f);
2152 }
2153 #endif
2154
2155
2156 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
2157 long search_pc)
2158 {
2159 TCGOpcode opc;
2160 int op_index;
2161 const TCGOpDef *def;
2162 const TCGArg *args;
2163
2164 #ifdef DEBUG_DISAS
2165 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2166 qemu_log("OP:\n");
2167 tcg_dump_ops(s);
2168 qemu_log("\n");
2169 }
2170 #endif
2171
2172 #ifdef CONFIG_PROFILER
2173 s->opt_time -= profile_getclock();
2174 #endif
2175
2176 #ifdef USE_TCG_OPTIMIZATIONS
2177 gen_opparam_ptr =
2178 tcg_optimize(s, gen_opc_ptr, gen_opparam_buf, tcg_op_defs);
2179 #endif
2180
2181 #ifdef CONFIG_PROFILER
2182 s->opt_time += profile_getclock();
2183 s->la_time -= profile_getclock();
2184 #endif
2185
2186 tcg_liveness_analysis(s);
2187
2188 #ifdef CONFIG_PROFILER
2189 s->la_time += profile_getclock();
2190 #endif
2191
2192 #ifdef DEBUG_DISAS
2193 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2194 qemu_log("OP after optimization and liveness analysis:\n");
2195 tcg_dump_ops(s);
2196 qemu_log("\n");
2197 }
2198 #endif
2199
2200 tcg_reg_alloc_start(s);
2201
2202 s->code_buf = gen_code_buf;
2203 s->code_ptr = gen_code_buf;
2204
2205 args = gen_opparam_buf;
2206 op_index = 0;
2207
2208 for(;;) {
2209 opc = gen_opc_buf[op_index];
2210 #ifdef CONFIG_PROFILER
2211 tcg_table_op_count[opc]++;
2212 #endif
2213 def = &tcg_op_defs[opc];
2214 #if 0
2215 printf("%s: %d %d %d\n", def->name,
2216 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2217 // dump_regs(s);
2218 #endif
2219 switch(opc) {
2220 case INDEX_op_mov_i32:
2221 case INDEX_op_mov_i64:
2222 tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index],
2223 s->op_sync_args[op_index]);
2224 break;
2225 case INDEX_op_movi_i32:
2226 case INDEX_op_movi_i64:
2227 tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index],
2228 s->op_sync_args[op_index]);
2229 break;
2230 case INDEX_op_debug_insn_start:
2231 /* debug instruction */
2232 break;
2233 case INDEX_op_nop:
2234 case INDEX_op_nop1:
2235 case INDEX_op_nop2:
2236 case INDEX_op_nop3:
2237 break;
2238 case INDEX_op_nopn:
2239 args += args[0];
2240 goto next;
2241 case INDEX_op_discard:
2242 temp_dead(s, args[0]);
2243 break;
2244 case INDEX_op_set_label:
2245 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2246 tcg_out_label(s, args[0], s->code_ptr);
2247 break;
2248 case INDEX_op_call:
2249 args += tcg_reg_alloc_call(s, def, opc, args,
2250 s->op_dead_args[op_index],
2251 s->op_sync_args[op_index]);
2252 goto next;
2253 case INDEX_op_end:
2254 goto the_end;
2255 default:
2256 /* Sanity check that we've not introduced any unhandled opcodes. */
2257 if (def->flags & TCG_OPF_NOT_PRESENT) {
2258 tcg_abort();
2259 }
2260 /* Note: code generation would be much faster with specialized
2261 register allocator functions for some common argument
2262 patterns. */
2263 tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index],
2264 s->op_sync_args[op_index]);
2265 break;
2266 }
2267 args += def->nb_args;
2268 next:
2269 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
2270 return op_index;
2271 }
2272 op_index++;
2273 #ifndef NDEBUG
2274 check_regs(s);
2275 #endif
2276 }
2277 the_end:
2278 return -1;
2279 }
2280
2281 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
2282 {
2283 #ifdef CONFIG_PROFILER
2284 {
2285 int n;
2286 n = (gen_opc_ptr - gen_opc_buf);
2287 s->op_count += n;
2288 if (n > s->op_count_max)
2289 s->op_count_max = n;
2290
2291 s->temp_count += s->nb_temps;
2292 if (s->nb_temps > s->temp_count_max)
2293 s->temp_count_max = s->nb_temps;
2294 }
2295 #endif
2296
2297 tcg_gen_code_common(s, gen_code_buf, -1);
2298
2299 /* flush instruction cache */
2300 flush_icache_range((tcg_target_ulong)gen_code_buf,
2301 (tcg_target_ulong)s->code_ptr);
2302
2303 return s->code_ptr - gen_code_buf;
2304 }
2305
2306 /* Return the index of the first micro operation such that the host
2307 pc, after its code has been generated, is more than offset bytes from
2308 the start of the TB. The contents of gen_code_buf must not be changed,
2309 though writing the same values is ok. Return -1 if not found. */
2310 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
2311 {
2312 return tcg_gen_code_common(s, gen_code_buf, offset);
2313 }
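/* Editor's sketch (kept out of the build) of the intended use, roughly
   following how cpu_restore_state() calls this: regenerate the TB into the
   same buffer and look up the micro-op index for a faulting host pc.
   "example_find_op" and its arguments are hypothetical. */
#if 0
static int example_find_op(TCGContext *s, TranslationBlock *tb,
                           uintptr_t searched_pc)
{
    /* offset of the host pc from the start of the TB's generated code */
    long offset = searched_pc - (uintptr_t)tb->tc_ptr;

    /* returns the micro-op index, or -1 if offset is past the TB's code */
    return tcg_gen_code_search_pc(s, (uint8_t *)tb->tc_ptr, offset);
}
#endif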
2314
2315 #ifdef CONFIG_PROFILER
2316 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2317 {
2318 TCGContext *s = &tcg_ctx;
2319 int64_t tot;
2320
2321 tot = s->interm_time + s->code_time;
2322 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2323 tot, tot / 2.4e9);
2324 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2325 s->tb_count,
2326 s->tb_count1 - s->tb_count,
2327 s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
2328 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2329 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
2330 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2331 s->tb_count ?
2332 (double)s->del_op_count / s->tb_count : 0);
2333 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2334 s->tb_count ?
2335 (double)s->temp_count / s->tb_count : 0,
2336 s->temp_count_max);
2337
2338 cpu_fprintf(f, "cycles/op %0.1f\n",
2339 s->op_count ? (double)tot / s->op_count : 0);
2340 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2341 s->code_in_len ? (double)tot / s->code_in_len : 0);
2342 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2343 s->code_out_len ? (double)tot / s->code_out_len : 0);
2344 if (tot == 0)
2345 tot = 1;
2346 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2347 (double)s->interm_time / tot * 100.0);
2348 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2349 (double)s->code_time / tot * 100.0);
2350 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2351 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2352 * 100.0);
2353 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2354 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2355 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2356 s->restore_count);
2357 cpu_fprintf(f, " avg cycles %0.1f\n",
2358 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2359
2360 dump_op_count();
2361 }
2362 #else
2363 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2364 {
2365 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2366 }
2367 #endif
2368
2369 #ifdef ELF_HOST_MACHINE
2370 /* In order to use this feature, the backend needs to do three things:
2371
2372 (1) Define ELF_HOST_MACHINE, both to give the e_machine value to
2373 put into the ELF image and to indicate support for the feature.
2374
2375 (2) Define tcg_register_jit. This should create a buffer containing
2376 the contents of a .debug_frame section that describes the post-
2377 prologue unwind info for the tcg machine.
2378
2379 (3) Call tcg_register_jit_int with the constructed .debug_frame; see the sketch below.
2380 */
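/* Editor's sketch (kept out of the build) of the backend side of points
   (1)-(3) above.  The EM_* value, the contents of debug_frame, and whether
   the definitions live in tcg-target.h or tcg-target.c are host specific;
   the CFI bytes below are merely a placeholder, not a real backend's data. */
#if 0
/* (1) advertise the feature and the e_machine value, e.g. on an x86-64 host */
#define ELF_HOST_MACHINE EM_X86_64

/* (2) a buffer with .debug_frame contents describing the post-prologue
   frame; a real backend fills in a proper CIE and FDE here */
static const uint8_t debug_frame[] = {
    0 /* host-specific CIE + FDE bytes elided */
};

/* (3) hand the code buffer and the unwind info to the common helper */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, (void *)debug_frame,
                         sizeof(debug_frame));
}
#endif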
2381
2382 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2383 typedef enum {
2384 JIT_NOACTION = 0,
2385 JIT_REGISTER_FN,
2386 JIT_UNREGISTER_FN
2387 } jit_actions_t;
2388
2389 struct jit_code_entry {
2390 struct jit_code_entry *next_entry;
2391 struct jit_code_entry *prev_entry;
2392 const void *symfile_addr;
2393 uint64_t symfile_size;
2394 };
2395
2396 struct jit_descriptor {
2397 uint32_t version;
2398 uint32_t action_flag;
2399 struct jit_code_entry *relevant_entry;
2400 struct jit_code_entry *first_entry;
2401 };
2402
2403 void __jit_debug_register_code(void) __attribute__((noinline));
2404 void __jit_debug_register_code(void)
2405 {
2406 asm("");
2407 }
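/* GDB sets a breakpoint in this no-op function; the noinline attribute and
   the empty asm keep the call from being optimized away, so GDB regains
   control after the descriptor below has been updated. */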
2408
2409 /* Must statically initialize the version, because GDB may check
2410 the version before we can set it. */
2411 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2412
2413 /* End GDB interface. */
2414
2415 static int find_string(const char *strtab, const char *str)
2416 {
2417 const char *p = strtab + 1;
2418
2419 while (1) {
2420 if (strcmp(p, str) == 0) {
2421 return p - strtab;
2422 }
2423 p += strlen(p) + 1;
2424 }
2425 }
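/* For example, with the string table used below ("\0" ".text\0"
   ".debug_info\0" ...), find_string(str, ".text") returns 1 and
   find_string(str, ".debug_info") returns 7; index 0 holds the
   conventional empty name, hence the search starts at strtab + 1. */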
2426
2427 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2428 void *debug_frame, size_t debug_frame_size)
2429 {
2430 struct __attribute__((packed)) DebugInfo {
2431 uint32_t len;
2432 uint16_t version;
2433 uint32_t abbrev;
2434 uint8_t ptr_size;
2435 uint8_t cu_die;
2436 uint16_t cu_lang;
2437 uintptr_t cu_low_pc;
2438 uintptr_t cu_high_pc;
2439 uint8_t fn_die;
2440 char fn_name[16];
2441 uintptr_t fn_low_pc;
2442 uintptr_t fn_high_pc;
2443 uint8_t cu_eoc;
2444 };
2445
2446 struct ElfImage {
2447 ElfW(Ehdr) ehdr;
2448 ElfW(Phdr) phdr;
2449 ElfW(Shdr) shdr[7];
2450 ElfW(Sym) sym[2];
2451 struct DebugInfo di;
2452 uint8_t da[24];
2453 char str[80];
2454 };
2455
2456 struct ElfImage *img;
2457
2458 static const struct ElfImage img_template = {
2459 .ehdr = {
2460 .e_ident[EI_MAG0] = ELFMAG0,
2461 .e_ident[EI_MAG1] = ELFMAG1,
2462 .e_ident[EI_MAG2] = ELFMAG2,
2463 .e_ident[EI_MAG3] = ELFMAG3,
2464 .e_ident[EI_CLASS] = ELF_CLASS,
2465 .e_ident[EI_DATA] = ELF_DATA,
2466 .e_ident[EI_VERSION] = EV_CURRENT,
2467 .e_type = ET_EXEC,
2468 .e_machine = ELF_HOST_MACHINE,
2469 .e_version = EV_CURRENT,
2470 .e_phoff = offsetof(struct ElfImage, phdr),
2471 .e_shoff = offsetof(struct ElfImage, shdr),
2472 .e_ehsize = sizeof(ElfW(Ehdr)),
2473 .e_phentsize = sizeof(ElfW(Phdr)),
2474 .e_phnum = 1,
2475 .e_shentsize = sizeof(ElfW(Shdr)),
2476 .e_shnum = ARRAY_SIZE(img->shdr),
2477 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2478 #ifdef ELF_HOST_FLAGS
2479 .e_flags = ELF_HOST_FLAGS,
2480 #endif
2481 #ifdef ELF_OSABI
2482 .e_ident[EI_OSABI] = ELF_OSABI,
2483 #endif
2484 },
2485 .phdr = {
2486 .p_type = PT_LOAD,
2487 .p_flags = PF_X,
2488 },
2489 .shdr = {
2490 [0] = { .sh_type = SHT_NULL },
2491 /* Trick: The contents of code_gen_buffer are not present in
2492 this fake ELF file; that got allocated elsewhere. Therefore
2493 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2494 will not look for contents. We can record any address. */
2495 [1] = { /* .text */
2496 .sh_type = SHT_NOBITS,
2497 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2498 },
2499 [2] = { /* .debug_info */
2500 .sh_type = SHT_PROGBITS,
2501 .sh_offset = offsetof(struct ElfImage, di),
2502 .sh_size = sizeof(struct DebugInfo),
2503 },
2504 [3] = { /* .debug_abbrev */
2505 .sh_type = SHT_PROGBITS,
2506 .sh_offset = offsetof(struct ElfImage, da),
2507 .sh_size = sizeof(img->da),
2508 },
2509 [4] = { /* .debug_frame */
2510 .sh_type = SHT_PROGBITS,
2511 .sh_offset = sizeof(struct ElfImage),
2512 },
2513 [5] = { /* .symtab */
2514 .sh_type = SHT_SYMTAB,
2515 .sh_offset = offsetof(struct ElfImage, sym),
2516 .sh_size = sizeof(img->sym),
2517 .sh_info = 1,
2518 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2519 .sh_entsize = sizeof(ElfW(Sym)),
2520 },
2521 [6] = { /* .strtab */
2522 .sh_type = SHT_STRTAB,
2523 .sh_offset = offsetof(struct ElfImage, str),
2524 .sh_size = sizeof(img->str),
2525 }
2526 },
2527 .sym = {
2528 [1] = { /* code_gen_buffer */
2529 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2530 .st_shndx = 1,
2531 }
2532 },
2533 .di = {
2534 .len = sizeof(struct DebugInfo) - 4,
2535 .version = 2,
2536 .ptr_size = sizeof(void *),
2537 .cu_die = 1,
2538 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2539 .fn_die = 2,
2540 .fn_name = "code_gen_buffer"
2541 },
2542 .da = {
2543 1, /* abbrev number (the cu) */
2544 0x11, 1, /* DW_TAG_compile_unit, has children */
2545 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2546 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2547 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2548 0, 0, /* end of abbrev */
2549 2, /* abbrev number (the fn) */
2550 0x2e, 0, /* DW_TAG_subprogram, no children */
2551 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2552 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2553 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2554 0, 0, /* end of abbrev */
2555 0 /* no more abbrev */
2556 },
2557 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2558 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2559 };
2560
2561 /* We only need a single jit entry; statically allocate it. */
2562 static struct jit_code_entry one_entry;
2563
2564 uintptr_t buf = (uintptr_t)buf_ptr;
2565 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2566
2567 img = g_malloc(img_size);
2568 *img = img_template;
2569 memcpy(img + 1, debug_frame, debug_frame_size);
2570
2571 img->phdr.p_vaddr = buf;
2572 img->phdr.p_paddr = buf;
2573 img->phdr.p_memsz = buf_size;
2574
2575 img->shdr[1].sh_name = find_string(img->str, ".text");
2576 img->shdr[1].sh_addr = buf;
2577 img->shdr[1].sh_size = buf_size;
2578
2579 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2580 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2581
2582 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2583 img->shdr[4].sh_size = debug_frame_size;
2584
2585 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2586 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2587
2588 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2589 img->sym[1].st_value = buf;
2590 img->sym[1].st_size = buf_size;
2591
2592 img->di.cu_low_pc = buf;
2593 img->di.cu_high_pc = buf_size;
2594 img->di.fn_low_pc = buf;
2595 img->di.fn_high_pc = buf_size;
2596
2597 #ifdef DEBUG_JIT
2598 /* Enable this block to be able to debug the ELF image file creation.
2599 One can use readelf, objdump, or other inspection utilities. */
2600 {
2601 FILE *f = fopen("/tmp/qemu.jit", "w+b");
2602 if (f) {
2603 if (fwrite(img, img_size, 1, f) != 1) {
2604 /* Avoid stupid unused return value warning for fwrite. */
2605 }
2606 fclose(f);
2607 }
2608 }
2609 #endif
2610
2611 one_entry.symfile_addr = img;
2612 one_entry.symfile_size = img_size;
2613
2614 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2615 __jit_debug_descriptor.relevant_entry = &one_entry;
2616 __jit_debug_descriptor.first_entry = &one_entry;
2617 __jit_debug_register_code();
2618 }
2619 #else
2620 /* No support for the feature. Provide the entry point expected by exec.c,
2621 and implement the internal function we declared earlier. */
2622
2623 static void tcg_register_jit_int(void *buf, size_t size,
2624 void *debug_frame, size_t debug_frame_size)
2625 {
2626 }
2627
2628 void tcg_register_jit(void *buf, size_t buf_size)
2629 {
2630 }
2631 #endif /* ELF_HOST_MACHINE */