[qemu.git] / tcg / tcg.c (blob bb9c9952aacafd794c7daef6d373c05ddfc9fd24)
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
28
29 #include "config.h"
30
 32 /* Define to dump the ELF file used to communicate with GDB. */
32 #undef DEBUG_JIT
33
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
38
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
43
 44 /* Note: the long term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
46 instructions */
47 #define NO_CPU_IO_DEFS
48 #include "cpu.h"
49
50 #include "tcg-op.h"
51
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
54 #else
55 # define ELF_CLASS ELFCLASS32
56 #endif
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
59 #else
60 # define ELF_DATA ELFDATA2LSB
61 #endif
62
63 #include "elf.h"
64
65 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
66 #error GUEST_BASE not supported on this host.
67 #endif
68
69 /* Forward declarations for functions declared in tcg-target.c and used here. */
70 static void tcg_target_init(TCGContext *s);
71 static void tcg_target_qemu_prologue(TCGContext *s);
72 static void patch_reloc(uint8_t *code_ptr, int type,
73 tcg_target_long value, tcg_target_long addend);
74
75 static void tcg_register_jit_int(void *buf, size_t size,
76 void *debug_frame, size_t debug_frame_size)
77 __attribute__((unused));
78
79 /* Forward declarations for functions declared and used in tcg-target.c. */
80 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
81 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
82 tcg_target_long arg2);
83 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
84 static void tcg_out_movi(TCGContext *s, TCGType type,
85 TCGReg ret, tcg_target_long arg);
86 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
87 const int *const_args);
88 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
89 tcg_target_long arg2);
90 static int tcg_target_const_match(tcg_target_long val,
91 const TCGArgConstraint *arg_ct);
92 static int tcg_target_get_call_iarg_regs_count(int flags);
93
94 TCGOpDef tcg_op_defs[] = {
95 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
96 #include "tcg-opc.h"
97 #undef DEF
98 };
99 const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
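/* Each entry of tcg_op_defs is generated from a DEF() line in tcg-opc.h and
   records the opcode name, the number of output, input and constant
   arguments, the total argument count, and the opcode flags.  As an
   illustration (the authoritative values live in tcg-opc.h), an entry such
   as DEF(add_i32, 1, 2, 0, 0) expands to
   { "add_i32", 1, 2, 0, 3, 0 }: one output, two inputs, no constants. */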
100
101 static TCGRegSet tcg_target_available_regs[2];
102 static TCGRegSet tcg_target_call_clobber_regs;
103
104 /* XXX: move that inside the context */
105 uint16_t *gen_opc_ptr;
106 TCGArg *gen_opparam_ptr;
107
108 static inline void tcg_out8(TCGContext *s, uint8_t v)
109 {
110 *s->code_ptr++ = v;
111 }
112
113 static inline void tcg_out16(TCGContext *s, uint16_t v)
114 {
115 *(uint16_t *)s->code_ptr = v;
116 s->code_ptr += 2;
117 }
118
119 static inline void tcg_out32(TCGContext *s, uint32_t v)
120 {
121 *(uint32_t *)s->code_ptr = v;
122 s->code_ptr += 4;
123 }
124
125 /* label relocation processing */
126
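/* Labels may be referenced before they are bound.  tcg_out_reloc() either
   patches the site immediately when the label already has a value, or queues
   a TCGRelocation on the label's first_reloc list; tcg_out_label() later
   walks that list and patches every pending site once the label's address
   is known. */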
127 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
128 int label_index, long addend)
129 {
130 TCGLabel *l;
131 TCGRelocation *r;
132
133 l = &s->labels[label_index];
134 if (l->has_value) {
135 /* FIXME: This may break relocations on RISC targets that
136 modify instruction fields in place. The caller may not have
137 written the initial value. */
138 patch_reloc(code_ptr, type, l->u.value, addend);
139 } else {
140 /* add a new relocation entry */
141 r = tcg_malloc(sizeof(TCGRelocation));
142 r->type = type;
143 r->ptr = code_ptr;
144 r->addend = addend;
145 r->next = l->u.first_reloc;
146 l->u.first_reloc = r;
147 }
148 }
149
150 static void tcg_out_label(TCGContext *s, int label_index, void *ptr)
151 {
152 TCGLabel *l;
153 TCGRelocation *r;
154 tcg_target_long value = (tcg_target_long)ptr;
155
156 l = &s->labels[label_index];
157 if (l->has_value)
158 tcg_abort();
159 r = l->u.first_reloc;
160 while (r != NULL) {
161 patch_reloc(r->ptr, r->type, value, r->addend);
162 r = r->next;
163 }
164 l->has_value = 1;
165 l->u.value = value;
166 }
167
168 int gen_new_label(void)
169 {
170 TCGContext *s = &tcg_ctx;
171 int idx;
172 TCGLabel *l;
173
174 if (s->nb_labels >= TCG_MAX_LABELS)
175 tcg_abort();
176 idx = s->nb_labels++;
177 l = &s->labels[idx];
178 l->has_value = 0;
179 l->u.first_reloc = NULL;
180 return idx;
181 }
182
183 #include "tcg-target.c"
184
185 /* pool based memory allocation */
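/* tcg_malloc() (see tcg.h) bump-allocates from the current chunk and only
   falls back to this slow path when the chunk is exhausted.  Allocations
   larger than TCG_POOL_CHUNK_SIZE get their own node on pool_first_large and
   are freed by tcg_pool_reset(); normal chunks stay on the pool_first list
   and are simply reused for the next translation block. */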
186 void *tcg_malloc_internal(TCGContext *s, int size)
187 {
188 TCGPool *p;
189 int pool_size;
190
191 if (size > TCG_POOL_CHUNK_SIZE) {
192 /* big malloc: insert a new pool (XXX: could optimize) */
193 p = g_malloc(sizeof(TCGPool) + size);
194 p->size = size;
195 p->next = s->pool_first_large;
196 s->pool_first_large = p;
197 return p->data;
198 } else {
199 p = s->pool_current;
200 if (!p) {
201 p = s->pool_first;
202 if (!p)
203 goto new_pool;
204 } else {
205 if (!p->next) {
206 new_pool:
207 pool_size = TCG_POOL_CHUNK_SIZE;
208 p = g_malloc(sizeof(TCGPool) + pool_size);
209 p->size = pool_size;
210 p->next = NULL;
211 if (s->pool_current)
212 s->pool_current->next = p;
213 else
214 s->pool_first = p;
215 } else {
216 p = p->next;
217 }
218 }
219 }
220 s->pool_current = p;
221 s->pool_cur = p->data + size;
222 s->pool_end = p->data + p->size;
223 return p->data;
224 }
225
226 void tcg_pool_reset(TCGContext *s)
227 {
228 TCGPool *p, *t;
229 for (p = s->pool_first_large; p; p = t) {
230 t = p->next;
231 g_free(p);
232 }
233 s->pool_first_large = NULL;
234 s->pool_cur = s->pool_end = NULL;
235 s->pool_current = NULL;
236 }
237
238 void tcg_context_init(TCGContext *s)
239 {
240 int op, total_args, n;
241 TCGOpDef *def;
242 TCGArgConstraint *args_ct;
243 int *sorted_args;
244
245 memset(s, 0, sizeof(*s));
246 s->temps = s->static_temps;
247 s->nb_globals = 0;
248
249 /* Count total number of arguments and allocate the corresponding
250 space */
251 total_args = 0;
252 for(op = 0; op < NB_OPS; op++) {
253 def = &tcg_op_defs[op];
254 n = def->nb_iargs + def->nb_oargs;
255 total_args += n;
256 }
257
258 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
259 sorted_args = g_malloc(sizeof(int) * total_args);
260
261 for(op = 0; op < NB_OPS; op++) {
262 def = &tcg_op_defs[op];
263 def->args_ct = args_ct;
264 def->sorted_args = sorted_args;
265 n = def->nb_iargs + def->nb_oargs;
266 sorted_args += n;
267 args_ct += n;
268 }
269
270 tcg_target_init(s);
271 }
272
273 void tcg_prologue_init(TCGContext *s)
274 {
275 /* init global prologue and epilogue */
276 s->code_buf = code_gen_prologue;
277 s->code_ptr = s->code_buf;
278 tcg_target_qemu_prologue(s);
279 flush_icache_range((tcg_target_ulong)s->code_buf,
280 (tcg_target_ulong)s->code_ptr);
281 }
282
283 void tcg_set_frame(TCGContext *s, int reg,
284 tcg_target_long start, tcg_target_long size)
285 {
286 s->frame_start = start;
287 s->frame_end = start + size;
288 s->frame_reg = reg;
289 }
290
291 void tcg_func_start(TCGContext *s)
292 {
293 int i;
294 tcg_pool_reset(s);
295 s->nb_temps = s->nb_globals;
296 for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
297 s->first_free_temp[i] = -1;
298 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
299 s->nb_labels = 0;
300 s->current_frame_offset = s->frame_start;
301
302 gen_opc_ptr = gen_opc_buf;
303 gen_opparam_ptr = gen_opparam_buf;
304 }
305
306 static inline void tcg_temp_alloc(TCGContext *s, int n)
307 {
308 if (n > TCG_MAX_TEMPS)
309 tcg_abort();
310 }
311
312 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
313 const char *name)
314 {
315 TCGContext *s = &tcg_ctx;
316 TCGTemp *ts;
317 int idx;
318
319 #if TCG_TARGET_REG_BITS == 32
320 if (type != TCG_TYPE_I32)
321 tcg_abort();
322 #endif
323 if (tcg_regset_test_reg(s->reserved_regs, reg))
324 tcg_abort();
325 idx = s->nb_globals;
326 tcg_temp_alloc(s, s->nb_globals + 1);
327 ts = &s->temps[s->nb_globals];
328 ts->base_type = type;
329 ts->type = type;
330 ts->fixed_reg = 1;
331 ts->reg = reg;
332 ts->name = name;
333 s->nb_globals++;
334 tcg_regset_set_reg(s->reserved_regs, reg);
335 return idx;
336 }
337
338 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
339 {
340 int idx;
341
342 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
343 return MAKE_TCGV_I32(idx);
344 }
345
346 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
347 {
348 int idx;
349
350 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
351 return MAKE_TCGV_I64(idx);
352 }
353
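/* On 32-bit hosts a 64-bit global cannot live in a single register, so
   tcg_global_mem_new_internal() registers it as two consecutive 32-bit
   temps named "<name>_0" and "<name>_1", with mem_offset adjusted so that
   _0 always refers to the low half regardless of host endianness. */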
354 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
355 tcg_target_long offset,
356 const char *name)
357 {
358 TCGContext *s = &tcg_ctx;
359 TCGTemp *ts;
360 int idx;
361
362 idx = s->nb_globals;
363 #if TCG_TARGET_REG_BITS == 32
364 if (type == TCG_TYPE_I64) {
365 char buf[64];
366 tcg_temp_alloc(s, s->nb_globals + 2);
367 ts = &s->temps[s->nb_globals];
368 ts->base_type = type;
369 ts->type = TCG_TYPE_I32;
370 ts->fixed_reg = 0;
371 ts->mem_allocated = 1;
372 ts->mem_reg = reg;
373 #ifdef TCG_TARGET_WORDS_BIGENDIAN
374 ts->mem_offset = offset + 4;
375 #else
376 ts->mem_offset = offset;
377 #endif
378 pstrcpy(buf, sizeof(buf), name);
379 pstrcat(buf, sizeof(buf), "_0");
380 ts->name = strdup(buf);
381 ts++;
382
383 ts->base_type = type;
384 ts->type = TCG_TYPE_I32;
385 ts->fixed_reg = 0;
386 ts->mem_allocated = 1;
387 ts->mem_reg = reg;
388 #ifdef TCG_TARGET_WORDS_BIGENDIAN
389 ts->mem_offset = offset;
390 #else
391 ts->mem_offset = offset + 4;
392 #endif
393 pstrcpy(buf, sizeof(buf), name);
394 pstrcat(buf, sizeof(buf), "_1");
395 ts->name = strdup(buf);
396
397 s->nb_globals += 2;
398 } else
399 #endif
400 {
401 tcg_temp_alloc(s, s->nb_globals + 1);
402 ts = &s->temps[s->nb_globals];
403 ts->base_type = type;
404 ts->type = type;
405 ts->fixed_reg = 0;
406 ts->mem_allocated = 1;
407 ts->mem_reg = reg;
408 ts->mem_offset = offset;
409 ts->name = name;
410 s->nb_globals++;
411 }
412 return idx;
413 }
414
415 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
416 const char *name)
417 {
418 int idx;
419
420 idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
421 return MAKE_TCGV_I32(idx);
422 }
423
424 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
425 const char *name)
426 {
427 int idx;
428
429 idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
430 return MAKE_TCGV_I64(idx);
431 }
432
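/* Freed temporaries are kept on per-kind free lists: first_free_temp[] is
   indexed by type, plus TCG_TYPE_COUNT for local temps, and its entries are
   chained through TCGTemp.next_free_temp.  tcg_temp_new_internal() reuses an
   entry from the matching list before growing nb_temps. */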
433 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
434 {
435 TCGContext *s = &tcg_ctx;
436 TCGTemp *ts;
437 int idx, k;
438
439 k = type;
440 if (temp_local)
441 k += TCG_TYPE_COUNT;
442 idx = s->first_free_temp[k];
443 if (idx != -1) {
444 /* There is already an available temp with the
445 right type */
446 ts = &s->temps[idx];
447 s->first_free_temp[k] = ts->next_free_temp;
448 ts->temp_allocated = 1;
449 assert(ts->temp_local == temp_local);
450 } else {
451 idx = s->nb_temps;
452 #if TCG_TARGET_REG_BITS == 32
453 if (type == TCG_TYPE_I64) {
454 tcg_temp_alloc(s, s->nb_temps + 2);
455 ts = &s->temps[s->nb_temps];
456 ts->base_type = type;
457 ts->type = TCG_TYPE_I32;
458 ts->temp_allocated = 1;
459 ts->temp_local = temp_local;
460 ts->name = NULL;
461 ts++;
462 ts->base_type = TCG_TYPE_I32;
463 ts->type = TCG_TYPE_I32;
464 ts->temp_allocated = 1;
465 ts->temp_local = temp_local;
466 ts->name = NULL;
467 s->nb_temps += 2;
468 } else
469 #endif
470 {
471 tcg_temp_alloc(s, s->nb_temps + 1);
472 ts = &s->temps[s->nb_temps];
473 ts->base_type = type;
474 ts->type = type;
475 ts->temp_allocated = 1;
476 ts->temp_local = temp_local;
477 ts->name = NULL;
478 s->nb_temps++;
479 }
480 }
481
482 #if defined(CONFIG_DEBUG_TCG)
483 s->temps_in_use++;
484 #endif
485 return idx;
486 }
487
488 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
489 {
490 int idx;
491
492 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
493 return MAKE_TCGV_I32(idx);
494 }
495
496 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
497 {
498 int idx;
499
500 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
501 return MAKE_TCGV_I64(idx);
502 }
503
504 static inline void tcg_temp_free_internal(int idx)
505 {
506 TCGContext *s = &tcg_ctx;
507 TCGTemp *ts;
508 int k;
509
510 #if defined(CONFIG_DEBUG_TCG)
511 s->temps_in_use--;
512 if (s->temps_in_use < 0) {
513 fprintf(stderr, "More temporaries freed than allocated!\n");
514 }
515 #endif
516
517 assert(idx >= s->nb_globals && idx < s->nb_temps);
518 ts = &s->temps[idx];
519 assert(ts->temp_allocated != 0);
520 ts->temp_allocated = 0;
521 k = ts->base_type;
522 if (ts->temp_local)
523 k += TCG_TYPE_COUNT;
524 ts->next_free_temp = s->first_free_temp[k];
525 s->first_free_temp[k] = idx;
526 }
527
528 void tcg_temp_free_i32(TCGv_i32 arg)
529 {
530 tcg_temp_free_internal(GET_TCGV_I32(arg));
531 }
532
533 void tcg_temp_free_i64(TCGv_i64 arg)
534 {
535 tcg_temp_free_internal(GET_TCGV_I64(arg));
536 }
537
538 TCGv_i32 tcg_const_i32(int32_t val)
539 {
540 TCGv_i32 t0;
541 t0 = tcg_temp_new_i32();
542 tcg_gen_movi_i32(t0, val);
543 return t0;
544 }
545
546 TCGv_i64 tcg_const_i64(int64_t val)
547 {
548 TCGv_i64 t0;
549 t0 = tcg_temp_new_i64();
550 tcg_gen_movi_i64(t0, val);
551 return t0;
552 }
553
554 TCGv_i32 tcg_const_local_i32(int32_t val)
555 {
556 TCGv_i32 t0;
557 t0 = tcg_temp_local_new_i32();
558 tcg_gen_movi_i32(t0, val);
559 return t0;
560 }
561
562 TCGv_i64 tcg_const_local_i64(int64_t val)
563 {
564 TCGv_i64 t0;
565 t0 = tcg_temp_local_new_i64();
566 tcg_gen_movi_i64(t0, val);
567 return t0;
568 }
569
570 #if defined(CONFIG_DEBUG_TCG)
571 void tcg_clear_temp_count(void)
572 {
573 TCGContext *s = &tcg_ctx;
574 s->temps_in_use = 0;
575 }
576
577 int tcg_check_temp_count(void)
578 {
579 TCGContext *s = &tcg_ctx;
580 if (s->temps_in_use) {
581 /* Clear the count so that we don't give another
582 * warning immediately next time around.
583 */
584 s->temps_in_use = 0;
585 return 1;
586 }
587 return 0;
588 }
589 #endif
590
591 void tcg_register_helper(void *func, const char *name)
592 {
593 TCGContext *s = &tcg_ctx;
594 int n;
595 if ((s->nb_helpers + 1) > s->allocated_helpers) {
596 n = s->allocated_helpers;
597 if (n == 0) {
598 n = 4;
599 } else {
600 n *= 2;
601 }
602 s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
603 s->allocated_helpers = n;
604 }
605 s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
606 s->helpers[s->nb_helpers].name = name;
607 s->nb_helpers++;
608 }
609
610 /* Note: we convert the 64 bit args to 32 bit and do some alignment
611 and endian swap. Maybe it would be better to do the alignment
612 and endian swap in tcg_reg_alloc_call(). */
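/* sizemask encoding used below: bit 0 is set when the return value is
   64 bits wide, and bits (i+1)*2 / (i+1)*2+1 describe argument i
   (64-bit / sign-extended).  Bit 1 presumably carries the signedness of the
   return value; it is not inspected here. */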
613 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
614 int sizemask, TCGArg ret, int nargs, TCGArg *args)
615 {
616 int i;
617 int real_args;
618 int nb_rets;
619 TCGArg *nparam;
620
621 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
622 for (i = 0; i < nargs; ++i) {
623 int is_64bit = sizemask & (1 << (i+1)*2);
624 int is_signed = sizemask & (2 << (i+1)*2);
625 if (!is_64bit) {
626 TCGv_i64 temp = tcg_temp_new_i64();
627 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
628 if (is_signed) {
629 tcg_gen_ext32s_i64(temp, orig);
630 } else {
631 tcg_gen_ext32u_i64(temp, orig);
632 }
633 args[i] = GET_TCGV_I64(temp);
634 }
635 }
636 #endif /* TCG_TARGET_EXTEND_ARGS */
637
638 *gen_opc_ptr++ = INDEX_op_call;
639 nparam = gen_opparam_ptr++;
640 if (ret != TCG_CALL_DUMMY_ARG) {
641 #if TCG_TARGET_REG_BITS < 64
642 if (sizemask & 1) {
643 #ifdef TCG_TARGET_WORDS_BIGENDIAN
644 *gen_opparam_ptr++ = ret + 1;
645 *gen_opparam_ptr++ = ret;
646 #else
647 *gen_opparam_ptr++ = ret;
648 *gen_opparam_ptr++ = ret + 1;
649 #endif
650 nb_rets = 2;
651 } else
652 #endif
653 {
654 *gen_opparam_ptr++ = ret;
655 nb_rets = 1;
656 }
657 } else {
658 nb_rets = 0;
659 }
660 real_args = 0;
661 for (i = 0; i < nargs; i++) {
662 #if TCG_TARGET_REG_BITS < 64
663 int is_64bit = sizemask & (1 << (i+1)*2);
664 if (is_64bit) {
665 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
666 /* some targets want aligned 64 bit args */
667 if (real_args & 1) {
668 *gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
669 real_args++;
670 }
671 #endif
672 /* If stack grows up, then we will be placing successive
673 arguments at lower addresses, which means we need to
674 reverse the order compared to how we would normally
675 treat either big or little-endian. For those arguments
676 that will wind up in registers, this still works for
677 HPPA (the only current STACK_GROWSUP target) since the
678 argument registers are *also* allocated in decreasing
679 order. If another such target is added, this logic may
680 have to get more complicated to differentiate between
681 stack arguments and register arguments. */
682 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
683 *gen_opparam_ptr++ = args[i] + 1;
684 *gen_opparam_ptr++ = args[i];
685 #else
686 *gen_opparam_ptr++ = args[i];
687 *gen_opparam_ptr++ = args[i] + 1;
688 #endif
689 real_args += 2;
690 continue;
691 }
692 #endif /* TCG_TARGET_REG_BITS < 64 */
693
694 *gen_opparam_ptr++ = args[i];
695 real_args++;
696 }
697 *gen_opparam_ptr++ = GET_TCGV_PTR(func);
698
699 *gen_opparam_ptr++ = flags;
700
701 *nparam = (nb_rets << 16) | (real_args + 1);
702
703 /* total parameters, needed to go backward in the instruction stream */
704 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
705
706 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
707 for (i = 0; i < nargs; ++i) {
708 int is_64bit = sizemask & (1 << (i+1)*2);
709 if (!is_64bit) {
710 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
711 tcg_temp_free_i64(temp);
712 }
713 }
714 #endif /* TCG_TARGET_EXTEND_ARGS */
715 }
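/* In the opcode stream a call is encoded as INDEX_op_call followed by a
   parameter word holding (nb_rets << 16) | (real_args + 1), then the return
   args, the input args, the function pointer and the flags, and finally a
   word giving the total number of parameter words, which lets the liveness
   pass walk the stream backwards. */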
716
717 #if TCG_TARGET_REG_BITS == 32
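/* 64-bit shifts are decomposed into 32-bit operations on the two halves.
   For example, a logical right shift by c with 0 < c < 32 computes
   low = (low >> c) | (high << (32 - c)) and high = high >> c; for c >= 32
   only the high half contributes, shifted by c - 32. */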
718 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
719 int c, int right, int arith)
720 {
721 if (c == 0) {
722 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
723 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
724 } else if (c >= 32) {
725 c -= 32;
726 if (right) {
727 if (arith) {
728 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
729 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
730 } else {
731 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
732 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
733 }
734 } else {
735 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
736 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
737 }
738 } else {
739 TCGv_i32 t0, t1;
740
741 t0 = tcg_temp_new_i32();
742 t1 = tcg_temp_new_i32();
743 if (right) {
744 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
745 if (arith)
746 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
747 else
748 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
749 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
750 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
751 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
752 } else {
753 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
754 /* Note: ret can be the same as arg1, so we use t1 */
755 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
756 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
757 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
758 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
759 }
760 tcg_temp_free_i32(t0);
761 tcg_temp_free_i32(t1);
762 }
763 }
764 #endif
765
766
767 static void tcg_reg_alloc_start(TCGContext *s)
768 {
769 int i;
770 TCGTemp *ts;
771 for(i = 0; i < s->nb_globals; i++) {
772 ts = &s->temps[i];
773 if (ts->fixed_reg) {
774 ts->val_type = TEMP_VAL_REG;
775 } else {
776 ts->val_type = TEMP_VAL_MEM;
777 }
778 }
779 for(i = s->nb_globals; i < s->nb_temps; i++) {
780 ts = &s->temps[i];
781 ts->val_type = TEMP_VAL_DEAD;
782 ts->mem_allocated = 0;
783 ts->fixed_reg = 0;
784 }
785 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
786 s->reg_to_temp[i] = -1;
787 }
788 }
789
790 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
791 int idx)
792 {
793 TCGTemp *ts;
794
795 assert(idx >= 0 && idx < s->nb_temps);
796 ts = &s->temps[idx];
797 assert(ts);
798 if (idx < s->nb_globals) {
799 pstrcpy(buf, buf_size, ts->name);
800 } else {
801 if (ts->temp_local)
802 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
803 else
804 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
805 }
806 return buf;
807 }
808
809 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
810 {
811 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
812 }
813
814 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
815 {
816 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
817 }
818
819 static int helper_cmp(const void *p1, const void *p2)
820 {
821 const TCGHelperInfo *th1 = p1;
822 const TCGHelperInfo *th2 = p2;
823 if (th1->func < th2->func)
824 return -1;
825 else if (th1->func == th2->func)
826 return 0;
827 else
828 return 1;
829 }
830
831 /* find helper definition (Note: A hash table would be better) */
832 static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
833 {
834 int m, m_min, m_max;
835 TCGHelperInfo *th;
836 tcg_target_ulong v;
837
838 if (unlikely(!s->helpers_sorted)) {
839 qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
840 helper_cmp);
841 s->helpers_sorted = 1;
842 }
843
844 /* binary search */
845 m_min = 0;
846 m_max = s->nb_helpers - 1;
847 while (m_min <= m_max) {
848 m = (m_min + m_max) >> 1;
849 th = &s->helpers[m];
850 v = th->func;
851 if (v == val)
852 return th;
853 else if (val < v) {
854 m_max = m - 1;
855 } else {
856 m_min = m + 1;
857 }
858 }
859 return NULL;
860 }
861
862 static const char * const cond_name[] =
863 {
864 [TCG_COND_EQ] = "eq",
865 [TCG_COND_NE] = "ne",
866 [TCG_COND_LT] = "lt",
867 [TCG_COND_GE] = "ge",
868 [TCG_COND_LE] = "le",
869 [TCG_COND_GT] = "gt",
870 [TCG_COND_LTU] = "ltu",
871 [TCG_COND_GEU] = "geu",
872 [TCG_COND_LEU] = "leu",
873 [TCG_COND_GTU] = "gtu"
874 };
875
876 void tcg_dump_ops(TCGContext *s)
877 {
878 const uint16_t *opc_ptr;
879 const TCGArg *args;
880 TCGArg arg;
881 TCGOpcode c;
882 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
883 const TCGOpDef *def;
884 char buf[128];
885
886 first_insn = 1;
887 opc_ptr = gen_opc_buf;
888 args = gen_opparam_buf;
889 while (opc_ptr < gen_opc_ptr) {
890 c = *opc_ptr++;
891 def = &tcg_op_defs[c];
892 if (c == INDEX_op_debug_insn_start) {
893 uint64_t pc;
894 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
895 pc = ((uint64_t)args[1] << 32) | args[0];
896 #else
897 pc = args[0];
898 #endif
899 if (!first_insn) {
900 qemu_log("\n");
901 }
902 qemu_log(" ---- 0x%" PRIx64, pc);
903 first_insn = 0;
904 nb_oargs = def->nb_oargs;
905 nb_iargs = def->nb_iargs;
906 nb_cargs = def->nb_cargs;
907 } else if (c == INDEX_op_call) {
908 TCGArg arg;
909
910 /* variable number of arguments */
911 arg = *args++;
912 nb_oargs = arg >> 16;
913 nb_iargs = arg & 0xffff;
914 nb_cargs = def->nb_cargs;
915
916 qemu_log(" %s ", def->name);
917
918 /* function name */
919 qemu_log("%s",
920 tcg_get_arg_str_idx(s, buf, sizeof(buf),
921 args[nb_oargs + nb_iargs - 1]));
922 /* flags */
923 qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]);
924 /* nb out args */
925 qemu_log(",$%d", nb_oargs);
926 for(i = 0; i < nb_oargs; i++) {
927 qemu_log(",");
928 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
929 args[i]));
930 }
931 for(i = 0; i < (nb_iargs - 1); i++) {
932 qemu_log(",");
933 if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
934 qemu_log("<dummy>");
935 } else {
936 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
937 args[nb_oargs + i]));
938 }
939 }
940 } else if (c == INDEX_op_movi_i32
941 #if TCG_TARGET_REG_BITS == 64
942 || c == INDEX_op_movi_i64
943 #endif
944 ) {
945 tcg_target_ulong val;
946 TCGHelperInfo *th;
947
948 nb_oargs = def->nb_oargs;
949 nb_iargs = def->nb_iargs;
950 nb_cargs = def->nb_cargs;
951 qemu_log(" %s %s,$", def->name,
952 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
953 val = args[1];
954 th = tcg_find_helper(s, val);
955 if (th) {
956 qemu_log("%s", th->name);
957 } else {
958 if (c == INDEX_op_movi_i32) {
959 qemu_log("0x%x", (uint32_t)val);
960 } else {
961 qemu_log("0x%" PRIx64 , (uint64_t)val);
962 }
963 }
964 } else {
965 qemu_log(" %s ", def->name);
966 if (c == INDEX_op_nopn) {
967 /* variable number of arguments */
968 nb_cargs = *args;
969 nb_oargs = 0;
970 nb_iargs = 0;
971 } else {
972 nb_oargs = def->nb_oargs;
973 nb_iargs = def->nb_iargs;
974 nb_cargs = def->nb_cargs;
975 }
976
977 k = 0;
978 for(i = 0; i < nb_oargs; i++) {
979 if (k != 0) {
980 qemu_log(",");
981 }
982 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
983 args[k++]));
984 }
985 for(i = 0; i < nb_iargs; i++) {
986 if (k != 0) {
987 qemu_log(",");
988 }
989 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
990 args[k++]));
991 }
992 switch (c) {
993 case INDEX_op_brcond_i32:
994 case INDEX_op_setcond_i32:
995 case INDEX_op_movcond_i32:
996 #if TCG_TARGET_REG_BITS == 32
997 case INDEX_op_brcond2_i32:
998 case INDEX_op_setcond2_i32:
999 #else
1000 case INDEX_op_brcond_i64:
1001 case INDEX_op_setcond_i64:
1002 case INDEX_op_movcond_i64:
1003 #endif
1004 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1005 qemu_log(",%s", cond_name[args[k++]]);
1006 } else {
1007 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1008 }
1009 i = 1;
1010 break;
1011 default:
1012 i = 0;
1013 break;
1014 }
1015 for(; i < nb_cargs; i++) {
1016 if (k != 0) {
1017 qemu_log(",");
1018 }
1019 arg = args[k++];
1020 qemu_log("$0x%" TCG_PRIlx, arg);
1021 }
1022 }
1023 qemu_log("\n");
1024 args += nb_iargs + nb_oargs + nb_cargs;
1025 }
1026 }
1027
1028 /* we give more priority to constraints with fewer registers */
1029 static int get_constraint_priority(const TCGOpDef *def, int k)
1030 {
1031 const TCGArgConstraint *arg_ct;
1032
1033 int i, n;
1034 arg_ct = &def->args_ct[k];
1035 if (arg_ct->ct & TCG_CT_ALIAS) {
1036 /* an alias is equivalent to a single register */
1037 n = 1;
1038 } else {
1039 if (!(arg_ct->ct & TCG_CT_REG))
1040 return 0;
1041 n = 0;
1042 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1043 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1044 n++;
1045 }
1046 }
1047 return TCG_TARGET_NB_REGS - n + 1;
1048 }
1049
1050 /* sort from highest priority to lowest */
1051 static void sort_constraints(TCGOpDef *def, int start, int n)
1052 {
1053 int i, j, p1, p2, tmp;
1054
1055 for(i = 0; i < n; i++)
1056 def->sorted_args[start + i] = start + i;
1057 if (n <= 1)
1058 return;
1059 for(i = 0; i < n - 1; i++) {
1060 for(j = i + 1; j < n; j++) {
1061 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1062 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1063 if (p1 < p2) {
1064 tmp = def->sorted_args[start + i];
1065 def->sorted_args[start + i] = def->sorted_args[start + j];
1066 def->sorted_args[start + j] = tmp;
1067 }
1068 }
1069 }
1070 }
1071
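/* Each TCGTargetOpDef supplies one constraint string per argument, outputs
   first.  A digit aliases the argument to the numbered output, 'i' accepts a
   constant, and any other letter is passed to target_parse_constraint().
   A typical entry (illustrative only; the real tables live in tcg-target.c)
   looks like { INDEX_op_add_i32, { "r", "0", "ri" } }: output in a register,
   first input aliased to the output, second input register or immediate. */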
1072 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1073 {
1074 TCGOpcode op;
1075 TCGOpDef *def;
1076 const char *ct_str;
1077 int i, nb_args;
1078
1079 for(;;) {
1080 if (tdefs->op == (TCGOpcode)-1)
1081 break;
1082 op = tdefs->op;
1083 assert((unsigned)op < NB_OPS);
1084 def = &tcg_op_defs[op];
1085 #if defined(CONFIG_DEBUG_TCG)
1086 /* Duplicate entry in op definitions? */
1087 assert(!def->used);
1088 def->used = 1;
1089 #endif
1090 nb_args = def->nb_iargs + def->nb_oargs;
1091 for(i = 0; i < nb_args; i++) {
1092 ct_str = tdefs->args_ct_str[i];
1093 /* Incomplete TCGTargetOpDef entry? */
1094 assert(ct_str != NULL);
1095 tcg_regset_clear(def->args_ct[i].u.regs);
1096 def->args_ct[i].ct = 0;
1097 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1098 int oarg;
1099 oarg = ct_str[0] - '0';
1100 assert(oarg < def->nb_oargs);
1101 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1102 /* TCG_CT_ALIAS is for the output arguments. The input
1103 argument is tagged with TCG_CT_IALIAS. */
1104 def->args_ct[i] = def->args_ct[oarg];
1105 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1106 def->args_ct[oarg].alias_index = i;
1107 def->args_ct[i].ct |= TCG_CT_IALIAS;
1108 def->args_ct[i].alias_index = oarg;
1109 } else {
1110 for(;;) {
1111 if (*ct_str == '\0')
1112 break;
1113 switch(*ct_str) {
1114 case 'i':
1115 def->args_ct[i].ct |= TCG_CT_CONST;
1116 ct_str++;
1117 break;
1118 default:
1119 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1120 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1121 ct_str, i, def->name);
1122 exit(1);
1123 }
1124 }
1125 }
1126 }
1127 }
1128
1129 /* TCGTargetOpDef entry with too much information? */
1130 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1131
1132         /* sort the constraints (XXX: this is just a heuristic) */
1133 sort_constraints(def, 0, def->nb_oargs);
1134 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1135
1136 #if 0
1137 {
1138 int i;
1139
1140 printf("%s: sorted=", def->name);
1141 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1142 printf(" %d", def->sorted_args[i]);
1143 printf("\n");
1144 }
1145 #endif
1146 tdefs++;
1147 }
1148
1149 #if defined(CONFIG_DEBUG_TCG)
1150 i = 0;
1151 for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
1152 const TCGOpDef *def = &tcg_op_defs[op];
1153 if (op < INDEX_op_call
1154 || op == INDEX_op_debug_insn_start
1155 || (def->flags & TCG_OPF_NOT_PRESENT)) {
1156 /* Wrong entry in op definitions? */
1157 if (def->used) {
1158 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1159 i = 1;
1160 }
1161 } else {
1162 /* Missing entry in op definitions? */
1163 if (!def->used) {
1164 fprintf(stderr, "Missing op definition for %s\n", def->name);
1165 i = 1;
1166 }
1167 }
1168 }
1169 if (i == 1) {
1170 tcg_abort();
1171 }
1172 #endif
1173 }
1174
1175 #ifdef USE_LIVENESS_ANALYSIS
1176
1177 /* overwrite an operation using 'nb_args' parameters with a nop */
1178 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1179 TCGArg *args, int nb_args)
1180 {
1181 if (nb_args == 0) {
1182 *opc_ptr = INDEX_op_nop;
1183 } else {
1184 *opc_ptr = INDEX_op_nopn;
1185 args[0] = nb_args;
1186 args[nb_args - 1] = nb_args;
1187 }
1188 }
1189
1190 /* liveness analysis: end of function: globals are live, temps are
1191 dead. */
1192 /* XXX: at this stage, not used as there would be little gain because
1193 most TBs end with a conditional jump. */
1194 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps)
1195 {
1196 memset(dead_temps, 0, s->nb_globals);
1197 memset(dead_temps + s->nb_globals, 1, s->nb_temps - s->nb_globals);
1198 }
1199
1200 /* liveness analysis: end of basic block: globals are live, temps are
1201 dead, local temps are live. */
1202 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps)
1203 {
1204 int i;
1205 TCGTemp *ts;
1206
1207 memset(dead_temps, 0, s->nb_globals);
1208 ts = &s->temps[s->nb_globals];
1209 for(i = s->nb_globals; i < s->nb_temps; i++) {
1210 if (ts->temp_local)
1211 dead_temps[i] = 0;
1212 else
1213 dead_temps[i] = 1;
1214 ts++;
1215 }
1216 }
1217
1218 /* Liveness analysis: update the op_dead_args array to tell whether a
1219    given input argument is dead.  Instructions updating only dead
1220    temporaries are removed. */
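/* The pass walks the opcode stream backwards.  dead_temps[i] is set when
   temp i has no later use; an op with no side effects whose outputs are all
   dead is rewritten into a nop.  For the ops that remain, op_dead_args gets
   one bit per argument, set when that argument dies at this op, which the
   register allocator uses to release registers early. */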
1221 static void tcg_liveness_analysis(TCGContext *s)
1222 {
1223 int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
1224 TCGOpcode op;
1225 TCGArg *args;
1226 const TCGOpDef *def;
1227 uint8_t *dead_temps;
1228 unsigned int dead_args;
1229
1230 gen_opc_ptr++; /* skip end */
1231
1232 nb_ops = gen_opc_ptr - gen_opc_buf;
1233
1234 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1235
1236 dead_temps = tcg_malloc(s->nb_temps);
1237 memset(dead_temps, 1, s->nb_temps);
1238
1239 args = gen_opparam_ptr;
1240 op_index = nb_ops - 1;
1241 while (op_index >= 0) {
1242 op = gen_opc_buf[op_index];
1243 def = &tcg_op_defs[op];
1244 switch(op) {
1245 case INDEX_op_call:
1246 {
1247 int call_flags;
1248
1249 nb_args = args[-1];
1250 args -= nb_args;
1251 nb_iargs = args[0] & 0xffff;
1252 nb_oargs = args[0] >> 16;
1253 args++;
1254 call_flags = args[nb_oargs + nb_iargs];
1255
1256 /* pure functions can be removed if their result is not
1257 used */
1258 if (call_flags & TCG_CALL_PURE) {
1259 for(i = 0; i < nb_oargs; i++) {
1260 arg = args[i];
1261 if (!dead_temps[arg])
1262 goto do_not_remove_call;
1263 }
1264 tcg_set_nop(s, gen_opc_buf + op_index,
1265 args - 1, nb_args);
1266 } else {
1267 do_not_remove_call:
1268
1269 /* output args are dead */
1270 dead_args = 0;
1271 for(i = 0; i < nb_oargs; i++) {
1272 arg = args[i];
1273 if (dead_temps[arg]) {
1274 dead_args |= (1 << i);
1275 }
1276 dead_temps[arg] = 1;
1277 }
1278
1279 if (!(call_flags & TCG_CALL_CONST)) {
1280 /* globals are live (they may be used by the call) */
1281 memset(dead_temps, 0, s->nb_globals);
1282 }
1283
1284 /* input args are live */
1285 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1286 arg = args[i];
1287 if (arg != TCG_CALL_DUMMY_ARG) {
1288 if (dead_temps[arg]) {
1289 dead_args |= (1 << i);
1290 }
1291 dead_temps[arg] = 0;
1292 }
1293 }
1294 s->op_dead_args[op_index] = dead_args;
1295 }
1296 args--;
1297 }
1298 break;
1299 case INDEX_op_debug_insn_start:
1300 args -= def->nb_args;
1301 break;
1302 case INDEX_op_nopn:
1303 nb_args = args[-1];
1304 args -= nb_args;
1305 break;
1306 case INDEX_op_discard:
1307 args--;
1308 /* mark the temporary as dead */
1309 dead_temps[args[0]] = 1;
1310 break;
1311 case INDEX_op_end:
1312 break;
1313 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1314 default:
1315 args -= def->nb_args;
1316 nb_iargs = def->nb_iargs;
1317 nb_oargs = def->nb_oargs;
1318
1319 /* Test if the operation can be removed because all
1320 its outputs are dead. We assume that nb_oargs == 0
1321 implies side effects */
1322 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1323 for(i = 0; i < nb_oargs; i++) {
1324 arg = args[i];
1325 if (!dead_temps[arg])
1326 goto do_not_remove;
1327 }
1328 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
1329 #ifdef CONFIG_PROFILER
1330 s->del_op_count++;
1331 #endif
1332 } else {
1333 do_not_remove:
1334
1335 /* output args are dead */
1336 dead_args = 0;
1337 for(i = 0; i < nb_oargs; i++) {
1338 arg = args[i];
1339 if (dead_temps[arg]) {
1340 dead_args |= (1 << i);
1341 }
1342 dead_temps[arg] = 1;
1343 }
1344
1345 /* if end of basic block, update */
1346 if (def->flags & TCG_OPF_BB_END) {
1347 tcg_la_bb_end(s, dead_temps);
1348 } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
1349 /* globals are live */
1350 memset(dead_temps, 0, s->nb_globals);
1351 }
1352
1353 /* input args are live */
1354 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1355 arg = args[i];
1356 if (dead_temps[arg]) {
1357 dead_args |= (1 << i);
1358 }
1359 dead_temps[arg] = 0;
1360 }
1361 s->op_dead_args[op_index] = dead_args;
1362 }
1363 break;
1364 }
1365 op_index--;
1366 }
1367
1368 if (args != gen_opparam_buf)
1369 tcg_abort();
1370 }
1371 #else
1372 /* dummy liveness analysis */
1373 static void tcg_liveness_analysis(TCGContext *s)
1374 {
1375 int nb_ops;
1376 nb_ops = gen_opc_ptr - gen_opc_buf;
1377
1378 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1379 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1380 }
1381 #endif
1382
1383 #ifndef NDEBUG
1384 static void dump_regs(TCGContext *s)
1385 {
1386 TCGTemp *ts;
1387 int i;
1388 char buf[64];
1389
1390 for(i = 0; i < s->nb_temps; i++) {
1391 ts = &s->temps[i];
1392 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1393 switch(ts->val_type) {
1394 case TEMP_VAL_REG:
1395 printf("%s", tcg_target_reg_names[ts->reg]);
1396 break;
1397 case TEMP_VAL_MEM:
1398 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1399 break;
1400 case TEMP_VAL_CONST:
1401 printf("$0x%" TCG_PRIlx, ts->val);
1402 break;
1403 case TEMP_VAL_DEAD:
1404 printf("D");
1405 break;
1406 default:
1407 printf("???");
1408 break;
1409 }
1410 printf("\n");
1411 }
1412
1413 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1414 if (s->reg_to_temp[i] >= 0) {
1415 printf("%s: %s\n",
1416 tcg_target_reg_names[i],
1417 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1418 }
1419 }
1420 }
1421
1422 static void check_regs(TCGContext *s)
1423 {
1424 int reg, k;
1425 TCGTemp *ts;
1426 char buf[64];
1427
1428 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1429 k = s->reg_to_temp[reg];
1430 if (k >= 0) {
1431 ts = &s->temps[k];
1432 if (ts->val_type != TEMP_VAL_REG ||
1433 ts->reg != reg) {
1434 printf("Inconsistency for register %s:\n",
1435 tcg_target_reg_names[reg]);
1436 goto fail;
1437 }
1438 }
1439 }
1440 for(k = 0; k < s->nb_temps; k++) {
1441 ts = &s->temps[k];
1442 if (ts->val_type == TEMP_VAL_REG &&
1443 !ts->fixed_reg &&
1444 s->reg_to_temp[ts->reg] != k) {
1445 printf("Inconsistency for temp %s:\n",
1446 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1447 fail:
1448 printf("reg state:\n");
1449 dump_regs(s);
1450 tcg_abort();
1451 }
1452 }
1453 }
1454 #endif
1455
1456 static void temp_allocate_frame(TCGContext *s, int temp)
1457 {
1458 TCGTemp *ts;
1459 ts = &s->temps[temp];
1460 #ifndef __sparc_v9__ /* Sparc64 stack is accessed with offset of 2047 */
1461 s->current_frame_offset = (s->current_frame_offset +
1462 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1463 ~(sizeof(tcg_target_long) - 1);
1464 #endif
1465 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1466 s->frame_end) {
1467 tcg_abort();
1468 }
1469 ts->mem_offset = s->current_frame_offset;
1470 ts->mem_reg = s->frame_reg;
1471 ts->mem_allocated = 1;
1472 s->current_frame_offset += (tcg_target_long)sizeof(tcg_target_long);
1473 }
1474
1475 /* free register 'reg' by spilling the corresponding temporary if necessary */
1476 static void tcg_reg_free(TCGContext *s, int reg)
1477 {
1478 TCGTemp *ts;
1479 int temp;
1480
1481 temp = s->reg_to_temp[reg];
1482 if (temp != -1) {
1483 ts = &s->temps[temp];
1484 assert(ts->val_type == TEMP_VAL_REG);
1485 if (!ts->mem_coherent) {
1486 if (!ts->mem_allocated)
1487 temp_allocate_frame(s, temp);
1488 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1489 }
1490 ts->val_type = TEMP_VAL_MEM;
1491 s->reg_to_temp[reg] = -1;
1492 }
1493 }
1494
1495 /* Allocate a register belonging to reg1 & ~reg2 */
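/* reg1 is the set allowed by the constraint, reg2 the registers that must be
   avoided (already allocated or reserved).  Free registers are preferred, in
   the target's allocation order; otherwise the first acceptable register is
   spilled with tcg_reg_free(). */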
1496 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1497 {
1498 int i, reg;
1499 TCGRegSet reg_ct;
1500
1501 tcg_regset_andnot(reg_ct, reg1, reg2);
1502
1503 /* first try free registers */
1504 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1505 reg = tcg_target_reg_alloc_order[i];
1506 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1507 return reg;
1508 }
1509
1510 /* XXX: do better spill choice */
1511 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1512 reg = tcg_target_reg_alloc_order[i];
1513 if (tcg_regset_test_reg(reg_ct, reg)) {
1514 tcg_reg_free(s, reg);
1515 return reg;
1516 }
1517 }
1518
1519 tcg_abort();
1520 }
1521
1522 /* save a temporary to memory. 'allocated_regs' is used in case a
1523    temporary register needs to be allocated to store a constant. */
1524 static void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1525 {
1526 TCGTemp *ts;
1527 int reg;
1528
1529 ts = &s->temps[temp];
1530 if (!ts->fixed_reg) {
1531 switch(ts->val_type) {
1532 case TEMP_VAL_REG:
1533 tcg_reg_free(s, ts->reg);
1534 break;
1535 case TEMP_VAL_DEAD:
1536 ts->val_type = TEMP_VAL_MEM;
1537 break;
1538 case TEMP_VAL_CONST:
1539 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1540 allocated_regs);
1541 if (!ts->mem_allocated)
1542 temp_allocate_frame(s, temp);
1543 tcg_out_movi(s, ts->type, reg, ts->val);
1544 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1545 ts->val_type = TEMP_VAL_MEM;
1546 break;
1547 case TEMP_VAL_MEM:
1548 break;
1549 default:
1550 tcg_abort();
1551 }
1552 }
1553 }
1554
1555 /* save globals to their canonical location and assume they can be
1556    modified by the following code. 'allocated_regs' is used in case a
1557    temporary register needs to be allocated to store a constant. */
1558 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1559 {
1560 int i;
1561
1562 for(i = 0; i < s->nb_globals; i++) {
1563 temp_save(s, i, allocated_regs);
1564 }
1565 }
1566
1567 /* at the end of a basic block, we assume all temporaries are dead and
1568 all globals are stored at their canonical location. */
1569 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1570 {
1571 TCGTemp *ts;
1572 int i;
1573
1574 for(i = s->nb_globals; i < s->nb_temps; i++) {
1575 ts = &s->temps[i];
1576 if (ts->temp_local) {
1577 temp_save(s, i, allocated_regs);
1578 } else {
1579 if (ts->val_type == TEMP_VAL_REG) {
1580 s->reg_to_temp[ts->reg] = -1;
1581 }
1582 ts->val_type = TEMP_VAL_DEAD;
1583 }
1584 }
1585
1586 save_globals(s, allocated_regs);
1587 }
1588
1589 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1590
1591 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args)
1592 {
1593 TCGTemp *ots;
1594 tcg_target_ulong val;
1595
1596 ots = &s->temps[args[0]];
1597 val = args[1];
1598
1599 if (ots->fixed_reg) {
1600 /* for fixed registers, we do not do any constant
1601 propagation */
1602 tcg_out_movi(s, ots->type, ots->reg, val);
1603 } else {
1604 /* The movi is not explicitly generated here */
1605 if (ots->val_type == TEMP_VAL_REG)
1606 s->reg_to_temp[ots->reg] = -1;
1607 ots->val_type = TEMP_VAL_CONST;
1608 ots->val = val;
1609 }
1610 }
1611
1612 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1613 const TCGArg *args,
1614 unsigned int dead_args)
1615 {
1616 TCGTemp *ts, *ots;
1617 int reg;
1618 const TCGArgConstraint *arg_ct;
1619
1620 ots = &s->temps[args[0]];
1621 ts = &s->temps[args[1]];
1622 arg_ct = &def->args_ct[0];
1623
1624 /* XXX: always mark arg dead if IS_DEAD_ARG(1) */
1625 if (ts->val_type == TEMP_VAL_REG) {
1626 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1627 /* the mov can be suppressed */
1628 if (ots->val_type == TEMP_VAL_REG)
1629 s->reg_to_temp[ots->reg] = -1;
1630 reg = ts->reg;
1631 s->reg_to_temp[reg] = -1;
1632 ts->val_type = TEMP_VAL_DEAD;
1633 } else {
1634 if (ots->val_type == TEMP_VAL_REG) {
1635 reg = ots->reg;
1636 } else {
1637 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1638 }
1639 if (ts->reg != reg) {
1640 tcg_out_mov(s, ots->type, reg, ts->reg);
1641 }
1642 }
1643 } else if (ts->val_type == TEMP_VAL_MEM) {
1644 if (ots->val_type == TEMP_VAL_REG) {
1645 reg = ots->reg;
1646 } else {
1647 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1648 }
1649 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1650 } else if (ts->val_type == TEMP_VAL_CONST) {
1651 if (ots->fixed_reg) {
1652 reg = ots->reg;
1653 tcg_out_movi(s, ots->type, reg, ts->val);
1654 } else {
1655 /* propagate constant */
1656 if (ots->val_type == TEMP_VAL_REG)
1657 s->reg_to_temp[ots->reg] = -1;
1658 ots->val_type = TEMP_VAL_CONST;
1659 ots->val = ts->val;
1660 return;
1661 }
1662 } else {
1663 tcg_abort();
1664 }
1665 s->reg_to_temp[reg] = args[0];
1666 ots->reg = reg;
1667 ots->val_type = TEMP_VAL_REG;
1668 ots->mem_coherent = 0;
1669 }
1670
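/* Register allocation for a generic op: every input is first brought into a
   location that satisfies its constraint (register, alias or inline
   constant), dead inputs are released, ops marked TCG_OPF_CALL_CLOBBER free
   the clobbered registers and save the globals, outputs are assigned
   registers, the target emits the instruction, and fixed-register outputs
   are copied back to their dedicated register if needed. */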
1671 static void tcg_reg_alloc_op(TCGContext *s,
1672 const TCGOpDef *def, TCGOpcode opc,
1673 const TCGArg *args,
1674 unsigned int dead_args)
1675 {
1676 TCGRegSet allocated_regs;
1677 int i, k, nb_iargs, nb_oargs, reg;
1678 TCGArg arg;
1679 const TCGArgConstraint *arg_ct;
1680 TCGTemp *ts;
1681 TCGArg new_args[TCG_MAX_OP_ARGS];
1682 int const_args[TCG_MAX_OP_ARGS];
1683
1684 nb_oargs = def->nb_oargs;
1685 nb_iargs = def->nb_iargs;
1686
1687 /* copy constants */
1688 memcpy(new_args + nb_oargs + nb_iargs,
1689 args + nb_oargs + nb_iargs,
1690 sizeof(TCGArg) * def->nb_cargs);
1691
1692 /* satisfy input constraints */
1693 tcg_regset_set(allocated_regs, s->reserved_regs);
1694 for(k = 0; k < nb_iargs; k++) {
1695 i = def->sorted_args[nb_oargs + k];
1696 arg = args[i];
1697 arg_ct = &def->args_ct[i];
1698 ts = &s->temps[arg];
1699 if (ts->val_type == TEMP_VAL_MEM) {
1700 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1701 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1702 ts->val_type = TEMP_VAL_REG;
1703 ts->reg = reg;
1704 ts->mem_coherent = 1;
1705 s->reg_to_temp[reg] = arg;
1706 } else if (ts->val_type == TEMP_VAL_CONST) {
1707 if (tcg_target_const_match(ts->val, arg_ct)) {
1708 /* constant is OK for instruction */
1709 const_args[i] = 1;
1710 new_args[i] = ts->val;
1711 goto iarg_end;
1712 } else {
1713 /* need to move to a register */
1714 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1715 tcg_out_movi(s, ts->type, reg, ts->val);
1716 ts->val_type = TEMP_VAL_REG;
1717 ts->reg = reg;
1718 ts->mem_coherent = 0;
1719 s->reg_to_temp[reg] = arg;
1720 }
1721 }
1722 assert(ts->val_type == TEMP_VAL_REG);
1723 if (arg_ct->ct & TCG_CT_IALIAS) {
1724 if (ts->fixed_reg) {
1725 /* if fixed register, we must allocate a new register
1726 if the alias is not the same register */
1727 if (arg != args[arg_ct->alias_index])
1728 goto allocate_in_reg;
1729 } else {
1730 /* if the input is aliased to an output and if it is
1731 not dead after the instruction, we must allocate
1732 a new register and move it */
1733 if (!IS_DEAD_ARG(i)) {
1734 goto allocate_in_reg;
1735 }
1736 }
1737 }
1738 reg = ts->reg;
1739 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1740 /* nothing to do : the constraint is satisfied */
1741 } else {
1742 allocate_in_reg:
1743 /* allocate a new register matching the constraint
1744 and move the temporary register into it */
1745 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1746 tcg_out_mov(s, ts->type, reg, ts->reg);
1747 }
1748 new_args[i] = reg;
1749 const_args[i] = 0;
1750 tcg_regset_set_reg(allocated_regs, reg);
1751 iarg_end: ;
1752 }
1753
1754 if (def->flags & TCG_OPF_BB_END) {
1755 tcg_reg_alloc_bb_end(s, allocated_regs);
1756 } else {
1757 /* mark dead temporaries and free the associated registers */
1758 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1759 arg = args[i];
1760 if (IS_DEAD_ARG(i)) {
1761 ts = &s->temps[arg];
1762 if (!ts->fixed_reg) {
1763 if (ts->val_type == TEMP_VAL_REG)
1764 s->reg_to_temp[ts->reg] = -1;
1765 ts->val_type = TEMP_VAL_DEAD;
1766 }
1767 }
1768 }
1769
1770 if (def->flags & TCG_OPF_CALL_CLOBBER) {
1771 /* XXX: permit generic clobber register list ? */
1772 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1773 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1774 tcg_reg_free(s, reg);
1775 }
1776 }
1777 /* XXX: for load/store we could do that only for the slow path
1778 (i.e. when a memory callback is called) */
1779
1780 /* store globals and free associated registers (we assume the insn
1781                can modify any global). */
1782 save_globals(s, allocated_regs);
1783 }
1784
1785 /* satisfy the output constraints */
1786 tcg_regset_set(allocated_regs, s->reserved_regs);
1787 for(k = 0; k < nb_oargs; k++) {
1788 i = def->sorted_args[k];
1789 arg = args[i];
1790 arg_ct = &def->args_ct[i];
1791 ts = &s->temps[arg];
1792 if (arg_ct->ct & TCG_CT_ALIAS) {
1793 reg = new_args[arg_ct->alias_index];
1794 } else {
1795 /* if fixed register, we try to use it */
1796 reg = ts->reg;
1797 if (ts->fixed_reg &&
1798 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1799 goto oarg_end;
1800 }
1801 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1802 }
1803 tcg_regset_set_reg(allocated_regs, reg);
1804 /* if a fixed register is used, then a move will be done afterwards */
1805 if (!ts->fixed_reg) {
1806 if (ts->val_type == TEMP_VAL_REG)
1807 s->reg_to_temp[ts->reg] = -1;
1808 if (IS_DEAD_ARG(i)) {
1809 ts->val_type = TEMP_VAL_DEAD;
1810 } else {
1811 ts->val_type = TEMP_VAL_REG;
1812 ts->reg = reg;
1813 /* temp value is modified, so the value kept in memory is
1814 potentially not the same */
1815 ts->mem_coherent = 0;
1816 s->reg_to_temp[reg] = arg;
1817 }
1818 }
1819 oarg_end:
1820 new_args[i] = reg;
1821 }
1822 }
1823
1824 /* emit instruction */
1825 tcg_out_op(s, opc, new_args, const_args);
1826
1827 /* move the outputs in the correct register if needed */
1828 for(i = 0; i < nb_oargs; i++) {
1829 ts = &s->temps[args[i]];
1830 reg = new_args[i];
1831 if (ts->fixed_reg && ts->reg != reg) {
1832 tcg_out_mov(s, ts->type, ts->reg, reg);
1833 }
1834 }
1835 }
1836
1837 #ifdef TCG_TARGET_STACK_GROWSUP
1838 #define STACK_DIR(x) (-(x))
1839 #else
1840 #define STACK_DIR(x) (x)
1841 #endif
1842
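/* Calls are allocated in several steps: arguments that do not fit in the
   target's argument registers are stored to the stack, the remaining
   arguments are loaded into tcg_target_call_iarg_regs[], the function
   address is materialised, every call-clobbered register is freed, globals
   are saved unless the call is TCG_CALL_CONST, and the results are bound to
   tcg_target_call_oarg_regs[].  The return value is the number of parameter
   words consumed from the opparam stream. */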
1843 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
1844 TCGOpcode opc, const TCGArg *args,
1845 unsigned int dead_args)
1846 {
1847 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
1848 TCGArg arg, func_arg;
1849 TCGTemp *ts;
1850 tcg_target_long stack_offset, call_stack_size, func_addr;
1851 int const_func_arg, allocate_args;
1852 TCGRegSet allocated_regs;
1853 const TCGArgConstraint *arg_ct;
1854
1855 arg = *args++;
1856
1857 nb_oargs = arg >> 16;
1858 nb_iargs = arg & 0xffff;
1859 nb_params = nb_iargs - 1;
1860
1861 flags = args[nb_oargs + nb_iargs];
1862
1863 nb_regs = tcg_target_get_call_iarg_regs_count(flags);
1864 if (nb_regs > nb_params)
1865 nb_regs = nb_params;
1866
1867 /* assign stack slots first */
1868 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
1869 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
1870 ~(TCG_TARGET_STACK_ALIGN - 1);
1871 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
1872 if (allocate_args) {
1873 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
1874 preallocate call stack */
1875 tcg_abort();
1876 }
1877
1878 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
1879 for(i = nb_regs; i < nb_params; i++) {
1880 arg = args[nb_oargs + i];
1881 #ifdef TCG_TARGET_STACK_GROWSUP
1882 stack_offset -= sizeof(tcg_target_long);
1883 #endif
1884 if (arg != TCG_CALL_DUMMY_ARG) {
1885 ts = &s->temps[arg];
1886 if (ts->val_type == TEMP_VAL_REG) {
1887 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
1888 } else if (ts->val_type == TEMP_VAL_MEM) {
1889 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1890 s->reserved_regs);
1891 /* XXX: not correct if reading values from the stack */
1892 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1893 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1894 } else if (ts->val_type == TEMP_VAL_CONST) {
1895 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1896 s->reserved_regs);
1897 /* XXX: sign extend may be needed on some targets */
1898 tcg_out_movi(s, ts->type, reg, ts->val);
1899 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1900 } else {
1901 tcg_abort();
1902 }
1903 }
1904 #ifndef TCG_TARGET_STACK_GROWSUP
1905 stack_offset += sizeof(tcg_target_long);
1906 #endif
1907 }
1908
1909 /* assign input registers */
1910 tcg_regset_set(allocated_regs, s->reserved_regs);
1911 for(i = 0; i < nb_regs; i++) {
1912 arg = args[nb_oargs + i];
1913 if (arg != TCG_CALL_DUMMY_ARG) {
1914 ts = &s->temps[arg];
1915 reg = tcg_target_call_iarg_regs[i];
1916 tcg_reg_free(s, reg);
1917 if (ts->val_type == TEMP_VAL_REG) {
1918 if (ts->reg != reg) {
1919 tcg_out_mov(s, ts->type, reg, ts->reg);
1920 }
1921 } else if (ts->val_type == TEMP_VAL_MEM) {
1922 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1923 } else if (ts->val_type == TEMP_VAL_CONST) {
1924 /* XXX: sign extend ? */
1925 tcg_out_movi(s, ts->type, reg, ts->val);
1926 } else {
1927 tcg_abort();
1928 }
1929 tcg_regset_set_reg(allocated_regs, reg);
1930 }
1931 }
1932
1933 /* assign function address */
1934 func_arg = args[nb_oargs + nb_iargs - 1];
1935 arg_ct = &def->args_ct[0];
1936 ts = &s->temps[func_arg];
1937 func_addr = ts->val;
1938 const_func_arg = 0;
1939 if (ts->val_type == TEMP_VAL_MEM) {
1940 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1941 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1942 func_arg = reg;
1943 tcg_regset_set_reg(allocated_regs, reg);
1944 } else if (ts->val_type == TEMP_VAL_REG) {
1945 reg = ts->reg;
1946 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1947 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1948 tcg_out_mov(s, ts->type, reg, ts->reg);
1949 }
1950 func_arg = reg;
1951 tcg_regset_set_reg(allocated_regs, reg);
1952 } else if (ts->val_type == TEMP_VAL_CONST) {
1953 if (tcg_target_const_match(func_addr, arg_ct)) {
1954 const_func_arg = 1;
1955 func_arg = func_addr;
1956 } else {
1957 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1958 tcg_out_movi(s, ts->type, reg, func_addr);
1959 func_arg = reg;
1960 tcg_regset_set_reg(allocated_regs, reg);
1961 }
1962 } else {
1963 tcg_abort();
1964 }
1965
1966
1967 /* mark dead temporaries and free the associated registers */
1968 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1969 arg = args[i];
1970 if (IS_DEAD_ARG(i)) {
1971 ts = &s->temps[arg];
1972 if (!ts->fixed_reg) {
1973 if (ts->val_type == TEMP_VAL_REG)
1974 s->reg_to_temp[ts->reg] = -1;
1975 ts->val_type = TEMP_VAL_DEAD;
1976 }
1977 }
1978 }
1979
1980 /* clobber call registers */
1981 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1982 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1983 tcg_reg_free(s, reg);
1984 }
1985 }
1986
1987 /* store globals and free associated registers (we assume the call
1988        can modify any global). */
1989 if (!(flags & TCG_CALL_CONST)) {
1990 save_globals(s, allocated_regs);
1991 }
1992
1993 tcg_out_op(s, opc, &func_arg, &const_func_arg);
1994
1995 /* assign output registers and emit moves if needed */
1996 for(i = 0; i < nb_oargs; i++) {
1997 arg = args[i];
1998 ts = &s->temps[arg];
1999 reg = tcg_target_call_oarg_regs[i];
2000 assert(s->reg_to_temp[reg] == -1);
2001 if (ts->fixed_reg) {
2002 if (ts->reg != reg) {
2003 tcg_out_mov(s, ts->type, ts->reg, reg);
2004 }
2005 } else {
2006 if (ts->val_type == TEMP_VAL_REG)
2007 s->reg_to_temp[ts->reg] = -1;
2008 if (IS_DEAD_ARG(i)) {
2009 ts->val_type = TEMP_VAL_DEAD;
2010 } else {
2011 ts->val_type = TEMP_VAL_REG;
2012 ts->reg = reg;
2013 ts->mem_coherent = 0;
2014 s->reg_to_temp[reg] = arg;
2015 }
2016 }
2017 }
2018
2019 return nb_iargs + nb_oargs + def->nb_cargs + 1;
2020 }
2021
2022 #ifdef CONFIG_PROFILER
2023
2024 static int64_t tcg_table_op_count[NB_OPS];
2025
2026 static void dump_op_count(void)
2027 {
2028 int i;
2029 FILE *f;
2030 f = fopen("/tmp/op.log", "w");
2031 for(i = INDEX_op_end; i < NB_OPS; i++) {
2032 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
2033 }
2034 fclose(f);
2035 }
2036 #endif
2037
2038
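/* Common back end of code generation: optionally run the optimizer over the
   opcode stream, run liveness analysis, reset the register allocator, then
   translate each op in order.  When search_pc >= 0 the loop stops as soon as
   the emitted code crosses that offset and returns the index of the current
   op, which lets the caller recover the guest PC for a fault inside the
   TB. */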
2039 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
2040 long search_pc)
2041 {
2042 TCGOpcode opc;
2043 int op_index;
2044 const TCGOpDef *def;
2045 unsigned int dead_args;
2046 const TCGArg *args;
2047
2048 #ifdef DEBUG_DISAS
2049 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2050 qemu_log("OP:\n");
2051 tcg_dump_ops(s);
2052 qemu_log("\n");
2053 }
2054 #endif
2055
2056 #ifdef CONFIG_PROFILER
2057 s->opt_time -= profile_getclock();
2058 #endif
2059
2060 #ifdef USE_TCG_OPTIMIZATIONS
2061 gen_opparam_ptr =
2062 tcg_optimize(s, gen_opc_ptr, gen_opparam_buf, tcg_op_defs);
2063 #endif
2064
2065 #ifdef CONFIG_PROFILER
2066 s->opt_time += profile_getclock();
2067 s->la_time -= profile_getclock();
2068 #endif
2069
2070 tcg_liveness_analysis(s);
2071
2072 #ifdef CONFIG_PROFILER
2073 s->la_time += profile_getclock();
2074 #endif
2075
2076 #ifdef DEBUG_DISAS
2077 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2078 qemu_log("OP after optimization and liveness analysis:\n");
2079 tcg_dump_ops(s);
2080 qemu_log("\n");
2081 }
2082 #endif
2083
2084 tcg_reg_alloc_start(s);
2085
2086 s->code_buf = gen_code_buf;
2087 s->code_ptr = gen_code_buf;
2088
2089 args = gen_opparam_buf;
2090 op_index = 0;
2091
2092 for(;;) {
2093 opc = gen_opc_buf[op_index];
2094 #ifdef CONFIG_PROFILER
2095 tcg_table_op_count[opc]++;
2096 #endif
2097 def = &tcg_op_defs[opc];
2098 #if 0
2099 printf("%s: %d %d %d\n", def->name,
2100 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2101 // dump_regs(s);
2102 #endif
2103 switch(opc) {
2104 case INDEX_op_mov_i32:
2105 #if TCG_TARGET_REG_BITS == 64
2106 case INDEX_op_mov_i64:
2107 #endif
2108 dead_args = s->op_dead_args[op_index];
2109 tcg_reg_alloc_mov(s, def, args, dead_args);
2110 break;
2111 case INDEX_op_movi_i32:
2112 #if TCG_TARGET_REG_BITS == 64
2113 case INDEX_op_movi_i64:
2114 #endif
2115 tcg_reg_alloc_movi(s, args);
2116 break;
2117 case INDEX_op_debug_insn_start:
2118 /* debug instruction */
2119 break;
2120 case INDEX_op_nop:
2121 case INDEX_op_nop1:
2122 case INDEX_op_nop2:
2123 case INDEX_op_nop3:
2124 break;
2125 case INDEX_op_nopn:
2126 args += args[0];
2127 goto next;
2128 case INDEX_op_discard:
2129 {
2130 TCGTemp *ts;
2131 ts = &s->temps[args[0]];
2132 /* mark the temporary as dead */
2133 if (!ts->fixed_reg) {
2134 if (ts->val_type == TEMP_VAL_REG)
2135 s->reg_to_temp[ts->reg] = -1;
2136 ts->val_type = TEMP_VAL_DEAD;
2137 }
2138 }
2139 break;
2140 case INDEX_op_set_label:
2141 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2142 tcg_out_label(s, args[0], s->code_ptr);
2143 break;
2144 case INDEX_op_call:
2145 dead_args = s->op_dead_args[op_index];
2146 args += tcg_reg_alloc_call(s, def, opc, args, dead_args);
2147 goto next;
2148 case INDEX_op_end:
2149 goto the_end;
2150 default:
2151 /* Sanity check that we've not introduced any unhandled opcodes. */
2152 if (def->flags & TCG_OPF_NOT_PRESENT) {
2153 tcg_abort();
2154 }
2155 /* Note: it would be much faster to have specialized
2156 register allocator functions for some common
2157 argument patterns. */
2158 dead_args = s->op_dead_args[op_index];
2159 tcg_reg_alloc_op(s, def, opc, args, dead_args);
2160 break;
2161 }
2162 args += def->nb_args;
2163 next:
2164 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
2165 return op_index;
2166 }
2167 op_index++;
2168 #ifndef NDEBUG
2169 check_regs(s);
2170 #endif
2171 }
2172 the_end:
2173 return -1;
2174 }
2175
2176 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
2177 {
2178 #ifdef CONFIG_PROFILER
2179 {
2180 int n;
2181 n = (gen_opc_ptr - gen_opc_buf);
2182 s->op_count += n;
2183 if (n > s->op_count_max)
2184 s->op_count_max = n;
2185
2186 s->temp_count += s->nb_temps;
2187 if (s->nb_temps > s->temp_count_max)
2188 s->temp_count_max = s->nb_temps;
2189 }
2190 #endif
2191
2192 tcg_gen_code_common(s, gen_code_buf, -1);
2193
2194 /* flush instruction cache */
2195 flush_icache_range((tcg_target_ulong)gen_code_buf,
2196 (tcg_target_ulong)s->code_ptr);
2197
2198 return s->code_ptr - gen_code_buf;
2199 }
2200
2201 /* Return the index of the micro operation whose generated host code covers
2202 the byte at 'offset' from the start of the TB. The contents of gen_code_buf
2203 must not be changed, though writing the same values is ok.
2204 Return -1 if not found. */
2205 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
2206 {
2207 return tcg_gen_code_common(s, gen_code_buf, offset);
2208 }
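/* Illustrative sketch only (not part of the original source): one way a
   caller could use tcg_gen_code_search_pc to map a host PC inside a TB back
   to the micro operation that generated it.  'tb_code' and 'host_pc' are
   hypothetical names; in QEMU the real caller sits on the cpu_restore_state
   path. */
#if 0
static int example_op_index_for_host_pc(TCGContext *s, uint8_t *tb_code,
                                        uintptr_t host_pc)
{
    /* Regenerate the TB into the same buffer (the bytes written are
       identical) and stop as soon as the emitted code covers host_pc.  */
    long offset = host_pc - (uintptr_t)tb_code;
    return tcg_gen_code_search_pc(s, tb_code, offset);
}
#endif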
2209
2210 #ifdef CONFIG_PROFILER
2211 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2212 {
2213 TCGContext *s = &tcg_ctx;
2214 int64_t tot;
2215
2216 tot = s->interm_time + s->code_time;
2217 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2218 tot, tot / 2.4e9);
2219 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2220 s->tb_count,
2221 s->tb_count1 - s->tb_count,
2222 s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
2223 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2224 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
2225 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2226 s->tb_count ?
2227 (double)s->del_op_count / s->tb_count : 0);
2228 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2229 s->tb_count ?
2230 (double)s->temp_count / s->tb_count : 0,
2231 s->temp_count_max);
2232
2233 cpu_fprintf(f, "cycles/op %0.1f\n",
2234 s->op_count ? (double)tot / s->op_count : 0);
2235 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2236 s->code_in_len ? (double)tot / s->code_in_len : 0);
2237 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2238 s->code_out_len ? (double)tot / s->code_out_len : 0);
2239 if (tot == 0)
2240 tot = 1;
2241 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2242 (double)s->interm_time / tot * 100.0);
2243 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2244 (double)s->code_time / tot * 100.0);
2245 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2246 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2247 * 100.0);
2248 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2249 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2250 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2251 s->restore_count);
2252 cpu_fprintf(f, " avg cycles %0.1f\n",
2253 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2254
2255 dump_op_count();
2256 }
2257 #else
2258 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2259 {
2260 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2261 }
2262 #endif
2263
2264 #ifdef ELF_HOST_MACHINE
2265 /* In order to use this feature, the backend needs to do three things:
2266
2267 (1) Define ELF_HOST_MACHINE to indicate both what value to
2268 put into the ELF image and to indicate support for the feature.
2269
2270 (2) Define tcg_register_jit. This should create a buffer containing
2271 the contents of a .debug_frame section that describes the post-
2272 prologue unwind info for the tcg machine.
2273
2274 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2275 */
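/* A minimal, hypothetical sketch of the backend side (the byte values are
   placeholders, not a valid DWARF CIE/FDE; a real backend encodes the frame
   layout set up by its tcg_target_qemu_prologue): */
#if 0
static const uint8_t debug_frame[] = {
    /* CIE and FDE describing the prologue's stack frame would go here. */
    0
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size,
                         (void *)debug_frame, sizeof(debug_frame));
}
#endif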
2276
2277 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2278 typedef enum {
2279 JIT_NOACTION = 0,
2280 JIT_REGISTER_FN,
2281 JIT_UNREGISTER_FN
2282 } jit_actions_t;
2283
2284 struct jit_code_entry {
2285 struct jit_code_entry *next_entry;
2286 struct jit_code_entry *prev_entry;
2287 const void *symfile_addr;
2288 uint64_t symfile_size;
2289 };
2290
2291 struct jit_descriptor {
2292 uint32_t version;
2293 uint32_t action_flag;
2294 struct jit_code_entry *relevant_entry;
2295 struct jit_code_entry *first_entry;
2296 };
2297
2298 void __jit_debug_register_code(void) __attribute__((noinline));
2299 void __jit_debug_register_code(void)
2300 {
2301 asm("");
2302 }
2303
2304 /* Must statically initialize the version, because GDB may check
2305 the version before we can set it. */
2306 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
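/* Per the GDB JIT interface documentation, GDB places a breakpoint on
   __jit_debug_register_code and, whenever it is hit, re-reads
   __jit_debug_descriptor and the in-memory ELF images referenced by its
   entries (symfile_addr/symfile_size).  Registration therefore only needs
   to link an entry into the list, set action_flag, and call the hook. */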
2307
2308 /* End GDB interface. */
2309
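/* Return the offset of 'str' within 'strtab'.  Only names known to be
   present in the template's string table are ever looked up, so no bounds
   check against the end of the table is needed. */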
2310 static int find_string(const char *strtab, const char *str)
2311 {
2312 const char *p = strtab + 1;
2313
2314 while (1) {
2315 if (strcmp(p, str) == 0) {
2316 return p - strtab;
2317 }
2318 p += strlen(p) + 1;
2319 }
2320 }
2321
2322 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2323 void *debug_frame, size_t debug_frame_size)
2324 {
2325 struct __attribute__((packed)) DebugInfo {
2326 uint32_t len;
2327 uint16_t version;
2328 uint32_t abbrev;
2329 uint8_t ptr_size;
2330 uint8_t cu_die;
2331 uint16_t cu_lang;
2332 uintptr_t cu_low_pc;
2333 uintptr_t cu_high_pc;
2334 uint8_t fn_die;
2335 char fn_name[16];
2336 uintptr_t fn_low_pc;
2337 uintptr_t fn_high_pc;
2338 uint8_t cu_eoc;
2339 };
2340
2341 struct ElfImage {
2342 ElfW(Ehdr) ehdr;
2343 ElfW(Phdr) phdr;
2344 ElfW(Shdr) shdr[7];
2345 ElfW(Sym) sym[2];
2346 struct DebugInfo di;
2347 uint8_t da[24];
2348 char str[80];
2349 };
2350
2351 struct ElfImage *img;
2352
2353 static const struct ElfImage img_template = {
2354 .ehdr = {
2355 .e_ident[EI_MAG0] = ELFMAG0,
2356 .e_ident[EI_MAG1] = ELFMAG1,
2357 .e_ident[EI_MAG2] = ELFMAG2,
2358 .e_ident[EI_MAG3] = ELFMAG3,
2359 .e_ident[EI_CLASS] = ELF_CLASS,
2360 .e_ident[EI_DATA] = ELF_DATA,
2361 .e_ident[EI_VERSION] = EV_CURRENT,
2362 .e_type = ET_EXEC,
2363 .e_machine = ELF_HOST_MACHINE,
2364 .e_version = EV_CURRENT,
2365 .e_phoff = offsetof(struct ElfImage, phdr),
2366 .e_shoff = offsetof(struct ElfImage, shdr),
2367 .e_ehsize = sizeof(ElfW(Ehdr)),
2368 .e_phentsize = sizeof(ElfW(Phdr)),
2369 .e_phnum = 1,
2370 .e_shentsize = sizeof(ElfW(Shdr)),
2371 .e_shnum = ARRAY_SIZE(img->shdr),
2372 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2373 #ifdef ELF_HOST_FLAGS
2374 .e_flags = ELF_HOST_FLAGS,
2375 #endif
2376 #ifdef ELF_OSABI
2377 .e_ident[EI_OSABI] = ELF_OSABI,
2378 #endif
2379 },
2380 .phdr = {
2381 .p_type = PT_LOAD,
2382 .p_flags = PF_X,
2383 },
2384 .shdr = {
2385 [0] = { .sh_type = SHT_NULL },
2386 /* Trick: The contents of code_gen_buffer are not present in
2387 this fake ELF file; that got allocated elsewhere. Therefore
2388 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2389 will not look for contents. We can record any address. */
2390 [1] = { /* .text */
2391 .sh_type = SHT_NOBITS,
2392 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2393 },
2394 [2] = { /* .debug_info */
2395 .sh_type = SHT_PROGBITS,
2396 .sh_offset = offsetof(struct ElfImage, di),
2397 .sh_size = sizeof(struct DebugInfo),
2398 },
2399 [3] = { /* .debug_abbrev */
2400 .sh_type = SHT_PROGBITS,
2401 .sh_offset = offsetof(struct ElfImage, da),
2402 .sh_size = sizeof(img->da),
2403 },
2404 [4] = { /* .debug_frame */
2405 .sh_type = SHT_PROGBITS,
2406 .sh_offset = sizeof(struct ElfImage),
2407 },
2408 [5] = { /* .symtab */
2409 .sh_type = SHT_SYMTAB,
2410 .sh_offset = offsetof(struct ElfImage, sym),
2411 .sh_size = sizeof(img->sym),
2412 .sh_info = 1,
2413 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2414 .sh_entsize = sizeof(ElfW(Sym)),
2415 },
2416 [6] = { /* .strtab */
2417 .sh_type = SHT_STRTAB,
2418 .sh_offset = offsetof(struct ElfImage, str),
2419 .sh_size = sizeof(img->str),
2420 }
2421 },
2422 .sym = {
2423 [1] = { /* code_gen_buffer */
2424 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2425 .st_shndx = 1,
2426 }
2427 },
2428 .di = {
2429 .len = sizeof(struct DebugInfo) - 4,
2430 .version = 2,
2431 .ptr_size = sizeof(void *),
2432 .cu_die = 1,
2433 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2434 .fn_die = 2,
2435 .fn_name = "code_gen_buffer"
2436 },
2437 .da = {
2438 1, /* abbrev number (the cu) */
2439 0x11, 1, /* DW_TAG_compile_unit, has children */
2440 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2441 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2442 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2443 0, 0, /* end of abbrev */
2444 2, /* abbrev number (the fn) */
2445 0x2e, 0, /* DW_TAG_subprogram, no children */
2446 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2447 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2448 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2449 0, 0, /* end of abbrev */
2450 0 /* no more abbrev */
2451 },
2452 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2453 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2454 };
2455
2456 /* We only need a single jit entry; statically allocate it. */
2457 static struct jit_code_entry one_entry;
2458
2459 uintptr_t buf = (uintptr_t)buf_ptr;
2460 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2461
2462 img = g_malloc(img_size);
2463 *img = img_template;
2464 memcpy(img + 1, debug_frame, debug_frame_size);
2465
2466 img->phdr.p_vaddr = buf;
2467 img->phdr.p_paddr = buf;
2468 img->phdr.p_memsz = buf_size;
2469
2470 img->shdr[1].sh_name = find_string(img->str, ".text");
2471 img->shdr[1].sh_addr = buf;
2472 img->shdr[1].sh_size = buf_size;
2473
2474 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2475 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2476
2477 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2478 img->shdr[4].sh_size = debug_frame_size;
2479
2480 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2481 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2482
2483 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2484 img->sym[1].st_value = buf;
2485 img->sym[1].st_size = buf_size;
2486
2487 img->di.cu_low_pc = buf;
2488 img->di.cu_high_pc = buf_size;
2489 img->di.fn_low_pc = buf;
2490 img->di.fn_high_pc = buf_size;
2491
2492 #ifdef DEBUG_JIT
2493 /* Enable this block to be able to debug the ELF image file creation.
2494 One can use readelf, objdump, or other inspection utilities. */
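/* For example: "readelf -a /tmp/qemu.jit" or "objdump -h /tmp/qemu.jit". */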
2495 {
2496 FILE *f = fopen("/tmp/qemu.jit", "w+b");
2497 if (f) {
2498 if (fwrite(img, img_size, 1, f) != 1) {
2499 /* Avoid stupid unused return value warning for fwrite. */
2500 }
2501 fclose(f);
2502 }
2503 }
2504 #endif
2505
2506 one_entry.symfile_addr = img;
2507 one_entry.symfile_size = img_size;
2508
2509 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2510 __jit_debug_descriptor.relevant_entry = &one_entry;
2511 __jit_debug_descriptor.first_entry = &one_entry;
2512 __jit_debug_register_code();
2513 }
2514 #else
2515 /* No support for the feature. Provide the entry point expected by exec.c,
2516 and implement the internal function we declared earlier. */
2517
2518 static void tcg_register_jit_int(void *buf, size_t size,
2519 void *debug_frame, size_t debug_frame_size)
2520 {
2521 }
2522
2523 void tcg_register_jit(void *buf, size_t buf_size)
2524 {
2525 }
2526 #endif /* ELF_HOST_MACHINE */