/* tcg/tcg.c (QEMU, mirror_qemu.git), at "tcg: introduce regions to split code_gen_buffer" */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

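/*
 * Editorial note, not part of the original source: the tcg_outN() and
 * tcg_patchN() helpers above all share one pattern. When the value is
 * exactly one insn unit wide it is stored directly; otherwise it is
 * memcpy()'d over the next few units, because code_ptr is only
 * guaranteed to be aligned to TCG_TARGET_INSN_UNIT_SIZE. A minimal
 * usage sketch for a byte-unit host such as i386, where
 * TCG_TARGET_INSN_UNIT_SIZE == 1:
 *
 *     tcg_out8(s, 0xe9);       // jmp rel32 opcode
 *     tcg_out32(s, disp);      // 32-bit displacement, emitted via memcpy()
 */
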
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place. The caller may not have
           written the initial value. */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

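/*
 * Editorial sketch (an assumption about typical backend usage, not code
 * from this file): a forward branch pairs the helpers above like so:
 *
 *     TCGLabel *l = gen_new_label();
 *     // R_EXAMPLE stands in for a target-specific relocation type
 *     tcg_out_reloc(s, s->code_ptr, R_EXAMPLE, l, 0);   // queue a fixup
 *     ... emit the branch and subsequent code ...
 *     tcg_out_label(s, l, s->code_ptr);   // resolve: patch every fixup
 */
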
#include "tcg-target.inc.c"

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_tcg_ctxs; i++) {
        bool err = tcg_region_initial_alloc__locked(tcg_ctxs[i]);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    /* We do not yet support multiple TCG contexts, so use one region for now */
    n_regions = 1;

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* We do not yet support multiple TCG contexts so allocate the region now */
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
}

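/*
 * Editorial worked example for tcg_region_init(): assume a page-aligned
 * 32 MiB buffer, 4 KiB pages and n_regions == 1. Then aligned == buf,
 * region_size == 32 MiB, region.size == 32 MiB - 4 KiB (the trailing
 * guard page), region.stride == 32 MiB, and region.end points at the
 * start of that final guard page, which qemu_mprotect_none() makes
 * inaccessible so a runaway translator faults instead of running off
 * the end of the buffer.
 */
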
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_tcg_ctxs; i++) {
        const TCGContext *s = tcg_ctxs[i];
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}

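/*
 * Editorial example, continuing the single-region 32 MiB case above:
 * guard_size == stride - size == 4 KiB, so
 *
 *     capacity = (end + 4 KiB - start) - 1 * (4 KiB + TCG_HIGHWATER)
 *              = 32 MiB - 4 KiB - 1 KiB
 *
 * i.e. the whole buffer minus one guard page and the high-water slack
 * that tcg_region_alloc() likewise deducts from agg_size_full.
 */
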
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

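/*
 * Editorial sketch (assuming the usual tcg_malloc() fast path in tcg.h,
 * which bumps pool_cur and only calls tcg_malloc_internal() on
 * overflow): allocations are bump-pointer cheap and are reclaimed in
 * bulk, never freed individually:
 *
 *     TCGRelocation *r = tcg_malloc(sizeof(*r));   // fast bump allocation
 *     ...
 *     tcg_pool_reset(s);   // reclaim everything, e.g. via tcg_func_start()
 */
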
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
}

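/*
 * Editorial example of the reversal in tcg_context_init(): if the
 * backend's allocation order is { R4, R5, R6, R7, R0, R1 } and only
 * R0/R1 are call-clobbered, the loop stops at n == 4 and
 * indirect_reg_alloc_order becomes { R7, R6, R5, R4, R0, R1 }, so
 * indirect bases prefer the callee-saved registers in the opposite
 * order from ordinary temporaries.
 */
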
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

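/*
 * Editorial sketch of the layout tcg_tb_alloc() produces, assuming a
 * 64-byte instruction cache line:
 *
 *     |- TranslationBlock -|pad|----- translated code ... -----|
 *     ^ tb, line-aligned       ^ next == new code_gen_ptr, line-aligned
 *
 * Both roundings use the same icache line size, so the TB struct and
 * its code never share a line.
 */
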
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer. */
    buf0 = s->code_gen_buffer;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->code_gen_prologue = buf0;

    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);
    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer. */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size = s->code_gen_buffer_size - prologue_size;
    s->code_gen_buffer_size = total_size;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over. The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require. */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        log_disas(buf0, prologue_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely. */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    s->gen_op_buf[0].next = 1;
    s->gen_op_buf[0].prev = 0;
    s->gen_next_op_idx = 1;
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *t;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    t = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
    return temp_tcgv_i32(t);
}

TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *t;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    t = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
    return temp_tcgv_i64(t);
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

static TCGTemp *tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return temp_tcgv_i32(t);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return temp_tcgv_i64(t);
}

static void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

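/*
 * Editorial usage note: the tcg_const_* helpers above allocate a fresh
 * temporary and emit a movi into it; the caller still owns the temp:
 *
 *     TCGv_i32 four = tcg_const_i32(4);
 *     tcg_gen_add_i32(dst, src, four);   // dst and src owned elsewhere
 *     tcg_temp_free_i32(four);
 */
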
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case NB_OPS:
        break;
    }
    g_assert_not_reached();
}

/* Note: we convert the 64-bit args to 32-bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    TCGContext *s = tcg_ctx;
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters. Split them. */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    i = s->gen_next_op_idx;
    tcg_debug_assert(i < OPC_BUF_SIZE);
    s->gen_op_buf[0].prev = i;
    s->gen_next_op_idx = i + 1;
    op = &s->gen_op_buf[i];

    /* Set links for sequential allocation during translation. */
    memset(op, 0, offsetof(TCGOp, args));
    op->opc = INDEX_op_call;
    op->prev = i - 1;
    op->next = i + 1;

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair. Prepare for this by using
               two return temporaries, and reassemble below. */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    op->callo = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian. For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order. If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    op->calli = real_args;

    /* Make sure the fields didn't overflow. */
    tcg_debug_assert(op->calli == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above. */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create. */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}

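/*
 * Editorial note on the sizemask layout that tcg_gen_callN() decodes
 * (inferred from the tests above: "sizemask & 1" for a 64-bit return,
 * "1 << (i+1)*2" and "2 << (i+1)*2" per argument): each value gets two
 * bits, the low bit meaning "is 64-bit" and the high bit "is signed".
 * For a helper returning i64 from (signed i32, i64):
 *
 *     sizemask = 1            // return value is 64-bit
 *              | (2 << 2)     // arg 0: 32-bit, signed
 *              | (1 << 4);    // arg 1: 64-bit
 */
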
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

/* Find helper name. */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;
    int oi;

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        op = &s->gen_op_buf[oi];
        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for (i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}

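/*
 * Editorial example: with get_constraint_priority() above, an operand
 * tied to one specific register (n == 1) gets priority
 * TCG_TARGET_NB_REGS, a plain "any register" constraint gets 1, and a
 * constant-only constraint gets 0, so the scarcest operands are
 * register-allocated first.
 */
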
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}

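/*
 * Editorial example of the strings parsed by process_op_defs() (the
 * letters are backend-defined; "r" and "ri" are merely typical): a
 * backend might describe add_i32 as
 *
 *     static const TCGTargetOpDef add = { .args_ct_str = { "r", "r", "ri" } };
 *
 * i.e. output in any register, first input in any register, second
 * input a register or a constant. A leading digit such as "0" would
 * instead alias that input to output 0 via TCG_CT_ALIAS/TCG_CT_IALIAS.
 */
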
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    int next = op->next;
    int prev = op->prev;

    /* We should never attempt to remove the list terminator. */
    tcg_debug_assert(op != &s->gen_op_buf[0]);

    s->gen_op_buf[next].prev = prev;
    s->gen_op_buf[prev].next = next;

    memset(op, 0, sizeof(*op));

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    int oi = s->gen_next_op_idx;
    int prev = old_op->prev;
    int next = old_op - s->gen_op_buf;
    TCGOp *new_op;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    s->gen_next_op_idx = oi + 1;

    new_op = &s->gen_op_buf[oi];
    *new_op = (TCGOp){
        .opc = opc,
        .prev = prev,
        .next = next
    };
    s->gen_op_buf[prev].next = oi;
    old_op->prev = oi;

    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    int oi = s->gen_next_op_idx;
    int prev = old_op - s->gen_op_buf;
    int next = old_op->next;
    TCGOp *new_op;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    s->gen_next_op_idx = oi + 1;

    new_op = &s->gen_op_buf[oi];
    *new_op = (TCGOp){
        .opc = opc,
        .prev = prev,
        .next = next
    };
    s->gen_op_buf[next].prev = oi;
    old_op->next = oi;

    return new_op;
}

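/*
 * Editorial sketch: gen_op_buf is a doubly linked list threaded through
 * integer indices, with gen_op_buf[0] serving as both head and tail
 * sentinel. tcg_op_insert_after(s, A, ...) therefore rewires
 *
 *     A <-> B    into    A <-> new <-> B
 *
 * by updating A->next, B->prev and both links of the new node;
 * tcg_op_insert_before() is the mirror image. The nargs parameter
 * appears unused here: at this point ops carry a fixed-size args array.
 */
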
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void tcg_la_func_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void tcg_la_bb_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
    }
}

/* Liveness analysis: update the opc_arg_life array to tell whether a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int oi, oi_prev;

    tcg_la_func_end(s);

    for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *arg_ts;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        oi_prev = op->prev;

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = op->callo;
                nb_iargs = op->calli;
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (arg_ts->state & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        arg_ts->state = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state = TS_DEAD | TS_MEM;
                        }
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts && arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts) {
                            arg_ts->state &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            arg_temp(op->args[0])->state = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part. The result can be optimized to a simple
               add or sub. This happens often for x86_64 guests when the
               CPU mode is set to 32 bit. */
1969 if (arg_temp(op->args[1])->state == TS_DEAD) {
1970 if (arg_temp(op->args[0])->state == TS_DEAD) {
1971 goto do_remove;
1972 }
1973 /* Replace the opcode and adjust the args in place,
1974 leaving 3 unused args at the end. */
1975 op->opc = opc = opc_new;
1976 op->args[1] = op->args[2];
1977 op->args[2] = op->args[4];
1978 /* Fall through and mark the single-word operation live. */
1979 nb_iargs = 2;
1980 nb_oargs = 1;
1981 }
1982 goto do_not_remove;
1983
1984 case INDEX_op_mulu2_i32:
1985 opc_new = INDEX_op_mul_i32;
1986 opc_new2 = INDEX_op_muluh_i32;
1987 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1988 goto do_mul2;
1989 case INDEX_op_muls2_i32:
1990 opc_new = INDEX_op_mul_i32;
1991 opc_new2 = INDEX_op_mulsh_i32;
1992 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1993 goto do_mul2;
1994 case INDEX_op_mulu2_i64:
1995 opc_new = INDEX_op_mul_i64;
1996 opc_new2 = INDEX_op_muluh_i64;
1997 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1998 goto do_mul2;
1999 case INDEX_op_muls2_i64:
2000 opc_new = INDEX_op_mul_i64;
2001 opc_new2 = INDEX_op_mulsh_i64;
2002 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
2003 goto do_mul2;
2004 do_mul2:
2005 nb_iargs = 2;
2006 nb_oargs = 2;
2007 if (arg_temp(op->args[1])->state == TS_DEAD) {
2008 if (arg_temp(op->args[0])->state == TS_DEAD) {
2009 /* Both parts of the operation are dead. */
2010 goto do_remove;
2011 }
2012 /* The high part of the operation is dead; generate the low. */
2013 op->opc = opc = opc_new;
2014 op->args[1] = op->args[2];
2015 op->args[2] = op->args[3];
2016 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
2017 /* The low part of the operation is dead; generate the high. */
2018 op->opc = opc = opc_new2;
2019 op->args[0] = op->args[1];
2020 op->args[1] = op->args[2];
2021 op->args[2] = op->args[3];
2022 } else {
2023 goto do_not_remove;
2024 }
2025 /* Mark the single-word operation live. */
2026 nb_oargs = 1;
2027 goto do_not_remove;
2028
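/* Likewise for the mul2 family, with hypothetical temps t0/t1:

       mulu2_i32 t0, t1, a, b      (t1, the high half, dead)
   becomes
       mul_i32   t0, a, b

   and, when TCG_TARGET_HAS_muluh_i32 is set,

       mulu2_i32 t0, t1, a, b      (t0, the low half, dead)
   becomes
       muluh_i32 t1, a, b                                    */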
2029 default:
2030 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2031 nb_iargs = def->nb_iargs;
2032 nb_oargs = def->nb_oargs;
2033
2034 /* Test if the operation can be removed because all
2035 its outputs are dead. We assume that nb_oargs == 0
2036 implies side effects */
2037 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
2038 for (i = 0; i < nb_oargs; i++) {
2039 if (arg_temp(op->args[i])->state != TS_DEAD) {
2040 goto do_not_remove;
2041 }
2042 }
2043 do_remove:
2044 tcg_op_remove(s, op);
2045 } else {
2046 do_not_remove:
2047 /* output args are dead */
2048 for (i = 0; i < nb_oargs; i++) {
2049 arg_ts = arg_temp(op->args[i]);
2050 if (arg_ts->state & TS_DEAD) {
2051 arg_life |= DEAD_ARG << i;
2052 }
2053 if (arg_ts->state & TS_MEM) {
2054 arg_life |= SYNC_ARG << i;
2055 }
2056 arg_ts->state = TS_DEAD;
2057 }
2058
2059 /* if end of basic block, update */
2060 if (def->flags & TCG_OPF_BB_END) {
2061 tcg_la_bb_end(s);
2062 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2063 /* globals should be synced to memory */
2064 for (i = 0; i < nb_globals; i++) {
2065 s->temps[i].state |= TS_MEM;
2066 }
2067 }
2068
2069 /* record arguments that die in this opcode */
2070 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2071 arg_ts = arg_temp(op->args[i]);
2072 if (arg_ts->state & TS_DEAD) {
2073 arg_life |= DEAD_ARG << i;
2074 }
2075 }
2076 /* input arguments are live for preceding opcodes */
2077 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2078 arg_temp(op->args[i])->state &= ~TS_DEAD;
2079 }
2080 }
2081 break;
2082 }
2083 op->life = arg_life;
2084 }
2085 }
2086
2087 /* Liveness analysis: Convert indirect regs to direct temporaries. */
2088 static bool liveness_pass_2(TCGContext *s)
2089 {
2090 int nb_globals = s->nb_globals;
2091 int nb_temps, i, oi, oi_next;
2092 bool changes = false;
2093
2094 /* Create a temporary for each indirect global. */
2095 for (i = 0; i < nb_globals; ++i) {
2096 TCGTemp *its = &s->temps[i];
2097 if (its->indirect_reg) {
2098 TCGTemp *dts = tcg_temp_alloc(s);
2099 dts->type = its->type;
2100 dts->base_type = its->base_type;
2101 its->state_ptr = dts;
2102 } else {
2103 its->state_ptr = NULL;
2104 }
2105 /* All globals begin dead. */
2106 its->state = TS_DEAD;
2107 }
2108 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2109 TCGTemp *its = &s->temps[i];
2110 its->state_ptr = NULL;
2111 its->state = TS_DEAD;
2112 }
2113
2114 for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
2115 TCGOp *op = &s->gen_op_buf[oi];
2116 TCGOpcode opc = op->opc;
2117 const TCGOpDef *def = &tcg_op_defs[opc];
2118 TCGLifeData arg_life = op->life;
2119 int nb_iargs, nb_oargs, call_flags;
2120 TCGTemp *arg_ts, *dir_ts;
2121
2122 oi_next = op->next;
2123
2124 if (opc == INDEX_op_call) {
2125 nb_oargs = op->callo;
2126 nb_iargs = op->calli;
2127 call_flags = op->args[nb_oargs + nb_iargs + 1];
2128 } else {
2129 nb_iargs = def->nb_iargs;
2130 nb_oargs = def->nb_oargs;
2131
2132 /* Set flags analogous to those a call would require. */
2133 if (def->flags & TCG_OPF_BB_END) {
2134 /* Like writing globals: save_globals */
2135 call_flags = 0;
2136 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2137 /* Like reading globals: sync_globals */
2138 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2139 } else {
2140 /* No effect on globals. */
2141 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2142 TCG_CALL_NO_WRITE_GLOBALS);
2143 }
2144 }
2145
2146 /* Make sure that input arguments are available. */
2147 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2148 arg_ts = arg_temp(op->args[i]);
2149 if (arg_ts) {
2150 dir_ts = arg_ts->state_ptr;
2151 if (dir_ts && arg_ts->state == TS_DEAD) {
2152 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
2153 ? INDEX_op_ld_i32
2154 : INDEX_op_ld_i64);
2155 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
2156
2157 lop->args[0] = temp_arg(dir_ts);
2158 lop->args[1] = temp_arg(arg_ts->mem_base);
2159 lop->args[2] = arg_ts->mem_offset;
2160
2161 /* Loaded, but synced with memory. */
2162 arg_ts->state = TS_MEM;
2163 }
2164 }
2165 }
2166
2167 /* Perform input replacement, and mark inputs that became dead.
2168 No action is required except keeping the temp state up to date
2169 so that we reload when needed. */
2170 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2171 arg_ts = arg_temp(op->args[i]);
2172 if (arg_ts) {
2173 dir_ts = arg_ts->state_ptr;
2174 if (dir_ts) {
2175 op->args[i] = temp_arg(dir_ts);
2176 changes = true;
2177 if (IS_DEAD_ARG(i)) {
2178 arg_ts->state = TS_DEAD;
2179 }
2180 }
2181 }
2182 }
2183
2184 /* Liveness analysis should ensure that the following are
2185 all correct, for call sites and basic block end points. */
2186 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2187 /* Nothing to do */
2188 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2189 for (i = 0; i < nb_globals; ++i) {
2190 /* Liveness should see that globals are synced back,
2191 that is, either TS_DEAD or TS_MEM. */
2192 arg_ts = &s->temps[i];
2193 tcg_debug_assert(arg_ts->state_ptr == 0
2194 || arg_ts->state != 0);
2195 }
2196 } else {
2197 for (i = 0; i < nb_globals; ++i) {
2198 /* Liveness should see that globals are saved back,
2199 that is, TS_DEAD, waiting to be reloaded. */
2200 arg_ts = &s->temps[i];
2201 tcg_debug_assert(arg_ts->state_ptr == 0
2202 || arg_ts->state == TS_DEAD);
2203 }
2204 }
2205
2206 /* Outputs become available. */
2207 for (i = 0; i < nb_oargs; i++) {
2208 arg_ts = arg_temp(op->args[i]);
2209 dir_ts = arg_ts->state_ptr;
2210 if (!dir_ts) {
2211 continue;
2212 }
2213 op->args[i] = temp_arg(dir_ts);
2214 changes = true;
2215
2216 /* The output is now live and modified. */
2217 arg_ts->state = 0;
2218
2219 /* Sync outputs upon their last write. */
2220 if (NEED_SYNC_ARG(i)) {
2221 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2222 ? INDEX_op_st_i32
2223 : INDEX_op_st_i64);
2224 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
2225
2226 sop->args[0] = temp_arg(dir_ts);
2227 sop->args[1] = temp_arg(arg_ts->mem_base);
2228 sop->args[2] = arg_ts->mem_offset;
2229
2230 arg_ts->state = TS_MEM;
2231 }
2232 /* Drop outputs that are dead. */
2233 if (IS_DEAD_ARG(i)) {
2234 arg_ts->state = TS_DEAD;
2235 }
2236 }
2237 }
2238
2239 return changes;
2240 }
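/* A sketch of the net effect of this pass, for a hypothetical
   indirect global G (shadow direct temp D, at offset off from base):

       add_i32 G, G, one
   becomes
       ld_i32  D, base, off        (inserted: G was TS_DEAD)
       add_i32 D, D, one
       st_i32  D, base, off        (inserted: last write, NEED_SYNC_ARG)

   with the load and store elided wherever D is already live or the
   value is already coherent with memory. */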
2241
2242 #ifdef CONFIG_DEBUG_TCG
2243 static void dump_regs(TCGContext *s)
2244 {
2245 TCGTemp *ts;
2246 int i;
2247 char buf[64];
2248
2249 for (i = 0; i < s->nb_temps; i++) {
2250 ts = &s->temps[i];
2251 printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2252 switch (ts->val_type) {
2253 case TEMP_VAL_REG:
2254 printf("%s", tcg_target_reg_names[ts->reg]);
2255 break;
2256 case TEMP_VAL_MEM:
2257 printf("%d(%s)", (int)ts->mem_offset,
2258 tcg_target_reg_names[ts->mem_base->reg]);
2259 break;
2260 case TEMP_VAL_CONST:
2261 printf("$0x%" TCG_PRIlx, ts->val);
2262 break;
2263 case TEMP_VAL_DEAD:
2264 printf("D");
2265 break;
2266 default:
2267 printf("???");
2268 break;
2269 }
2270 printf("\n");
2271 }
2272
2273 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2274 if (s->reg_to_temp[i] != NULL) {
2275 printf("%s: %s\n",
2276 tcg_target_reg_names[i],
2277 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2278 }
2279 }
2280 }
2281
2282 static void check_regs(TCGContext *s)
2283 {
2284 int reg;
2285 int k;
2286 TCGTemp *ts;
2287 char buf[64];
2288
2289 for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2290 ts = s->reg_to_temp[reg];
2291 if (ts != NULL) {
2292 if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
2293 printf("Inconsistency for register %s:\n",
2294 tcg_target_reg_names[reg]);
2295 goto fail;
2296 }
2297 }
2298 }
2299 for (k = 0; k < s->nb_temps; k++) {
2300 ts = &s->temps[k];
2301 if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
2302 && s->reg_to_temp[ts->reg] != ts) {
2303 printf("Inconsistency for temp %s:\n",
2304 tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2305 fail:
2306 printf("reg state:\n");
2307 dump_regs(s);
2308 tcg_abort();
2309 }
2310 }
2311 }
2312 #endif
2313
2314 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
2315 {
2316 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2317 /* Sparc64 stack is accessed with an offset of 2047 */
2318 s->current_frame_offset = (s->current_frame_offset +
2319 (tcg_target_long)sizeof(tcg_target_long) - 1) &
2320 ~(sizeof(tcg_target_long) - 1);
2321 #endif
2322 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2323 s->frame_end) {
2324 tcg_abort();
2325 }
2326 ts->mem_offset = s->current_frame_offset;
2327 ts->mem_base = s->frame_temp;
2328 ts->mem_allocated = 1;
2329 s->current_frame_offset += sizeof(tcg_target_long);
2330 }
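/* For illustration: each spilled temp receives one
   sizeof(tcg_target_long)-sized slot addressed relative to frame_temp;
   successive calls hand out current_frame_offset, then
   current_frame_offset + sizeof(tcg_target_long), and so on, aborting
   rather than growing the frame once frame_end would be exceeded. */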
2331
2332 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2333
2334 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2335 mark it free; otherwise mark it dead. */
2336 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
2337 {
2338 if (ts->fixed_reg) {
2339 return;
2340 }
2341 if (ts->val_type == TEMP_VAL_REG) {
2342 s->reg_to_temp[ts->reg] = NULL;
2343 }
2344 ts->val_type = (free_or_dead < 0
2345 || ts->temp_local
2346 || ts->temp_global
2347 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2348 }
2349
2350 /* Mark a temporary as dead. */
2351 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2352 {
2353 temp_free_or_dead(s, ts, 1);
2354 }
2355
2356 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2357 register needs to be allocated to store a constant. If 'free_or_dead'
2358 is non-zero, subsequently release the temporary; if it is positive, the
2359 temp is dead; if it is negative, the temp is free. */
2360 static void temp_sync(TCGContext *s, TCGTemp *ts,
2361 TCGRegSet allocated_regs, int free_or_dead)
2362 {
2363 if (ts->fixed_reg) {
2364 return;
2365 }
2366 if (!ts->mem_coherent) {
2367 if (!ts->mem_allocated) {
2368 temp_allocate_frame(s, ts);
2369 }
2370 switch (ts->val_type) {
2371 case TEMP_VAL_CONST:
2372 /* If we're going to free the temp immediately, then we won't
2373 require it later in a register, so attempt to store the
2374 constant to memory directly. */
2375 if (free_or_dead
2376 && tcg_out_sti(s, ts->type, ts->val,
2377 ts->mem_base->reg, ts->mem_offset)) {
2378 break;
2379 }
2380 temp_load(s, ts, tcg_target_available_regs[ts->type],
2381 allocated_regs);
2382 /* fallthrough */
2383
2384 case TEMP_VAL_REG:
2385 tcg_out_st(s, ts->type, ts->reg,
2386 ts->mem_base->reg, ts->mem_offset);
2387 break;
2388
2389 case TEMP_VAL_MEM:
2390 break;
2391
2392 case TEMP_VAL_DEAD:
2393 default:
2394 tcg_abort();
2395 }
2396 ts->mem_coherent = 1;
2397 }
2398 if (free_or_dead) {
2399 temp_free_or_dead(s, ts, free_or_dead);
2400 }
2401 }
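/* Usage note: tcg_out_sti returns false when the backend cannot store
   the given constant to memory directly; the TEMP_VAL_CONST case above
   then falls through to temp_load plus a plain tcg_out_st, at the cost
   of a scratch register chosen while avoiding 'allocated_regs'. */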
2402
2403 /* free register 'reg' by spilling the corresponding temporary if necessary */
2404 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
2405 {
2406 TCGTemp *ts = s->reg_to_temp[reg];
2407 if (ts != NULL) {
2408 temp_sync(s, ts, allocated_regs, -1);
2409 }
2410 }
2411
2412 /* Allocate a register belonging to reg1 & ~reg2 */
2413 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
2414 TCGRegSet allocated_regs, bool rev)
2415 {
2416 int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
2417 const int *order;
2418 TCGReg reg;
2419 TCGRegSet reg_ct;
2420
2421 reg_ct = desired_regs & ~allocated_regs;
2422 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
2423
2424 /* first try free registers */
2425 for (i = 0; i < n; i++) {
2426 reg = order[i];
2427 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
2428 return reg;
2429 }
2430
2431 /* XXX: do better spill choice */
2432 for (i = 0; i < n; i++) {
2433 reg = order[i];
2434 if (tcg_regset_test_reg(reg_ct, reg)) {
2435 tcg_reg_free(s, reg, allocated_regs);
2436 return reg;
2437 }
2438 }
2439
2440 tcg_abort();
2441 }
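/* Usage sketch: callers pass the constraint set as 'desired_regs' and
   the registers already claimed for the current op as 'allocated_regs',
   e.g.

       reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                           ts->indirect_base);

   With 'rev' set, indirect_reg_alloc_order is consulted instead of the
   default host allocation order. */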
2442
2443 /* Make sure the temporary is in a register. If needed, allocate the register
2444 from DESIRED while avoiding ALLOCATED. */
2445 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2446 TCGRegSet allocated_regs)
2447 {
2448 TCGReg reg;
2449
2450 switch (ts->val_type) {
2451 case TEMP_VAL_REG:
2452 return;
2453 case TEMP_VAL_CONST:
2454 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2455 tcg_out_movi(s, ts->type, reg, ts->val);
2456 ts->mem_coherent = 0;
2457 break;
2458 case TEMP_VAL_MEM:
2459 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2460 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2461 ts->mem_coherent = 1;
2462 break;
2463 case TEMP_VAL_DEAD:
2464 default:
2465 tcg_abort();
2466 }
2467 ts->reg = reg;
2468 ts->val_type = TEMP_VAL_REG;
2469 s->reg_to_temp[reg] = ts;
2470 }
2471
2472 /* Save a temporary to memory. 'allocated_regs' is used in case a
2473 temporary register needs to be allocated to store a constant. */
2474 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
2475 {
2476 /* The liveness analysis already ensures that globals are back
2477 in memory. Keep a tcg_debug_assert for safety. */
2478 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
2479 }
2480
2481 /* save globals to their canonical location and assume they can be
2482 modified by the following code. 'allocated_regs' is used in case a
2483 temporary register needs to be allocated to store a constant. */
2484 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
2485 {
2486 int i, n;
2487
2488 for (i = 0, n = s->nb_globals; i < n; i++) {
2489 temp_save(s, &s->temps[i], allocated_regs);
2490 }
2491 }
2492
2493 /* sync globals to their canonical location and assume they can be
2494 read by the following code. 'allocated_regs' is used in case a
2495 temporary register needs to be allocated to store a constant. */
2496 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2497 {
2498 int i, n;
2499
2500 for (i = 0, n = s->nb_globals; i < n; i++) {
2501 TCGTemp *ts = &s->temps[i];
2502 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2503 || ts->fixed_reg
2504 || ts->mem_coherent);
2505 }
2506 }
2507
2508 /* at the end of a basic block, we assume all temporaries are dead and
2509 all globals are stored at their canonical location. */
2510 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
2511 {
2512 int i;
2513
2514 for (i = s->nb_globals; i < s->nb_temps; i++) {
2515 TCGTemp *ts = &s->temps[i];
2516 if (ts->temp_local) {
2517 temp_save(s, ts, allocated_regs);
2518 } else {
2519 /* The liveness analysis already ensures that temps are dead.
2520 Keep a tcg_debug_assert for safety. */
2521 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
2522 }
2523 }
2524
2525 save_globals(s, allocated_regs);
2526 }
2527
2528 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
2529 tcg_target_ulong val, TCGLifeData arg_life)
2530 {
2531 if (ots->fixed_reg) {
2532 /* For fixed registers, we do not do any constant propagation. */
2533 tcg_out_movi(s, ots->type, ots->reg, val);
2534 return;
2535 }
2536
2537 /* The movi is not explicitly generated here. */
2538 if (ots->val_type == TEMP_VAL_REG) {
2539 s->reg_to_temp[ots->reg] = NULL;
2540 }
2541 ots->val_type = TEMP_VAL_CONST;
2542 ots->val = val;
2543 ots->mem_coherent = 0;
2544 if (NEED_SYNC_ARG(0)) {
2545 temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
2546 } else if (IS_DEAD_ARG(0)) {
2547 temp_dead(s, ots);
2548 }
2549 }
2550
2551 static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
2552 {
2553 TCGTemp *ots = arg_temp(op->args[0]);
2554 tcg_target_ulong val = op->args[1];
2555
2556 tcg_reg_alloc_do_movi(s, ots, val, op->life);
2557 }
2558
2559 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
2560 {
2561 const TCGLifeData arg_life = op->life;
2562 TCGRegSet allocated_regs;
2563 TCGTemp *ts, *ots;
2564 TCGType otype, itype;
2565
2566 allocated_regs = s->reserved_regs;
2567 ots = arg_temp(op->args[0]);
2568 ts = arg_temp(op->args[1]);
2569
2570 /* Note that otype may differ from itype for a no-op truncation. */
2571 otype = ots->type;
2572 itype = ts->type;
2573
2574 if (ts->val_type == TEMP_VAL_CONST) {
2575 /* propagate constant or generate sti */
2576 tcg_target_ulong val = ts->val;
2577 if (IS_DEAD_ARG(1)) {
2578 temp_dead(s, ts);
2579 }
2580 tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2581 return;
2582 }
2583
2584 /* If the source value is in memory we're going to be forced
2585 to have it in a register in order to perform the copy. Copy
2586 the SOURCE value into its own register first, that way we
2587 don't have to reload SOURCE the next time it is used. */
2588 if (ts->val_type == TEMP_VAL_MEM) {
2589 temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
2590 }
2591
2592 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
2593 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2594 /* mov to a non-saved dead register makes no sense (even with
2595 liveness analysis disabled). */
2596 tcg_debug_assert(NEED_SYNC_ARG(0));
2597 if (!ots->mem_allocated) {
2598 temp_allocate_frame(s, ots);
2599 }
2600 tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
2601 if (IS_DEAD_ARG(1)) {
2602 temp_dead(s, ts);
2603 }
2604 temp_dead(s, ots);
2605 } else {
2606 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
2607 /* the mov can be suppressed */
2608 if (ots->val_type == TEMP_VAL_REG) {
2609 s->reg_to_temp[ots->reg] = NULL;
2610 }
2611 ots->reg = ts->reg;
2612 temp_dead(s, ts);
2613 } else {
2614 if (ots->val_type != TEMP_VAL_REG) {
2615 /* When allocating a new register, make sure not to spill the
2616 input one. */
2617 tcg_regset_set_reg(allocated_regs, ts->reg);
2618 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
2619 allocated_regs, ots->indirect_base);
2620 }
2621 tcg_out_mov(s, otype, ots->reg, ts->reg);
2622 }
2623 ots->val_type = TEMP_VAL_REG;
2624 ots->mem_coherent = 0;
2625 s->reg_to_temp[ots->reg] = ots;
2626 if (NEED_SYNC_ARG(0)) {
2627 temp_sync(s, ots, allocated_regs, 0);
2628 }
2629 }
2630 }
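/* For illustration: when the source temp dies at this mov and neither
   temp lives in a fixed register, no host instruction is emitted at
   all; for "mov_i32 t1, t0" with t0 dead, t1 simply inherits t0's
   register and only the reg_to_temp[] bookkeeping changes. */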
2631
2632 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
2633 {
2634 const TCGLifeData arg_life = op->life;
2635 const TCGOpDef * const def = &tcg_op_defs[op->opc];
2636 TCGRegSet i_allocated_regs;
2637 TCGRegSet o_allocated_regs;
2638 int i, k, nb_iargs, nb_oargs;
2639 TCGReg reg;
2640 TCGArg arg;
2641 const TCGArgConstraint *arg_ct;
2642 TCGTemp *ts;
2643 TCGArg new_args[TCG_MAX_OP_ARGS];
2644 int const_args[TCG_MAX_OP_ARGS];
2645
2646 nb_oargs = def->nb_oargs;
2647 nb_iargs = def->nb_iargs;
2648
2649 /* copy constants */
2650 memcpy(new_args + nb_oargs + nb_iargs,
2651 op->args + nb_oargs + nb_iargs,
2652 sizeof(TCGArg) * def->nb_cargs);
2653
2654 i_allocated_regs = s->reserved_regs;
2655 o_allocated_regs = s->reserved_regs;
2656
2657 /* satisfy input constraints */
2658 for (k = 0; k < nb_iargs; k++) {
2659 i = def->sorted_args[nb_oargs + k];
2660 arg = op->args[i];
2661 arg_ct = &def->args_ct[i];
2662 ts = arg_temp(arg);
2663
2664 if (ts->val_type == TEMP_VAL_CONST
2665 && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2666 /* constant is OK for instruction */
2667 const_args[i] = 1;
2668 new_args[i] = ts->val;
2669 goto iarg_end;
2670 }
2671
2672 temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
2673
2674 if (arg_ct->ct & TCG_CT_IALIAS) {
2675 if (ts->fixed_reg) {
2676 /* if fixed register, we must allocate a new register
2677 if the alias is not the same register */
2678 if (arg != op->args[arg_ct->alias_index])
2679 goto allocate_in_reg;
2680 } else {
2681 /* if the input is aliased to an output and if it is
2682 not dead after the instruction, we must allocate
2683 a new register and move it */
2684 if (!IS_DEAD_ARG(i)) {
2685 goto allocate_in_reg;
2686 }
2687 /* check if the current register has already been allocated
2688 for another input aliased to an output */
2689 int k2, i2;
2690 for (k2 = 0 ; k2 < k ; k2++) {
2691 i2 = def->sorted_args[nb_oargs + k2];
2692 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2693 (new_args[i2] == ts->reg)) {
2694 goto allocate_in_reg;
2695 }
2696 }
2697 }
2698 }
2699 reg = ts->reg;
2700 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2701 /* nothing to do: the constraint is satisfied */
2702 } else {
2703 allocate_in_reg:
2704 /* allocate a new register matching the constraint
2705 and move the temporary register into it */
2706 reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
2707 ts->indirect_base);
2708 tcg_out_mov(s, ts->type, reg, ts->reg);
2709 }
2710 new_args[i] = reg;
2711 const_args[i] = 0;
2712 tcg_regset_set_reg(i_allocated_regs, reg);
2713 iarg_end: ;
2714 }
2715
2716 /* mark dead temporaries and free the associated registers */
2717 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2718 if (IS_DEAD_ARG(i)) {
2719 temp_dead(s, arg_temp(op->args[i]));
2720 }
2721 }
2722
2723 if (def->flags & TCG_OPF_BB_END) {
2724 tcg_reg_alloc_bb_end(s, i_allocated_regs);
2725 } else {
2726 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2727 /* XXX: permit generic clobber register list? */
2728 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2729 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2730 tcg_reg_free(s, i, i_allocated_regs);
2731 }
2732 }
2733 }
2734 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2735 /* sync globals if the op has side effects and might trigger
2736 an exception. */
2737 sync_globals(s, i_allocated_regs);
2738 }
2739
2740 /* satisfy the output constraints */
2741 for (k = 0; k < nb_oargs; k++) {
2742 i = def->sorted_args[k];
2743 arg = op->args[i];
2744 arg_ct = &def->args_ct[i];
2745 ts = arg_temp(arg);
2746 if ((arg_ct->ct & TCG_CT_ALIAS)
2747 && !const_args[arg_ct->alias_index]) {
2748 reg = new_args[arg_ct->alias_index];
2749 } else if (arg_ct->ct & TCG_CT_NEWREG) {
2750 reg = tcg_reg_alloc(s, arg_ct->u.regs,
2751 i_allocated_regs | o_allocated_regs,
2752 ts->indirect_base);
2753 } else {
2754 /* if fixed register, we try to use it */
2755 reg = ts->reg;
2756 if (ts->fixed_reg &&
2757 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2758 goto oarg_end;
2759 }
2760 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
2761 ts->indirect_base);
2762 }
2763 tcg_regset_set_reg(o_allocated_regs, reg);
2764 /* if a fixed register is used, then a move will be done afterwards */
2765 if (!ts->fixed_reg) {
2766 if (ts->val_type == TEMP_VAL_REG) {
2767 s->reg_to_temp[ts->reg] = NULL;
2768 }
2769 ts->val_type = TEMP_VAL_REG;
2770 ts->reg = reg;
2771 /* temp value is modified, so the value kept in memory is
2772 potentially not the same */
2773 ts->mem_coherent = 0;
2774 s->reg_to_temp[reg] = ts;
2775 }
2776 oarg_end:
2777 new_args[i] = reg;
2778 }
2779 }
2780
2781 /* emit instruction */
2782 tcg_out_op(s, op->opc, new_args, const_args);
2783
2784 /* move the outputs in the correct register if needed */
2785 for (i = 0; i < nb_oargs; i++) {
2786 ts = arg_temp(op->args[i]);
2787 reg = new_args[i];
2788 if (ts->fixed_reg && ts->reg != reg) {
2789 tcg_out_mov(s, ts->type, ts->reg, reg);
2790 }
2791 if (NEED_SYNC_ARG(i)) {
2792 temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
2793 } else if (IS_DEAD_ARG(i)) {
2794 temp_dead(s, ts);
2795 }
2796 }
2797 }
2798
2799 #ifdef TCG_TARGET_STACK_GROWSUP
2800 #define STACK_DIR(x) (-(x))
2801 #else
2802 #define STACK_DIR(x) (x)
2803 #endif
2804
2805 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
2806 {
2807 const int nb_oargs = op->callo;
2808 const int nb_iargs = op->calli;
2809 const TCGLifeData arg_life = op->life;
2810 int flags, nb_regs, i;
2811 TCGReg reg;
2812 TCGArg arg;
2813 TCGTemp *ts;
2814 intptr_t stack_offset;
2815 size_t call_stack_size;
2816 tcg_insn_unit *func_addr;
2817 int allocate_args;
2818 TCGRegSet allocated_regs;
2819
2820 func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
2821 flags = op->args[nb_oargs + nb_iargs + 1];
2822
2823 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2824 if (nb_regs > nb_iargs) {
2825 nb_regs = nb_iargs;
2826 }
2827
2828 /* assign stack slots first */
2829 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2830 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2831 ~(TCG_TARGET_STACK_ALIGN - 1);
2832 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2833 if (allocate_args) {
2834 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2835 preallocate call stack */
2836 tcg_abort();
2837 }
2838
2839 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2840 for (i = nb_regs; i < nb_iargs; i++) {
2841 arg = op->args[nb_oargs + i];
2842 #ifdef TCG_TARGET_STACK_GROWSUP
2843 stack_offset -= sizeof(tcg_target_long);
2844 #endif
2845 if (arg != TCG_CALL_DUMMY_ARG) {
2846 ts = arg_temp(arg);
2847 temp_load(s, ts, tcg_target_available_regs[ts->type],
2848 s->reserved_regs);
2849 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2850 }
2851 #ifndef TCG_TARGET_STACK_GROWSUP
2852 stack_offset += sizeof(tcg_target_long);
2853 #endif
2854 }
2855
2856 /* assign input registers */
2857 allocated_regs = s->reserved_regs;
2858 for (i = 0; i < nb_regs; i++) {
2859 arg = op->args[nb_oargs + i];
2860 if (arg != TCG_CALL_DUMMY_ARG) {
2861 ts = arg_temp(arg);
2862 reg = tcg_target_call_iarg_regs[i];
2863 tcg_reg_free(s, reg, allocated_regs);
2864
2865 if (ts->val_type == TEMP_VAL_REG) {
2866 if (ts->reg != reg) {
2867 tcg_out_mov(s, ts->type, reg, ts->reg);
2868 }
2869 } else {
2870 TCGRegSet arg_set = 0;
2871
2872 tcg_regset_set_reg(arg_set, reg);
2873 temp_load(s, ts, arg_set, allocated_regs);
2874 }
2875
2876 tcg_regset_set_reg(allocated_regs, reg);
2877 }
2878 }
2879
2880 /* mark dead temporaries and free the associated registers */
2881 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2882 if (IS_DEAD_ARG(i)) {
2883 temp_dead(s, arg_temp(op->args[i]));
2884 }
2885 }
2886
2887 /* clobber call registers */
2888 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2889 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2890 tcg_reg_free(s, i, allocated_regs);
2891 }
2892 }
2893
2894 /* Save globals if they might be written by the helper, sync them if
2895 they might be read. */
2896 if (flags & TCG_CALL_NO_READ_GLOBALS) {
2897 /* Nothing to do */
2898 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2899 sync_globals(s, allocated_regs);
2900 } else {
2901 save_globals(s, allocated_regs);
2902 }
2903
2904 tcg_out_call(s, func_addr);
2905
2906 /* assign output registers and emit moves if needed */
2907 for (i = 0; i < nb_oargs; i++) {
2908 arg = op->args[i];
2909 ts = arg_temp(arg);
2910 reg = tcg_target_call_oarg_regs[i];
2911 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
2912
2913 if (ts->fixed_reg) {
2914 if (ts->reg != reg) {
2915 tcg_out_mov(s, ts->type, ts->reg, reg);
2916 }
2917 } else {
2918 if (ts->val_type == TEMP_VAL_REG) {
2919 s->reg_to_temp[ts->reg] = NULL;
2920 }
2921 ts->val_type = TEMP_VAL_REG;
2922 ts->reg = reg;
2923 ts->mem_coherent = 0;
2924 s->reg_to_temp[reg] = ts;
2925 if (NEED_SYNC_ARG(i)) {
2926 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
2927 } else if (IS_DEAD_ARG(i)) {
2928 temp_dead(s, ts);
2929 }
2930 }
2931 }
2932 }
2933
2934 #ifdef CONFIG_PROFILER
2935
2936 /* avoid copy/paste errors */
2937 #define PROF_ADD(to, from, field) \
2938 do { \
2939 (to)->field += atomic_read(&((from)->field)); \
2940 } while (0)
2941
2942 #define PROF_MAX(to, from, field) \
2943 do { \
2944 typeof((from)->field) val__ = atomic_read(&((from)->field)); \
2945 if (val__ > (to)->field) { \
2946 (to)->field = val__; \
2947 } \
2948 } while (0)
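/* For example, PROF_ADD(prof, orig, tb_count) expands (modulo the
   do/while wrapper) to

       (prof)->tb_count += atomic_read(&((orig)->tb_count));

   so each per-context counter is read once, atomically, and
   accumulated into the snapshot. */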
2949
2950 /* Pass in a zeroed @prof */
2951 static inline
2952 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
2953 {
2954 unsigned int i;
2955
2956 for (i = 0; i < n_tcg_ctxs; i++) {
2957 const TCGProfile *orig = &tcg_ctxs[i]->prof;
2958
2959 if (counters) {
2960 PROF_ADD(prof, orig, tb_count1);
2961 PROF_ADD(prof, orig, tb_count);
2962 PROF_ADD(prof, orig, op_count);
2963 PROF_MAX(prof, orig, op_count_max);
2964 PROF_ADD(prof, orig, temp_count);
2965 PROF_MAX(prof, orig, temp_count_max);
2966 PROF_ADD(prof, orig, del_op_count);
2967 PROF_ADD(prof, orig, code_in_len);
2968 PROF_ADD(prof, orig, code_out_len);
2969 PROF_ADD(prof, orig, search_out_len);
2970 PROF_ADD(prof, orig, interm_time);
2971 PROF_ADD(prof, orig, code_time);
2972 PROF_ADD(prof, orig, la_time);
2973 PROF_ADD(prof, orig, opt_time);
2974 PROF_ADD(prof, orig, restore_count);
2975 PROF_ADD(prof, orig, restore_time);
2976 }
2977 if (table) {
2978 int i;
2979
2980 for (i = 0; i < NB_OPS; i++) {
2981 PROF_ADD(prof, orig, table_op_count[i]);
2982 }
2983 }
2984 }
2985 }
2986
2987 #undef PROF_ADD
2988 #undef PROF_MAX
2989
2990 static void tcg_profile_snapshot_counters(TCGProfile *prof)
2991 {
2992 tcg_profile_snapshot(prof, true, false);
2993 }
2994
2995 static void tcg_profile_snapshot_table(TCGProfile *prof)
2996 {
2997 tcg_profile_snapshot(prof, false, true);
2998 }
2999
3000 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
3001 {
3002 TCGProfile prof = {};
3003 int i;
3004
3005 tcg_profile_snapshot_table(&prof);
3006 for (i = 0; i < NB_OPS; i++) {
3007 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
3008 prof.table_op_count[i]);
3009 }
3010 }
3011 #else
3012 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
3013 {
3014 cpu_fprintf(f, "[TCG profiler not compiled]\n");
3015 }
3016 #endif
3017
3018
3019 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
3020 {
3021 #ifdef CONFIG_PROFILER
3022 TCGProfile *prof = &s->prof;
3023 #endif
3024 int i, oi, oi_next, num_insns;
3025
3026 #ifdef CONFIG_PROFILER
3027 {
3028 int n;
3029
3030 n = s->gen_op_buf[0].prev + 1;
3031 atomic_set(&prof->op_count, prof->op_count + n);
3032 if (n > prof->op_count_max) {
3033 atomic_set(&prof->op_count_max, n);
3034 }
3035
3036 n = s->nb_temps;
3037 atomic_set(&prof->temp_count, prof->temp_count + n);
3038 if (n > prof->temp_count_max) {
3039 atomic_set(&prof->temp_count_max, n);
3040 }
3041 }
3042 #endif
3043
3044 #ifdef DEBUG_DISAS
3045 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
3046 && qemu_log_in_addr_range(tb->pc))) {
3047 qemu_log_lock();
3048 qemu_log("OP:\n");
3049 tcg_dump_ops(s);
3050 qemu_log("\n");
3051 qemu_log_unlock();
3052 }
3053 #endif
3054
3055 #ifdef CONFIG_PROFILER
3056 atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
3057 #endif
3058
3059 #ifdef USE_TCG_OPTIMIZATIONS
3060 tcg_optimize(s);
3061 #endif
3062
3063 #ifdef CONFIG_PROFILER
3064 atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
3065 atomic_set(&prof->la_time, prof->la_time - profile_getclock());
3066 #endif
3067
3068 liveness_pass_1(s);
3069
3070 if (s->nb_indirects > 0) {
3071 #ifdef DEBUG_DISAS
3072 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
3073 && qemu_log_in_addr_range(tb->pc))) {
3074 qemu_log_lock();
3075 qemu_log("OP before indirect lowering:\n");
3076 tcg_dump_ops(s);
3077 qemu_log("\n");
3078 qemu_log_unlock();
3079 }
3080 #endif
3081 /* Replace indirect temps with direct temps. */
3082 if (liveness_pass_2(s)) {
3083 /* If changes were made, re-run liveness. */
3084 liveness_pass_1(s);
3085 }
3086 }
3087
3088 #ifdef CONFIG_PROFILER
3089 atomic_set(&prof->la_time, prof->la_time + profile_getclock());
3090 #endif
3091
3092 #ifdef DEBUG_DISAS
3093 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
3094 && qemu_log_in_addr_range(tb->pc))) {
3095 qemu_log_lock();
3096 qemu_log("OP after optimization and liveness analysis:\n");
3097 tcg_dump_ops(s);
3098 qemu_log("\n");
3099 qemu_log_unlock();
3100 }
3101 #endif
3102
3103 tcg_reg_alloc_start(s);
3104
3105 s->code_buf = tb->tc.ptr;
3106 s->code_ptr = tb->tc.ptr;
3107
3108 #ifdef TCG_TARGET_NEED_LDST_LABELS
3109 s->ldst_labels = NULL;
3110 #endif
3111 #ifdef TCG_TARGET_NEED_POOL_LABELS
3112 s->pool_labels = NULL;
3113 #endif
3114
3115 num_insns = -1;
3116 for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
3117 TCGOp * const op = &s->gen_op_buf[oi];
3118 TCGOpcode opc = op->opc;
3119
3120 oi_next = op->next;
3121 #ifdef CONFIG_PROFILER
3122 atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
3123 #endif
3124
3125 switch (opc) {
3126 case INDEX_op_mov_i32:
3127 case INDEX_op_mov_i64:
3128 tcg_reg_alloc_mov(s, op);
3129 break;
3130 case INDEX_op_movi_i32:
3131 case INDEX_op_movi_i64:
3132 tcg_reg_alloc_movi(s, op);
3133 break;
3134 case INDEX_op_insn_start:
3135 if (num_insns >= 0) {
3136 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
3137 }
3138 num_insns++;
3139 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
3140 target_ulong a;
3141 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
3142 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
3143 #else
3144 a = op->args[i];
3145 #endif
3146 s->gen_insn_data[num_insns][i] = a;
3147 }
3148 break;
3149 case INDEX_op_discard:
3150 temp_dead(s, arg_temp(op->args[0]));
3151 break;
3152 case INDEX_op_set_label:
3153 tcg_reg_alloc_bb_end(s, s->reserved_regs);
3154 tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
3155 break;
3156 case INDEX_op_call:
3157 tcg_reg_alloc_call(s, op);
3158 break;
3159 default:
3160 /* Sanity check that we've not introduced any unhandled opcodes. */
3161 tcg_debug_assert(tcg_op_supported(opc));
3162 /* Note: code generation would be much faster with specialized
3163 register allocator functions for some common argument
3164 patterns. */
3165 tcg_reg_alloc_op(s, op);
3166 break;
3167 }
3168 #ifdef CONFIG_DEBUG_TCG
3169 check_regs(s);
3170 #endif
3171 /* Test for (pending) buffer overflow. The assumption is that any
3172 one operation beginning below the high water mark cannot overrun
3173 the buffer completely. Thus we can test for overflow after
3174 generating code without having to check during generation. */
3175 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
3176 return -1;
3177 }
3178 }
3179 tcg_debug_assert(num_insns >= 0);
3180 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
3181
3182 /* Generate TB finalization at the end of block */
3183 #ifdef TCG_TARGET_NEED_LDST_LABELS
3184 if (!tcg_out_ldst_finalize(s)) {
3185 return -1;
3186 }
3187 #endif
3188 #ifdef TCG_TARGET_NEED_POOL_LABELS
3189 if (!tcg_out_pool_finalize(s)) {
3190 return -1;
3191 }
3192 #endif
3193
3194 /* flush instruction cache */
3195 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
3196
3197 return tcg_current_code_size(s);
3198 }
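/* Note for callers: a negative return value above means that code
   emission (or one of the finalization passes) crossed
   code_gen_highwater; the translation is expected to be retried once
   fresh buffer space has been arranged. */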
3199
3200 #ifdef CONFIG_PROFILER
3201 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
3202 {
3203 TCGProfile prof = {};
3204 const TCGProfile *s;
3205 int64_t tb_count;
3206 int64_t tb_div_count;
3207 int64_t tot;
3208
3209 tcg_profile_snapshot_counters(&prof);
3210 s = &prof;
3211 tb_count = s->tb_count;
3212 tb_div_count = tb_count ? tb_count : 1;
3213 tot = s->interm_time + s->code_time;
3214
3215 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3216 tot, tot / 2.4e9);
3217 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3218 tb_count, s->tb_count1 - tb_count,
3219 (double)(s->tb_count1 - s->tb_count)
3220 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
3221 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3222 (double)s->op_count / tb_div_count, s->op_count_max);
3223 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3224 (double)s->del_op_count / tb_div_count);
3225 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
3226 (double)s->temp_count / tb_div_count, s->temp_count_max);
3227 cpu_fprintf(f, "avg host code/TB %0.1f\n",
3228 (double)s->code_out_len / tb_div_count);
3229 cpu_fprintf(f, "avg search data/TB %0.1f\n",
3230 (double)s->search_out_len / tb_div_count);
3231
3232 cpu_fprintf(f, "cycles/op %0.1f\n",
3233 s->op_count ? (double)tot / s->op_count : 0);
3234 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3235 s->code_in_len ? (double)tot / s->code_in_len : 0);
3236 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3237 s->code_out_len ? (double)tot / s->code_out_len : 0);
3238 cpu_fprintf(f, "cycles/search byte %0.1f\n",
3239 s->search_out_len ? (double)tot / s->search_out_len : 0);
3240 if (tot == 0) {
3241 tot = 1;
3242 }
3243 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3244 (double)s->interm_time / tot * 100.0);
3245 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3246 (double)s->code_time / tot * 100.0);
3247 cpu_fprintf(f, "optim./code time %0.1f%%\n",
3248 (double)s->opt_time / (s->code_time ? s->code_time : 1)
3249 * 100.0);
3250 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
3251 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
3252 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3253 s->restore_count);
3254 cpu_fprintf(f, " avg cycles %0.1f\n",
3255 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
3256 }
3257 #else
3258 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
3259 {
3260 cpu_fprintf(f, "[TCG profiler not compiled]\n");
3261 }
3262 #endif
3263
3264 #ifdef ELF_HOST_MACHINE
3265 /* In order to use this feature, the backend needs to do three things:
3266
3267 (1) Define ELF_HOST_MACHINE to indicate both what value to
3268 put into the ELF image and to indicate support for the feature.
3269
3270 (2) Define tcg_register_jit. This should create a buffer containing
3271 the contents of a .debug_frame section that describes the post-
3272 prologue unwind info for the tcg machine.
3273
3274 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
3275 */
3276
3277 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
3278 typedef enum {
3279 JIT_NOACTION = 0,
3280 JIT_REGISTER_FN,
3281 JIT_UNREGISTER_FN
3282 } jit_actions_t;
3283
3284 struct jit_code_entry {
3285 struct jit_code_entry *next_entry;
3286 struct jit_code_entry *prev_entry;
3287 const void *symfile_addr;
3288 uint64_t symfile_size;
3289 };
3290
3291 struct jit_descriptor {
3292 uint32_t version;
3293 uint32_t action_flag;
3294 struct jit_code_entry *relevant_entry;
3295 struct jit_code_entry *first_entry;
3296 };
3297
3298 void __jit_debug_register_code(void) __attribute__((noinline));
3299 void __jit_debug_register_code(void)
3300 {
3301 asm("");
3302 }
3303
3304 /* Must statically initialize the version, because GDB may check
3305 the version before we can set it. */
3306 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
3307
3308 /* End GDB interface. */
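/* Per the GDB JIT interface documentation: GDB plants a breakpoint in
   __jit_debug_register_code and, on each call, inspects
   __jit_debug_descriptor; for JIT_REGISTER_FN it reads the in-memory
   ELF image at relevant_entry->symfile_addr. Once the image built by
   tcg_register_jit_int below is registered, e.g.

       (gdb) info symbol $pc

   inside generated code should resolve to code_gen_buffer. */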
3309
3310 static int find_string(const char *strtab, const char *str)
3311 {
3312 const char *p = strtab + 1;
3313
3314 while (1) {
3315 if (strcmp(p, str) == 0) {
3316 return p - strtab;
3317 }
3318 p += strlen(p) + 1;
3319 }
3320 }
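/* For example, given the .str table used below, which begins
   "\0.text\0.debug_info\0...", find_string(img->str, ".text") returns
   1 (the offset just past the leading NUL), which is exactly the form
   the ELF sh_name/st_name fields expect. The table must contain the
   sought string; the loop has no other exit. */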
3321
3322 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
3323 const void *debug_frame,
3324 size_t debug_frame_size)
3325 {
3326 struct __attribute__((packed)) DebugInfo {
3327 uint32_t len;
3328 uint16_t version;
3329 uint32_t abbrev;
3330 uint8_t ptr_size;
3331 uint8_t cu_die;
3332 uint16_t cu_lang;
3333 uintptr_t cu_low_pc;
3334 uintptr_t cu_high_pc;
3335 uint8_t fn_die;
3336 char fn_name[16];
3337 uintptr_t fn_low_pc;
3338 uintptr_t fn_high_pc;
3339 uint8_t cu_eoc;
3340 };
3341
3342 struct ElfImage {
3343 ElfW(Ehdr) ehdr;
3344 ElfW(Phdr) phdr;
3345 ElfW(Shdr) shdr[7];
3346 ElfW(Sym) sym[2];
3347 struct DebugInfo di;
3348 uint8_t da[24];
3349 char str[80];
3350 };
3351
3352 struct ElfImage *img;
3353
3354 static const struct ElfImage img_template = {
3355 .ehdr = {
3356 .e_ident[EI_MAG0] = ELFMAG0,
3357 .e_ident[EI_MAG1] = ELFMAG1,
3358 .e_ident[EI_MAG2] = ELFMAG2,
3359 .e_ident[EI_MAG3] = ELFMAG3,
3360 .e_ident[EI_CLASS] = ELF_CLASS,
3361 .e_ident[EI_DATA] = ELF_DATA,
3362 .e_ident[EI_VERSION] = EV_CURRENT,
3363 .e_type = ET_EXEC,
3364 .e_machine = ELF_HOST_MACHINE,
3365 .e_version = EV_CURRENT,
3366 .e_phoff = offsetof(struct ElfImage, phdr),
3367 .e_shoff = offsetof(struct ElfImage, shdr),
3368 .e_ehsize = sizeof(ElfW(Ehdr)),
3369 .e_phentsize = sizeof(ElfW(Phdr)),
3370 .e_phnum = 1,
3371 .e_shentsize = sizeof(ElfW(Shdr)),
3372 .e_shnum = ARRAY_SIZE(img->shdr),
3373 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
3374 #ifdef ELF_HOST_FLAGS
3375 .e_flags = ELF_HOST_FLAGS,
3376 #endif
3377 #ifdef ELF_OSABI
3378 .e_ident[EI_OSABI] = ELF_OSABI,
3379 #endif
3380 },
3381 .phdr = {
3382 .p_type = PT_LOAD,
3383 .p_flags = PF_X,
3384 },
3385 .shdr = {
3386 [0] = { .sh_type = SHT_NULL },
3387 /* Trick: The contents of code_gen_buffer are not present in
3388 this fake ELF file; that got allocated elsewhere. Therefore
3389 we mark .text as SHT_NOBITS (similar to .bss) so that readers
3390 will not look for contents. We can record any address. */
3391 [1] = { /* .text */
3392 .sh_type = SHT_NOBITS,
3393 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
3394 },
3395 [2] = { /* .debug_info */
3396 .sh_type = SHT_PROGBITS,
3397 .sh_offset = offsetof(struct ElfImage, di),
3398 .sh_size = sizeof(struct DebugInfo),
3399 },
3400 [3] = { /* .debug_abbrev */
3401 .sh_type = SHT_PROGBITS,
3402 .sh_offset = offsetof(struct ElfImage, da),
3403 .sh_size = sizeof(img->da),
3404 },
3405 [4] = { /* .debug_frame */
3406 .sh_type = SHT_PROGBITS,
3407 .sh_offset = sizeof(struct ElfImage),
3408 },
3409 [5] = { /* .symtab */
3410 .sh_type = SHT_SYMTAB,
3411 .sh_offset = offsetof(struct ElfImage, sym),
3412 .sh_size = sizeof(img->sym),
3413 .sh_info = 1,
3414 .sh_link = ARRAY_SIZE(img->shdr) - 1,
3415 .sh_entsize = sizeof(ElfW(Sym)),
3416 },
3417 [6] = { /* .strtab */
3418 .sh_type = SHT_STRTAB,
3419 .sh_offset = offsetof(struct ElfImage, str),
3420 .sh_size = sizeof(img->str),
3421 }
3422 },
3423 .sym = {
3424 [1] = { /* code_gen_buffer */
3425 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
3426 .st_shndx = 1,
3427 }
3428 },
3429 .di = {
3430 .len = sizeof(struct DebugInfo) - 4,
3431 .version = 2,
3432 .ptr_size = sizeof(void *),
3433 .cu_die = 1,
3434 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
3435 .fn_die = 2,
3436 .fn_name = "code_gen_buffer"
3437 },
3438 .da = {
3439 1, /* abbrev number (the cu) */
3440 0x11, 1, /* DW_TAG_compile_unit, has children */
3441 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
3442 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3443 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3444 0, 0, /* end of abbrev */
3445 2, /* abbrev number (the fn) */
3446 0x2e, 0, /* DW_TAG_subprogram, no children */
3447 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
3448 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3449 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3450 0, 0, /* end of abbrev */
3451 0 /* no more abbrev */
3452 },
3453 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
3454 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
3455 };
3456
3457 /* We only need a single jit entry; statically allocate it. */
3458 static struct jit_code_entry one_entry;
3459
3460 uintptr_t buf = (uintptr_t)buf_ptr;
3461 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
3462 DebugFrameHeader *dfh;
3463
3464 img = g_malloc(img_size);
3465 *img = img_template;
3466
3467 img->phdr.p_vaddr = buf;
3468 img->phdr.p_paddr = buf;
3469 img->phdr.p_memsz = buf_size;
3470
3471 img->shdr[1].sh_name = find_string(img->str, ".text");
3472 img->shdr[1].sh_addr = buf;
3473 img->shdr[1].sh_size = buf_size;
3474
3475 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
3476 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
3477
3478 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
3479 img->shdr[4].sh_size = debug_frame_size;
3480
3481 img->shdr[5].sh_name = find_string(img->str, ".symtab");
3482 img->shdr[6].sh_name = find_string(img->str, ".strtab");
3483
3484 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
3485 img->sym[1].st_value = buf;
3486 img->sym[1].st_size = buf_size;
3487
3488 img->di.cu_low_pc = buf;
3489 img->di.cu_high_pc = buf + buf_size;
3490 img->di.fn_low_pc = buf;
3491 img->di.fn_high_pc = buf + buf_size;
3492
3493 dfh = (DebugFrameHeader *)(img + 1);
3494 memcpy(dfh, debug_frame, debug_frame_size);
3495 dfh->fde.func_start = buf;
3496 dfh->fde.func_len = buf_size;
3497
3498 #ifdef DEBUG_JIT
3499 /* Enable this block to be able to debug the ELF image file creation.
3500 One can use readelf, objdump, or other inspection utilities. */
3501 {
3502 FILE *f = fopen("/tmp/qemu.jit", "w+b");
3503 if (f) {
3504 if (fwrite(img, img_size, 1, f) != 1) {
3505 /* Avoid the unused return value warning for fwrite. */
3506 }
3507 fclose(f);
3508 }
3509 }
3510 #endif
3511
3512 one_entry.symfile_addr = img;
3513 one_entry.symfile_size = img_size;
3514
3515 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
3516 __jit_debug_descriptor.relevant_entry = &one_entry;
3517 __jit_debug_descriptor.first_entry = &one_entry;
3518 __jit_debug_register_code();
3519 }
3520 #else
3521 /* No support for the feature. Provide the entry point expected by exec.c,
3522 and implement the internal function we declared earlier. */
3523
3524 static void tcg_register_jit_int(void *buf, size_t size,
3525 const void *debug_frame,
3526 size_t debug_frame_size)
3527 {
3528 }
3529
3530 void tcg_register_jit(void *buf, size_t buf_size)
3531 {
3532 }
3533 #endif /* ELF_HOST_MACHINE */