/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;

struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
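
/*
 * Editor's note: a worked layout example for the fields above, with
 * illustrative numbers (not taken from the source). Given a 32 MB
 * code_gen_buffer, 4 KB host pages and n = 16, tcg_region_init() below
 * ends up with stride = 2 MB and size = 2 MB - 4 KB: each region is one
 * stride's worth of buffer whose last page is turned into a guard page.
 * Region i then starts at start_aligned + i * stride, with region 0
 * extended down to 'start' and the last region extended up to 'end'.
 */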
/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
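
/*
 * Editor's note (illustrative, not from the original source):
 * tcg_insn_unit is the host's instruction granule, so the helpers above
 * adapt automatically. With 1-byte units (hosts where tcg_insn_unit is
 * uint8_t), tcg_out32() takes the memcpy path and advances code_ptr by
 * four units; with 4-byte units it compiles down to a single store:
 *
 *     tcg_out32(s, insn);    // emit one 32-bit instruction word
 */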

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}
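
/*
 * Editor's note: a sketch of the intended flow for a forward branch
 * (the target-specific emitter names are assumed, not quoted from here):
 *
 *     TCGLabel *l = gen_new_label();
 *     // the brcond emitter calls tcg_out_reloc(s, s->code_ptr, type, l, 0)
 *     ...                               // label not yet resolved
 *     tcg_out_label(s, l, s->code_ptr); // patch every queued relocation
 *
 * For a backward branch the label already has_value, so tcg_out_reloc()
 * patches the instruction immediately instead of queueing it.
 */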

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have the key's .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
{
    size_t region_idx;

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
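
/*
 * Editor's note: a worked example for tc_ptr_to_region_tree(), using
 * made-up numbers. With region.stride = 2 MB, a pointer 5 MB past
 * start_aligned gives offset / stride = 2, i.e. the tree at
 * region_trees + 2 * tree_size. Pointers below start_aligned (region 0's
 * unaligned prefix) or beyond the last stride are clamped to the first
 * and last tree, mirroring how tcg_region_bounds() extends those regions.
 */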

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
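
/*
 * Editor's note: worked examples for the sizing loop above (assumed
 * buffer sizes). With max_cpus = 8 and a 256 MB buffer, i = 8 already
 * gives 256 MB / 64 = 4 MB >= 2 MB, so 64 regions are used. With a
 * 64 MB buffer the first size reaching 2 MB is i = 4 (64 MB / 32 = 2 MB),
 * yielding 32 regions; if no i works, we fall back to one region per
 * vCPU thread.
 */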

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
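
/*
 * Editor's note: the expected call order, sketched for softmmu MTTCG
 * (setup code elsewhere in QEMU is paraphrased, not quoted):
 *
 *     tcg_context_init(&tcg_init_ctx);  // parent: op defs, helper table
 *     tcg_region_init();                // parent: split code_gen_buffer
 *     ...
 *     tcg_register_thread();            // each vCPU thread, before its
 *                                       // first translation
 */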

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
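
/*
 * Editor's note: capacity arithmetic with illustrative numbers. For a
 * 32 MB buffer in 16 regions with 4 KB guard pages, each region
 * contributes stride - guard_size - TCG_HIGHWATER usable bytes
 * (2 MB - 4 KB - 1 KB), so the reported capacity is the raw size minus
 * 16 * 5 KB, i.e. about 80 KB less than the buffer itself.
 */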

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);

        total += atomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
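
/*
 * Editor's note: a usage sketch for the pool allocator above. There is
 * no per-object free; everything returned by tcg_malloc() stays valid
 * until the next tcg_pool_reset(), which tcg_func_start() issues once
 * per translation:
 *
 *     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
 *     ...                    // valid while the current TB is being built
 *     tcg_pool_reset(s);     // frees large mallocs, rewinds pool chunks
 */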

typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for the
     * reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
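
/*
 * Editor's note: an illustrative use of the 32-bit-host split above
 * (the guest field is assumed for the example):
 *
 *     tcg_global_mem_new_i64(cpu_env, offsetof(CPUArchState, regs[0]), "r0");
 *
 * On a 32-bit host this creates two TCG_TYPE_I32 halves, "r0_0" (low)
 * and "r0_1" (high); on a big-endian host the low half lives at
 * offset + 4 and the high half at offset, and vice versa on
 * little-endian.
 */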

TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
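
/*
 * Editor's note: how the free list above pairs with allocation, sketched.
 * Freeing a temp only marks its index in free_temps[k]; the storage is
 * recycled by the next tcg_temp_new_internal() of the same type/locality:
 *
 *     TCGv_i32 t = tcg_temp_new_i32();   // may reuse a freed "tmpN"
 *     tcg_gen_movi_i32(t, 123);
 *     ...
 *     tcg_temp_free_i32(t);              // set_bit() in free_temps[k]
 */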

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
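
/*
 * Editor's note: an assumed usage pattern; op expanders are expected to
 * probe optional opcodes through this predicate and emit a fallback
 * sequence when the backend lacks them:
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         // emit ctpop directly
 *     } else {
 *         // expand via shifts/masks using mandatory ops
 *     }
 */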

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
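
/*
 * Editor's note: how the sizemask tested above is laid out, with a worked
 * example (the encoding is produced by the dh_* macros in
 * exec/helper-head.h). Two bits per value, starting at the return value:
 * bit 2k means "64-bit" and bit 2k+1 means "signed", with k = 0 for the
 * return and k = i + 1 for argument i. So a helper
 *
 *     uint64_t helper_x(uint32_t a, uint64_t b)
 *
 * gets sizemask = (1 << 0) | (1 << 4) = 0x11: 64-bit return, 32-bit
 * arg 0, 64-bit arg 1.
 */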
c896fe29 1777
8fcd3692 1778static void tcg_reg_alloc_start(TCGContext *s)
c896fe29 1779{
ac3b8891 1780 int i, n;
c896fe29 1781 TCGTemp *ts;
ac3b8891
RH
1782
1783 for (i = 0, n = s->nb_globals; i < n; i++) {
c896fe29 1784 ts = &s->temps[i];
ac3b8891 1785 ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
c896fe29 1786 }
ac3b8891 1787 for (n = s->nb_temps; i < n; i++) {
e8996ee0 1788 ts = &s->temps[i];
ac3b8891 1789 ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
e8996ee0
FB
1790 ts->mem_allocated = 0;
1791 ts->fixed_reg = 0;
1792 }
f8b2f202
RH
1793
1794 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
c896fe29
FB
1795}
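/* Summary of the initial state established by tcg_reg_alloc_start():

       fixed-reg global   ->  TEMP_VAL_REG   (always lives in its register)
       other global       ->  TEMP_VAL_MEM   (lives in its canonical slot)
       local temp         ->  TEMP_VAL_MEM
       plain temp         ->  TEMP_VAL_DEAD  (holds no value yet)

   and reg_to_temp[] is cleared, i.e. every host register starts free. */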
1796
f8b2f202
RH
1797static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1798 TCGTemp *ts)
c896fe29 1799{
1807f4c4 1800 int idx = temp_idx(ts);
ac56dd48 1801
fa477d25 1802 if (ts->temp_global) {
ac56dd48 1803 pstrcpy(buf, buf_size, ts->name);
f8b2f202
RH
1804 } else if (ts->temp_local) {
1805 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
c896fe29 1806 } else {
f8b2f202 1807 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
c896fe29
FB
1808 }
1809 return buf;
1810}
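/* Illustration: with hypothetical globals "env" and "eax", the helpers
   above print globals by name and everything else positionally, e.g.
   "tmp5" or "loc2", with the index counted from nb_globals. */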
1811
43439139
RH
1812static char *tcg_get_arg_str(TCGContext *s, char *buf,
1813 int buf_size, TCGArg arg)
f8b2f202 1814{
43439139 1815 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
f8b2f202
RH
1816}
1817
6e085f72
RH
1818/* Find helper name. */
1819static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
4dc81f28 1820{
6e085f72 1821 const char *ret = NULL;
619205fd
EC
1822 if (helper_table) {
1823 TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
72866e82
RH
1824 if (info) {
1825 ret = info->name;
1826 }
4dc81f28 1827 }
6e085f72 1828 return ret;
4dc81f28
FB
1829}
1830
f48f3ede
BS
1831static const char * const cond_name[] =
1832{
0aed257f
RH
1833 [TCG_COND_NEVER] = "never",
1834 [TCG_COND_ALWAYS] = "always",
f48f3ede
BS
1835 [TCG_COND_EQ] = "eq",
1836 [TCG_COND_NE] = "ne",
1837 [TCG_COND_LT] = "lt",
1838 [TCG_COND_GE] = "ge",
1839 [TCG_COND_LE] = "le",
1840 [TCG_COND_GT] = "gt",
1841 [TCG_COND_LTU] = "ltu",
1842 [TCG_COND_GEU] = "geu",
1843 [TCG_COND_LEU] = "leu",
1844 [TCG_COND_GTU] = "gtu"
1845};
1846
f713d6ad
RH
1847static const char * const ldst_name[] =
1848{
1849 [MO_UB] = "ub",
1850 [MO_SB] = "sb",
1851 [MO_LEUW] = "leuw",
1852 [MO_LESW] = "lesw",
1853 [MO_LEUL] = "leul",
1854 [MO_LESL] = "lesl",
1855 [MO_LEQ] = "leq",
1856 [MO_BEUW] = "beuw",
1857 [MO_BESW] = "besw",
1858 [MO_BEUL] = "beul",
1859 [MO_BESL] = "besl",
1860 [MO_BEQ] = "beq",
1861};
1862
1f00b27f
SS
1863static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1864#ifdef ALIGNED_ONLY
1865 [MO_UNALN >> MO_ASHIFT] = "un+",
1866 [MO_ALIGN >> MO_ASHIFT] = "",
1867#else
1868 [MO_UNALN >> MO_ASHIFT] = "",
1869 [MO_ALIGN >> MO_ASHIFT] = "al+",
1870#endif
1871 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1872 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1873 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1874 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1875 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1876 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1877};
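/* Illustration: given these tables, a little-endian unsigned 32-bit
   load carrying MO_ALIGN prints as "al+leul" (just "leul" when
   ALIGNED_ONLY makes alignment the default); see the qemu_ld/st case
   in tcg_dump_ops() below. */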
1878
eeacee4d 1879void tcg_dump_ops(TCGContext *s)
c896fe29 1880{
c896fe29 1881 char buf[128];
c45cb8bb 1882 TCGOp *op;
c45cb8bb 1883
15fa08f8 1884 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb
RH
1885 int i, k, nb_oargs, nb_iargs, nb_cargs;
1886 const TCGOpDef *def;
c45cb8bb 1887 TCGOpcode c;
bdfb460e 1888 int col = 0;
c896fe29 1889
c45cb8bb 1890 c = op->opc;
c896fe29 1891 def = &tcg_op_defs[c];
c45cb8bb 1892
765b842a 1893 if (c == INDEX_op_insn_start) {
15fa08f8 1894 col += qemu_log("\n ----");
9aef40ed
RH
1895
1896 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1897 target_ulong a;
7e4597d7 1898#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 1899 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
7e4597d7 1900#else
efee3746 1901 a = op->args[i];
7e4597d7 1902#endif
bdfb460e 1903 col += qemu_log(" " TARGET_FMT_lx, a);
eeacee4d 1904 }
7e4597d7 1905 } else if (c == INDEX_op_call) {
c896fe29 1906 /* variable number of arguments */
cd9090aa
RH
1907 nb_oargs = TCGOP_CALLO(op);
1908 nb_iargs = TCGOP_CALLI(op);
c896fe29 1909 nb_cargs = def->nb_cargs;
c896fe29 1910
cf066674 1911 /* function name, flags, out args */
bdfb460e 1912 col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
efee3746
RH
1913 tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
1914 op->args[nb_oargs + nb_iargs + 1], nb_oargs);
cf066674 1915 for (i = 0; i < nb_oargs; i++) {
43439139
RH
1916 col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1917 op->args[i]));
b03cce8e 1918 }
cf066674 1919 for (i = 0; i < nb_iargs; i++) {
efee3746 1920 TCGArg arg = op->args[nb_oargs + i];
cf066674
RH
1921 const char *t = "<dummy>";
1922 if (arg != TCG_CALL_DUMMY_ARG) {
43439139 1923 t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
eeacee4d 1924 }
bdfb460e 1925 col += qemu_log(",%s", t);
e8996ee0 1926 }
b03cce8e 1927 } else {
bdfb460e 1928 col += qemu_log(" %s ", def->name);
c45cb8bb
RH
1929
1930 nb_oargs = def->nb_oargs;
1931 nb_iargs = def->nb_iargs;
1932 nb_cargs = def->nb_cargs;
1933
d2fd745f
RH
1934 if (def->flags & TCG_OPF_VECTOR) {
1935 col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
1936 8 << TCGOP_VECE(op));
1937 }
1938
b03cce8e 1939 k = 0;
c45cb8bb 1940 for (i = 0; i < nb_oargs; i++) {
eeacee4d 1941 if (k != 0) {
bdfb460e 1942 col += qemu_log(",");
eeacee4d 1943 }
43439139
RH
1944 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1945 op->args[k++]));
b03cce8e 1946 }
c45cb8bb 1947 for (i = 0; i < nb_iargs; i++) {
eeacee4d 1948 if (k != 0) {
bdfb460e 1949 col += qemu_log(",");
eeacee4d 1950 }
43439139
RH
1951 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1952 op->args[k++]));
b03cce8e 1953 }
be210acb
RH
1954 switch (c) {
1955 case INDEX_op_brcond_i32:
be210acb 1956 case INDEX_op_setcond_i32:
ffc5ea09 1957 case INDEX_op_movcond_i32:
ffc5ea09 1958 case INDEX_op_brcond2_i32:
be210acb 1959 case INDEX_op_setcond2_i32:
ffc5ea09 1960 case INDEX_op_brcond_i64:
be210acb 1961 case INDEX_op_setcond_i64:
ffc5ea09 1962 case INDEX_op_movcond_i64:
212be173 1963 case INDEX_op_cmp_vec:
efee3746
RH
1964 if (op->args[k] < ARRAY_SIZE(cond_name)
1965 && cond_name[op->args[k]]) {
1966 col += qemu_log(",%s", cond_name[op->args[k++]]);
eeacee4d 1967 } else {
efee3746 1968 col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
eeacee4d 1969 }
f48f3ede 1970 i = 1;
be210acb 1971 break;
f713d6ad
RH
1972 case INDEX_op_qemu_ld_i32:
1973 case INDEX_op_qemu_st_i32:
1974 case INDEX_op_qemu_ld_i64:
1975 case INDEX_op_qemu_st_i64:
59227d5d 1976 {
efee3746 1977 TCGMemOpIdx oi = op->args[k++];
59227d5d
RH
1978 TCGMemOp op = get_memop(oi);
1979 unsigned ix = get_mmuidx(oi);
1980
59c4b7e8 1981 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
bdfb460e 1982 col += qemu_log(",$0x%x,%u", op, ix);
59c4b7e8 1983 } else {
1f00b27f
SS
1984 const char *s_al, *s_op;
1985 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
59c4b7e8 1986 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
bdfb460e 1987 col += qemu_log(",%s%s,%u", s_al, s_op, ix);
59227d5d
RH
1988 }
1989 i = 1;
f713d6ad 1990 }
f713d6ad 1991 break;
be210acb 1992 default:
f48f3ede 1993 i = 0;
be210acb
RH
1994 break;
1995 }
51e3972c
RH
1996 switch (c) {
1997 case INDEX_op_set_label:
1998 case INDEX_op_br:
1999 case INDEX_op_brcond_i32:
2000 case INDEX_op_brcond_i64:
2001 case INDEX_op_brcond2_i32:
efee3746
RH
2002 col += qemu_log("%s$L%d", k ? "," : "",
2003 arg_label(op->args[k])->id);
51e3972c
RH
2004 i++, k++;
2005 break;
2006 default:
2007 break;
2008 }
2009 for (; i < nb_cargs; i++, k++) {
efee3746 2010 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
bdfb460e
RH
2011 }
2012 }
2013 if (op->life) {
2014 unsigned life = op->life;
2015
2016 for (; col < 48; ++col) {
2017 putc(' ', qemu_logfile);
2018 }
2019
2020 if (life & (SYNC_ARG * 3)) {
2021 qemu_log(" sync:");
2022 for (i = 0; i < 2; ++i) {
2023 if (life & (SYNC_ARG << i)) {
2024 qemu_log(" %d", i);
2025 }
2026 }
2027 }
2028 life /= DEAD_ARG;
2029 if (life) {
2030 qemu_log(" dead:");
2031 for (i = 0; life; ++i, life >>= 1) {
2032 if (life & 1) {
2033 qemu_log(" %d", i);
2034 }
2035 }
b03cce8e 2036 }
c896fe29 2037 }
eeacee4d 2038 qemu_log("\n");
c896fe29
FB
2039 }
2040}
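/* The output of tcg_dump_ops() looks roughly like this (temp names and
   the guest address are illustrative):

    ---- 0000000000400078
    mov_i32 tmp0,eax
    add_i32 tmp0,tmp0,tmp1                   dead: 2
    brcond_i32 tmp0,$0x0,eq,$L0

   one "----" header per guest instruction start, then one line per op,
   padded to column 48 before the sync:/dead: liveness annotations. */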
2041
2042/* we give more priority to constraints with fewer registers */
2043static int get_constraint_priority(const TCGOpDef *def, int k)
2044{
2045 const TCGArgConstraint *arg_ct;
2046
2047 int i, n;
2048 arg_ct = &def->args_ct[k];
2049 if (arg_ct->ct & TCG_CT_ALIAS) {
2050 /* an alias is equivalent to a single register */
2051 n = 1;
2052 } else {
2053 if (!(arg_ct->ct & TCG_CT_REG))
2054 return 0;
2055 n = 0;
2056 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
2057 if (tcg_regset_test_reg(arg_ct->u.regs, i))
2058 n++;
2059 }
2060 }
2061 return TCG_TARGET_NB_REGS - n + 1;
2062}
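/* Worked example, assuming TCG_TARGET_NB_REGS == 16: a constraint
   accepting all 16 registers scores 16 - 16 + 1 = 1, a single-register
   constraint scores 16, an alias also counts as one register (16), and
   a constant-only constraint scores 0 -- so the tightest constraints
   are placed first by sort_constraints() below. */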
2063
2064/* sort from highest priority to lowest */
2065static void sort_constraints(TCGOpDef *def, int start, int n)
2066{
2067 int i, j, p1, p2, tmp;
2068
2069 for(i = 0; i < n; i++)
2070 def->sorted_args[start + i] = start + i;
2071 if (n <= 1)
2072 return;
2073 for(i = 0; i < n - 1; i++) {
2074 for(j = i + 1; j < n; j++) {
2075 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
2076 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
2077 if (p1 < p2) {
2078 tmp = def->sorted_args[start + i];
2079 def->sorted_args[start + i] = def->sorted_args[start + j];
2080 def->sorted_args[start + j] = tmp;
2081 }
2082 }
2083 }
2084}
2085
f69d277e 2086static void process_op_defs(TCGContext *s)
c896fe29 2087{
a9751609 2088 TCGOpcode op;
c896fe29 2089
f69d277e
RH
2090 for (op = 0; op < NB_OPS; op++) {
2091 TCGOpDef *def = &tcg_op_defs[op];
2092 const TCGTargetOpDef *tdefs;
069ea736
RH
2093 TCGType type;
2094 int i, nb_args;
f69d277e
RH
2095
2096 if (def->flags & TCG_OPF_NOT_PRESENT) {
2097 continue;
2098 }
2099
c896fe29 2100 nb_args = def->nb_iargs + def->nb_oargs;
f69d277e
RH
2101 if (nb_args == 0) {
2102 continue;
2103 }
2104
2105 tdefs = tcg_target_op_def(op);
2106 /* Missing TCGTargetOpDef entry. */
2107 tcg_debug_assert(tdefs != NULL);
2108
069ea736 2109 type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
f69d277e
RH
2110 for (i = 0; i < nb_args; i++) {
2111 const char *ct_str = tdefs->args_ct_str[i];
2112 /* Incomplete TCGTargetOpDef entry. */
eabb7b91 2113 tcg_debug_assert(ct_str != NULL);
f69d277e 2114
ccb1bb66 2115 def->args_ct[i].u.regs = 0;
c896fe29 2116 def->args_ct[i].ct = 0;
17280ff4
RH
2117 while (*ct_str != '\0') {
2118 switch(*ct_str) {
2119 case '0' ... '9':
2120 {
2121 int oarg = *ct_str - '0';
2122 tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
2123 tcg_debug_assert(oarg < def->nb_oargs);
2124 tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
2125 /* TCG_CT_ALIAS is for the output arguments.
2126 The input is tagged with TCG_CT_IALIAS. */
2127 def->args_ct[i] = def->args_ct[oarg];
2128 def->args_ct[oarg].ct |= TCG_CT_ALIAS;
2129 def->args_ct[oarg].alias_index = i;
2130 def->args_ct[i].ct |= TCG_CT_IALIAS;
2131 def->args_ct[i].alias_index = oarg;
c896fe29 2132 }
17280ff4
RH
2133 ct_str++;
2134 break;
2135 case '&':
2136 def->args_ct[i].ct |= TCG_CT_NEWREG;
2137 ct_str++;
2138 break;
2139 case 'i':
2140 def->args_ct[i].ct |= TCG_CT_CONST;
2141 ct_str++;
2142 break;
2143 default:
2144 ct_str = target_parse_constraint(&def->args_ct[i],
2145 ct_str, type);
2146 /* Typo in TCGTargetOpDef constraint. */
2147 tcg_debug_assert(ct_str != NULL);
c896fe29
FB
2148 }
2149 }
2150 }
2151
c68aaa18 2152 /* TCGTargetOpDef entry with too much information? */
eabb7b91 2153 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
c68aaa18 2154
c896fe29
FB
2155 /* sort the constraints (XXX: this is just a heuristic) */
2156 sort_constraints(def, 0, def->nb_oargs);
2157 sort_constraints(def, def->nb_oargs, def->nb_iargs);
a9751609 2158 }
c896fe29
FB
2159}
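/* Illustration of the constraint strings parsed above: a backend might
   describe add_i32 as { "r", "r", "ri" } -- output in any register,
   first input in any register, second input register-or-immediate.
   "0"-"9" alias an input to that output (TCG_CT_ALIAS/TCG_CT_IALIAS),
   "&" demands a fresh output register (TCG_CT_NEWREG), "i" allows any
   constant, and the remaining letters are target-defined, handled by
   target_parse_constraint(). */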
2160
0c627cdc
RH
2161void tcg_op_remove(TCGContext *s, TCGOp *op)
2162{
15fa08f8
RH
2163 QTAILQ_REMOVE(&s->ops, op, link);
2164 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
abebf925 2165 s->nb_ops--;
0c627cdc
RH
2166
2167#ifdef CONFIG_PROFILER
c3fac113 2168 atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
0c627cdc
RH
2169#endif
2170}
2171
15fa08f8 2172static TCGOp *tcg_op_alloc(TCGOpcode opc)
5a18407f 2173{
15fa08f8
RH
2174 TCGContext *s = tcg_ctx;
2175 TCGOp *op;
5a18407f 2176
15fa08f8
RH
2177 if (likely(QTAILQ_EMPTY(&s->free_ops))) {
2178 op = tcg_malloc(sizeof(TCGOp));
2179 } else {
2180 op = QTAILQ_FIRST(&s->free_ops);
2181 QTAILQ_REMOVE(&s->free_ops, op, link);
2182 }
2183 memset(op, 0, offsetof(TCGOp, link));
2184 op->opc = opc;
abebf925 2185 s->nb_ops++;
5a18407f 2186
15fa08f8
RH
2187 return op;
2188}
2189
2190TCGOp *tcg_emit_op(TCGOpcode opc)
2191{
2192 TCGOp *op = tcg_op_alloc(opc);
2193 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2194 return op;
2195}
5a18407f 2196
15fa08f8
RH
2197TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
2198 TCGOpcode opc, int nargs)
2199{
2200 TCGOp *new_op = tcg_op_alloc(opc);
2201 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
5a18407f
RH
2202 return new_op;
2203}
2204
2205TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
2206 TCGOpcode opc, int nargs)
2207{
15fa08f8
RH
2208 TCGOp *new_op = tcg_op_alloc(opc);
2209 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
5a18407f
RH
2210 return new_op;
2211}
2212
c70fbf0a
RH
2213#define TS_DEAD 1
2214#define TS_MEM 2
2215
5a18407f
RH
2216#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2217#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
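/* Liveness bookkeeping used below: TS_DEAD means a temp's value is not
   needed by any later op; TS_MEM means the value must (also) exist in
   its memory slot.  The per-op result is packed into op->life, where
   DEAD_ARG << i marks argument i as dying at this op and SYNC_ARG << i
   marks output i as needing a store back to memory. */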
2218
9c43b68d
AJ
2219/* liveness analysis: end of function: all temps are dead, and globals
2220 should be in memory. */
b83eabea 2221static void tcg_la_func_end(TCGContext *s)
c896fe29 2222{
b83eabea
RH
2223 int ng = s->nb_globals;
2224 int nt = s->nb_temps;
2225 int i;
2226
2227 for (i = 0; i < ng; ++i) {
2228 s->temps[i].state = TS_DEAD | TS_MEM;
2229 }
2230 for (i = ng; i < nt; ++i) {
2231 s->temps[i].state = TS_DEAD;
2232 }
c896fe29
FB
2233}
2234
9c43b68d
AJ
2235/* liveness analysis: end of basic block: all temps are dead, globals
2236 and local temps should be in memory. */
b83eabea 2237static void tcg_la_bb_end(TCGContext *s)
641d5fbe 2238{
b83eabea
RH
2239 int ng = s->nb_globals;
2240 int nt = s->nb_temps;
2241 int i;
641d5fbe 2242
b83eabea
RH
2243 for (i = 0; i < ng; ++i) {
2244 s->temps[i].state = TS_DEAD | TS_MEM;
2245 }
2246 for (i = ng; i < nt; ++i) {
2247 s->temps[i].state = (s->temps[i].temp_local
2248 ? TS_DEAD | TS_MEM
2249 : TS_DEAD);
641d5fbe
FB
2250 }
2251}
2252
a1b3c48d 2253/* Liveness analysis: update the opc_arg_life array to tell if a
c896fe29
FB
2254 given input argument is dead. Instructions updating dead
2255 temporaries are removed. */
b83eabea 2256static void liveness_pass_1(TCGContext *s)
c896fe29 2257{
c70fbf0a 2258 int nb_globals = s->nb_globals;
15fa08f8 2259 TCGOp *op, *op_prev;
a1b3c48d 2260
b83eabea 2261 tcg_la_func_end(s);
c896fe29 2262
15fa08f8 2263 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
c45cb8bb
RH
2264 int i, nb_iargs, nb_oargs;
2265 TCGOpcode opc_new, opc_new2;
2266 bool have_opc_new2;
a1b3c48d 2267 TCGLifeData arg_life = 0;
b83eabea 2268 TCGTemp *arg_ts;
c45cb8bb
RH
2269 TCGOpcode opc = op->opc;
2270 const TCGOpDef *def = &tcg_op_defs[opc];
2271
c45cb8bb 2272 switch (opc) {
c896fe29 2273 case INDEX_op_call:
c6e113f5
FB
2274 {
2275 int call_flags;
c896fe29 2276
cd9090aa
RH
2277 nb_oargs = TCGOP_CALLO(op);
2278 nb_iargs = TCGOP_CALLI(op);
efee3746 2279 call_flags = op->args[nb_oargs + nb_iargs + 1];
c6e113f5 2280
c45cb8bb 2281 /* pure functions can be removed if their result is unused */
78505279 2282 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
cf066674 2283 for (i = 0; i < nb_oargs; i++) {
b83eabea
RH
2284 arg_ts = arg_temp(op->args[i]);
2285 if (arg_ts->state != TS_DEAD) {
c6e113f5 2286 goto do_not_remove_call;
9c43b68d 2287 }
c6e113f5 2288 }
c45cb8bb 2289 goto do_remove;
c6e113f5
FB
2290 } else {
2291 do_not_remove_call:
c896fe29 2292
c6e113f5 2293 /* output args are dead */
cf066674 2294 for (i = 0; i < nb_oargs; i++) {
b83eabea
RH
2295 arg_ts = arg_temp(op->args[i]);
2296 if (arg_ts->state & TS_DEAD) {
a1b3c48d 2297 arg_life |= DEAD_ARG << i;
6b64b624 2298 }
b83eabea 2299 if (arg_ts->state & TS_MEM) {
a1b3c48d 2300 arg_life |= SYNC_ARG << i;
9c43b68d 2301 }
b83eabea 2302 arg_ts->state = TS_DEAD;
c6e113f5 2303 }
78505279 2304
78505279
AJ
2305 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2306 TCG_CALL_NO_READ_GLOBALS))) {
9c43b68d 2307 /* globals should go back to memory */
b83eabea
RH
2308 for (i = 0; i < nb_globals; i++) {
2309 s->temps[i].state = TS_DEAD | TS_MEM;
2310 }
c70fbf0a
RH
2311 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
2312 /* globals should be synced to memory */
2313 for (i = 0; i < nb_globals; i++) {
b83eabea 2314 s->temps[i].state |= TS_MEM;
c70fbf0a 2315 }
b9c18f56
AJ
2316 }
2317
c19f47bf 2318 /* record arguments that die in this helper */
cf066674 2319 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea
RH
2320 arg_ts = arg_temp(op->args[i]);
2321 if (arg_ts && arg_ts->state & TS_DEAD) {
2322 arg_life |= DEAD_ARG << i;
c6e113f5 2323 }
c6e113f5 2324 }
67cc32eb 2325 /* input arguments are live for preceding opcodes */
c70fbf0a 2326 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea
RH
2327 arg_ts = arg_temp(op->args[i]);
2328 if (arg_ts) {
2329 arg_ts->state &= ~TS_DEAD;
c70fbf0a 2330 }
c19f47bf 2331 }
c896fe29 2332 }
c896fe29 2333 }
c896fe29 2334 break;
765b842a 2335 case INDEX_op_insn_start:
c896fe29 2336 break;
5ff9d6a4 2337 case INDEX_op_discard:
5ff9d6a4 2338 /* mark the temporary as dead */
b83eabea 2339 arg_temp(op->args[0])->state = TS_DEAD;
5ff9d6a4 2340 break;
1305c451
RH
2341
2342 case INDEX_op_add2_i32:
c45cb8bb 2343 opc_new = INDEX_op_add_i32;
f1fae40c 2344 goto do_addsub2;
1305c451 2345 case INDEX_op_sub2_i32:
c45cb8bb 2346 opc_new = INDEX_op_sub_i32;
f1fae40c
RH
2347 goto do_addsub2;
2348 case INDEX_op_add2_i64:
c45cb8bb 2349 opc_new = INDEX_op_add_i64;
f1fae40c
RH
2350 goto do_addsub2;
2351 case INDEX_op_sub2_i64:
c45cb8bb 2352 opc_new = INDEX_op_sub_i64;
f1fae40c 2353 do_addsub2:
1305c451
RH
2354 nb_iargs = 4;
2355 nb_oargs = 2;
2356 /* Test if the high part of the operation is dead, but not
2357 the low part. The result can be optimized to a simple
2358 add or sub. This happens often for an x86_64 guest when
2359 the CPU mode is set to 32 bit. */
b83eabea
RH
2360 if (arg_temp(op->args[1])->state == TS_DEAD) {
2361 if (arg_temp(op->args[0])->state == TS_DEAD) {
1305c451
RH
2362 goto do_remove;
2363 }
c45cb8bb
RH
2364 /* Replace the opcode and adjust the args in place,
2365 leaving 3 unused args at the end. */
2366 op->opc = opc = opc_new;
efee3746
RH
2367 op->args[1] = op->args[2];
2368 op->args[2] = op->args[4];
1305c451
RH
2369 /* Fall through and mark the single-word operation live. */
2370 nb_iargs = 2;
2371 nb_oargs = 1;
2372 }
2373 goto do_not_remove;
2374
1414968a 2375 case INDEX_op_mulu2_i32:
c45cb8bb
RH
2376 opc_new = INDEX_op_mul_i32;
2377 opc_new2 = INDEX_op_muluh_i32;
2378 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
03271524 2379 goto do_mul2;
f1fae40c 2380 case INDEX_op_muls2_i32:
c45cb8bb
RH
2381 opc_new = INDEX_op_mul_i32;
2382 opc_new2 = INDEX_op_mulsh_i32;
2383 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
f1fae40c
RH
2384 goto do_mul2;
2385 case INDEX_op_mulu2_i64:
c45cb8bb
RH
2386 opc_new = INDEX_op_mul_i64;
2387 opc_new2 = INDEX_op_muluh_i64;
2388 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
03271524 2389 goto do_mul2;
f1fae40c 2390 case INDEX_op_muls2_i64:
c45cb8bb
RH
2391 opc_new = INDEX_op_mul_i64;
2392 opc_new2 = INDEX_op_mulsh_i64;
2393 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
03271524 2394 goto do_mul2;
f1fae40c 2395 do_mul2:
1414968a
RH
2396 nb_iargs = 2;
2397 nb_oargs = 2;
b83eabea
RH
2398 if (arg_temp(op->args[1])->state == TS_DEAD) {
2399 if (arg_temp(op->args[0])->state == TS_DEAD) {
03271524 2400 /* Both parts of the operation are dead. */
1414968a
RH
2401 goto do_remove;
2402 }
03271524 2403 /* The high part of the operation is dead; generate the low. */
c45cb8bb 2404 op->opc = opc = opc_new;
efee3746
RH
2405 op->args[1] = op->args[2];
2406 op->args[2] = op->args[3];
b83eabea 2407 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
c45cb8bb
RH
2408 /* The low part of the operation is dead; generate the high. */
2409 op->opc = opc = opc_new2;
efee3746
RH
2410 op->args[0] = op->args[1];
2411 op->args[1] = op->args[2];
2412 op->args[2] = op->args[3];
03271524
RH
2413 } else {
2414 goto do_not_remove;
1414968a 2415 }
03271524
RH
2416 /* Mark the single-word operation live. */
2417 nb_oargs = 1;
1414968a
RH
2418 goto do_not_remove;
2419
c896fe29 2420 default:
1305c451 2421 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
49516bc0
AJ
2422 nb_iargs = def->nb_iargs;
2423 nb_oargs = def->nb_oargs;
c896fe29 2424
49516bc0
AJ
2425 /* Test if the operation can be removed because all
2426 its outputs are dead. We assume that nb_oargs == 0
2427 implies side effects */
2428 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
c45cb8bb 2429 for (i = 0; i < nb_oargs; i++) {
b83eabea 2430 if (arg_temp(op->args[i])->state != TS_DEAD) {
49516bc0 2431 goto do_not_remove;
9c43b68d 2432 }
49516bc0 2433 }
1305c451 2434 do_remove:
0c627cdc 2435 tcg_op_remove(s, op);
49516bc0
AJ
2436 } else {
2437 do_not_remove:
49516bc0 2438 /* output args are dead */
c45cb8bb 2439 for (i = 0; i < nb_oargs; i++) {
b83eabea
RH
2440 arg_ts = arg_temp(op->args[i]);
2441 if (arg_ts->state & TS_DEAD) {
a1b3c48d 2442 arg_life |= DEAD_ARG << i;
6b64b624 2443 }
b83eabea 2444 if (arg_ts->state & TS_MEM) {
a1b3c48d 2445 arg_life |= SYNC_ARG << i;
9c43b68d 2446 }
b83eabea 2447 arg_ts->state = TS_DEAD;
49516bc0
AJ
2448 }
2449
2450 /* if end of basic block, update */
2451 if (def->flags & TCG_OPF_BB_END) {
b83eabea 2452 tcg_la_bb_end(s);
3d5c5f87
AJ
2453 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2454 /* globals should be synced to memory */
c70fbf0a 2455 for (i = 0; i < nb_globals; i++) {
b83eabea 2456 s->temps[i].state |= TS_MEM;
c70fbf0a 2457 }
49516bc0
AJ
2458 }
2459
c19f47bf 2460 /* record arguments that die in this opcode */
c45cb8bb 2461 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
b83eabea
RH
2462 arg_ts = arg_temp(op->args[i]);
2463 if (arg_ts->state & TS_DEAD) {
a1b3c48d 2464 arg_life |= DEAD_ARG << i;
c896fe29 2465 }
c19f47bf 2466 }
67cc32eb 2467 /* input arguments are live for preceding opcodes */
c19f47bf 2468 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
b83eabea 2469 arg_temp(op->args[i])->state &= ~TS_DEAD;
c896fe29 2470 }
c896fe29
FB
2471 }
2472 break;
2473 }
bee158cb 2474 op->life = arg_life;
1ff0a2c5 2475 }
c896fe29 2476}
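/* Example of the in-place rewrites performed by liveness_pass_1: if
   only the high half of a double-word add is dead,

       add2_i32 lo,hi, al,ah, bl,bh        (hi never used again)

   becomes

       add_i32 lo, al, bl

   with three trailing args left unused; mulu2/muls2 similarly degrade
   to mul, muluh or mulsh depending on which half survives. */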
c896fe29 2477
5a18407f 2478/* Liveness analysis: Convert indirect regs to direct temporaries. */
b83eabea 2479static bool liveness_pass_2(TCGContext *s)
5a18407f
RH
2480{
2481 int nb_globals = s->nb_globals;
15fa08f8 2482 int nb_temps, i;
5a18407f 2483 bool changes = false;
15fa08f8 2484 TCGOp *op, *op_next;
5a18407f 2485
5a18407f
RH
2486 /* Create a temporary for each indirect global. */
2487 for (i = 0; i < nb_globals; ++i) {
2488 TCGTemp *its = &s->temps[i];
2489 if (its->indirect_reg) {
2490 TCGTemp *dts = tcg_temp_alloc(s);
2491 dts->type = its->type;
2492 dts->base_type = its->base_type;
b83eabea
RH
2493 its->state_ptr = dts;
2494 } else {
2495 its->state_ptr = NULL;
5a18407f 2496 }
b83eabea
RH
2497 /* All globals begin dead. */
2498 its->state = TS_DEAD;
2499 }
2500 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2501 TCGTemp *its = &s->temps[i];
2502 its->state_ptr = NULL;
2503 its->state = TS_DEAD;
5a18407f 2504 }
5a18407f 2505
15fa08f8 2506 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
5a18407f
RH
2507 TCGOpcode opc = op->opc;
2508 const TCGOpDef *def = &tcg_op_defs[opc];
2509 TCGLifeData arg_life = op->life;
2510 int nb_iargs, nb_oargs, call_flags;
b83eabea 2511 TCGTemp *arg_ts, *dir_ts;
5a18407f 2512
5a18407f 2513 if (opc == INDEX_op_call) {
cd9090aa
RH
2514 nb_oargs = TCGOP_CALLO(op);
2515 nb_iargs = TCGOP_CALLI(op);
efee3746 2516 call_flags = op->args[nb_oargs + nb_iargs + 1];
5a18407f
RH
2517 } else {
2518 nb_iargs = def->nb_iargs;
2519 nb_oargs = def->nb_oargs;
2520
2521 /* Set flags similar to how calls require. */
2522 if (def->flags & TCG_OPF_BB_END) {
2523 /* Like writing globals: save_globals */
2524 call_flags = 0;
2525 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2526 /* Like reading globals: sync_globals */
2527 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2528 } else {
2529 /* No effect on globals. */
2530 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2531 TCG_CALL_NO_WRITE_GLOBALS);
2532 }
2533 }
2534
2535 /* Make sure that input arguments are available. */
2536 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea
RH
2537 arg_ts = arg_temp(op->args[i]);
2538 if (arg_ts) {
2539 dir_ts = arg_ts->state_ptr;
2540 if (dir_ts && arg_ts->state == TS_DEAD) {
2541 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
5a18407f
RH
2542 ? INDEX_op_ld_i32
2543 : INDEX_op_ld_i64);
2544 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
5a18407f 2545
b83eabea
RH
2546 lop->args[0] = temp_arg(dir_ts);
2547 lop->args[1] = temp_arg(arg_ts->mem_base);
2548 lop->args[2] = arg_ts->mem_offset;
5a18407f
RH
2549
2550 /* Loaded, but synced with memory. */
b83eabea 2551 arg_ts->state = TS_MEM;
5a18407f
RH
2552 }
2553 }
2554 }
2555
2556 /* Perform input replacement, and mark inputs that became dead.
2557 No action is required except keeping temp_state up to date
2558 so that we reload when needed. */
2559 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea
RH
2560 arg_ts = arg_temp(op->args[i]);
2561 if (arg_ts) {
2562 dir_ts = arg_ts->state_ptr;
2563 if (dir_ts) {
2564 op->args[i] = temp_arg(dir_ts);
5a18407f
RH
2565 changes = true;
2566 if (IS_DEAD_ARG(i)) {
b83eabea 2567 arg_ts->state = TS_DEAD;
5a18407f
RH
2568 }
2569 }
2570 }
2571 }
2572
2573 /* Liveness analysis should ensure that the following are
2574 all correct, for call sites and basic block end points. */
2575 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2576 /* Nothing to do */
2577 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2578 for (i = 0; i < nb_globals; ++i) {
2579 /* Liveness should see that globals are synced back,
2580 that is, either TS_DEAD or TS_MEM. */
b83eabea
RH
2581 arg_ts = &s->temps[i];
2582 tcg_debug_assert(arg_ts->state_ptr == 0
2583 || arg_ts->state != 0);
5a18407f
RH
2584 }
2585 } else {
2586 for (i = 0; i < nb_globals; ++i) {
2587 /* Liveness should see that globals are saved back,
2588 that is, TS_DEAD, waiting to be reloaded. */
b83eabea
RH
2589 arg_ts = &s->temps[i];
2590 tcg_debug_assert(arg_ts->state_ptr == 0
2591 || arg_ts->state == TS_DEAD);
5a18407f
RH
2592 }
2593 }
2594
2595 /* Outputs become available. */
2596 for (i = 0; i < nb_oargs; i++) {
b83eabea
RH
2597 arg_ts = arg_temp(op->args[i]);
2598 dir_ts = arg_ts->state_ptr;
2599 if (!dir_ts) {
5a18407f
RH
2600 continue;
2601 }
b83eabea 2602 op->args[i] = temp_arg(dir_ts);
5a18407f
RH
2603 changes = true;
2604
2605 /* The output is now live and modified. */
b83eabea 2606 arg_ts->state = 0;
5a18407f
RH
2607
2608 /* Sync outputs upon their last write. */
2609 if (NEED_SYNC_ARG(i)) {
b83eabea 2610 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
5a18407f
RH
2611 ? INDEX_op_st_i32
2612 : INDEX_op_st_i64);
2613 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
5a18407f 2614
b83eabea
RH
2615 sop->args[0] = temp_arg(dir_ts);
2616 sop->args[1] = temp_arg(arg_ts->mem_base);
2617 sop->args[2] = arg_ts->mem_offset;
5a18407f 2618
b83eabea 2619 arg_ts->state = TS_MEM;
5a18407f
RH
2620 }
2621 /* Drop outputs that are dead. */
2622 if (IS_DEAD_ARG(i)) {
b83eabea 2623 arg_ts->state = TS_DEAD;
5a18407f
RH
2624 }
2625 }
2626 }
2627
2628 return changes;
2629}
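/* Sketch of what liveness_pass_2 emits for an indirect global G whose
   state_ptr names a direct temp D (base/offset stand for G's mem_base
   and mem_offset):

       ld_i32  D, base, $offset     <- inserted before G's first read
       op ...  D ...                <- uses of G rewritten to D
       st_i32  D, base, $offset     <- inserted after G's last write

   If any argument was rewritten, the caller re-runs liveness_pass_1
   over the new op sequence. */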
2630
8d8fdbae 2631#ifdef CONFIG_DEBUG_TCG
c896fe29
FB
2632static void dump_regs(TCGContext *s)
2633{
2634 TCGTemp *ts;
2635 int i;
2636 char buf[64];
2637
2638 for(i = 0; i < s->nb_temps; i++) {
2639 ts = &s->temps[i];
43439139 2640 printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
c896fe29
FB
2641 switch(ts->val_type) {
2642 case TEMP_VAL_REG:
2643 printf("%s", tcg_target_reg_names[ts->reg]);
2644 break;
2645 case TEMP_VAL_MEM:
b3a62939
RH
2646 printf("%d(%s)", (int)ts->mem_offset,
2647 tcg_target_reg_names[ts->mem_base->reg]);
c896fe29
FB
2648 break;
2649 case TEMP_VAL_CONST:
2650 printf("$0x%" TCG_PRIlx, ts->val);
2651 break;
2652 case TEMP_VAL_DEAD:
2653 printf("D");
2654 break;
2655 default:
2656 printf("???");
2657 break;
2658 }
2659 printf("\n");
2660 }
2661
2662 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
f8b2f202 2663 if (s->reg_to_temp[i] != NULL) {
c896fe29
FB
2664 printf("%s: %s\n",
2665 tcg_target_reg_names[i],
f8b2f202 2666 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
c896fe29
FB
2667 }
2668 }
2669}
2670
2671static void check_regs(TCGContext *s)
2672{
869938ae 2673 int reg;
b6638662 2674 int k;
c896fe29
FB
2675 TCGTemp *ts;
2676 char buf[64];
2677
f8b2f202
RH
2678 for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2679 ts = s->reg_to_temp[reg];
2680 if (ts != NULL) {
2681 if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
c896fe29
FB
2682 printf("Inconsistency for register %s:\n",
2683 tcg_target_reg_names[reg]);
b03cce8e 2684 goto fail;
c896fe29
FB
2685 }
2686 }
2687 }
f8b2f202 2688 for (k = 0; k < s->nb_temps; k++) {
c896fe29 2689 ts = &s->temps[k];
f8b2f202
RH
2690 if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
2691 && s->reg_to_temp[ts->reg] != ts) {
2692 printf("Inconsistency for temp %s:\n",
2693 tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
b03cce8e 2694 fail:
f8b2f202
RH
2695 printf("reg state:\n");
2696 dump_regs(s);
2697 tcg_abort();
c896fe29
FB
2698 }
2699 }
2700}
2701#endif
2702
2272e4a7 2703static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
c896fe29 2704{
9b9c37c3
RH
2705#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2706 /* Sparc64 stack is accessed with offset of 2047 */
b591dc59
BS
2707 s->current_frame_offset = (s->current_frame_offset +
2708 (tcg_target_long)sizeof(tcg_target_long) - 1) &
2709 ~(sizeof(tcg_target_long) - 1);
f44c9960 2710#endif
b591dc59
BS
2711 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2712 s->frame_end) {
5ff9d6a4 2713 tcg_abort();
b591dc59 2714 }
c896fe29 2715 ts->mem_offset = s->current_frame_offset;
b3a62939 2716 ts->mem_base = s->frame_temp;
c896fe29 2717 ts->mem_allocated = 1;
e2c6d1b4 2718 s->current_frame_offset += sizeof(tcg_target_long);
c896fe29
FB
2719}
2720
b3915dbb
RH
2721static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2722
59d7c14e
RH
2723/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2724 mark it free; otherwise mark it dead. */
2725static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 2726{
59d7c14e
RH
2727 if (ts->fixed_reg) {
2728 return;
2729 }
2730 if (ts->val_type == TEMP_VAL_REG) {
2731 s->reg_to_temp[ts->reg] = NULL;
2732 }
2733 ts->val_type = (free_or_dead < 0
2734 || ts->temp_local
fa477d25 2735 || ts->temp_global
59d7c14e
RH
2736 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2737}
7f6ceedf 2738
59d7c14e
RH
2739/* Mark a temporary as dead. */
2740static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2741{
2742 temp_free_or_dead(s, ts, 1);
2743}
2744
2745/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2746 register needs to be allocated to store a constant. If 'free_or_dead'
2747 is non-zero, subsequently release the temporary; if it is positive, the
2748 temp is dead; if it is negative, the temp is free. */
2749static void temp_sync(TCGContext *s, TCGTemp *ts,
2750 TCGRegSet allocated_regs, int free_or_dead)
2751{
2752 if (ts->fixed_reg) {
2753 return;
2754 }
2755 if (!ts->mem_coherent) {
7f6ceedf 2756 if (!ts->mem_allocated) {
2272e4a7 2757 temp_allocate_frame(s, ts);
59d7c14e 2758 }
59d7c14e
RH
2759 switch (ts->val_type) {
2760 case TEMP_VAL_CONST:
2761 /* If we're going to free the temp immediately, then we won't
2762 require it later in a register, so attempt to store the
2763 constant to memory directly. */
2764 if (free_or_dead
2765 && tcg_out_sti(s, ts->type, ts->val,
2766 ts->mem_base->reg, ts->mem_offset)) {
2767 break;
2768 }
2769 temp_load(s, ts, tcg_target_available_regs[ts->type],
2770 allocated_regs);
2771 /* fallthrough */
2772
2773 case TEMP_VAL_REG:
2774 tcg_out_st(s, ts->type, ts->reg,
2775 ts->mem_base->reg, ts->mem_offset);
2776 break;
2777
2778 case TEMP_VAL_MEM:
2779 break;
2780
2781 case TEMP_VAL_DEAD:
2782 default:
2783 tcg_abort();
2784 }
2785 ts->mem_coherent = 1;
2786 }
2787 if (free_or_dead) {
2788 temp_free_or_dead(s, ts, free_or_dead);
7f6ceedf 2789 }
7f6ceedf
AJ
2790}
2791
c896fe29 2792/* free register 'reg' by spilling the corresponding temporary if necessary */
b3915dbb 2793static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
c896fe29 2794{
f8b2f202 2795 TCGTemp *ts = s->reg_to_temp[reg];
f8b2f202 2796 if (ts != NULL) {
59d7c14e 2797 temp_sync(s, ts, allocated_regs, -1);
c896fe29
FB
2798 }
2799}
2800
2801/* Allocate a register belonging to reg1 & ~reg2 */
b3915dbb 2802static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
91478cef 2803 TCGRegSet allocated_regs, bool rev)
c896fe29 2804{
91478cef
RH
2805 int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
2806 const int *order;
b6638662 2807 TCGReg reg;
c896fe29
FB
2808 TCGRegSet reg_ct;
2809
07ddf036 2810 reg_ct = desired_regs & ~allocated_regs;
91478cef 2811 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
c896fe29
FB
2812
2813 /* first try free registers */
91478cef
RH
2814 for(i = 0; i < n; i++) {
2815 reg = order[i];
f8b2f202 2816 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
c896fe29
FB
2817 return reg;
2818 }
2819
2820 /* XXX: do better spill choice */
91478cef
RH
2821 for(i = 0; i < n; i++) {
2822 reg = order[i];
c896fe29 2823 if (tcg_regset_test_reg(reg_ct, reg)) {
b3915dbb 2824 tcg_reg_free(s, reg, allocated_regs);
c896fe29
FB
2825 return reg;
2826 }
2827 }
2828
2829 tcg_abort();
2830}
2831
40ae5c62
RH
2832/* Make sure the temporary is in a register. If needed, allocate the register
2833 from DESIRED while avoiding ALLOCATED. */
2834static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2835 TCGRegSet allocated_regs)
2836{
2837 TCGReg reg;
2838
2839 switch (ts->val_type) {
2840 case TEMP_VAL_REG:
2841 return;
2842 case TEMP_VAL_CONST:
91478cef 2843 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
40ae5c62
RH
2844 tcg_out_movi(s, ts->type, reg, ts->val);
2845 ts->mem_coherent = 0;
2846 break;
2847 case TEMP_VAL_MEM:
91478cef 2848 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
40ae5c62
RH
2849 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2850 ts->mem_coherent = 1;
2851 break;
2852 case TEMP_VAL_DEAD:
2853 default:
2854 tcg_abort();
2855 }
2856 ts->reg = reg;
2857 ts->val_type = TEMP_VAL_REG;
2858 s->reg_to_temp[reg] = ts;
2859}
2860
59d7c14e
RH
2861/* Save a temporary to memory. 'allocated_regs' is used in case a
2862 temporary register needs to be allocated to store a constant. */
2863static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1ad80729 2864{
5a18407f
RH
2865 /* The liveness analysis already ensures that globals are back
2866 in memory. Keep a tcg_debug_assert for safety. */
2867 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
1ad80729
AJ
2868}
2869
9814dd27 2870/* save globals to their canonical location and assume they can be
e8996ee0
FB
2871 modified by the following code. 'allocated_regs' is used in case a
2872 temporary register needs to be allocated to store a constant. */
2873static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 2874{
ac3b8891 2875 int i, n;
c896fe29 2876
ac3b8891 2877 for (i = 0, n = s->nb_globals; i < n; i++) {
b13eb728 2878 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 2879 }
e5097dc8
FB
2880}
2881
3d5c5f87
AJ
2882/* sync globals to their canonical location and assume they can be
2883 read by the following code. 'allocated_regs' is used in case a
2884 temporary register needs to be allocated to store a constant. */
2885static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2886{
ac3b8891 2887 int i, n;
3d5c5f87 2888
ac3b8891 2889 for (i = 0, n = s->nb_globals; i < n; i++) {
12b9b11a 2890 TCGTemp *ts = &s->temps[i];
5a18407f
RH
2891 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2892 || ts->fixed_reg
2893 || ts->mem_coherent);
3d5c5f87
AJ
2894 }
2895}
2896
e5097dc8 2897/* at the end of a basic block, we assume all temporaries are dead and
e8996ee0
FB
2898 all globals are stored at their canonical location. */
2899static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
e5097dc8 2900{
e5097dc8
FB
2901 int i;
2902
b13eb728
RH
2903 for (i = s->nb_globals; i < s->nb_temps; i++) {
2904 TCGTemp *ts = &s->temps[i];
641d5fbe 2905 if (ts->temp_local) {
b13eb728 2906 temp_save(s, ts, allocated_regs);
641d5fbe 2907 } else {
5a18407f
RH
2908 /* The liveness analysis already ensures that temps are dead.
2909 Keep a tcg_debug_assert for safety. */
2910 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c896fe29
FB
2911 }
2912 }
e8996ee0
FB
2913
2914 save_globals(s, allocated_regs);
c896fe29
FB
2915}
2916
0fe4fca4
PB
2917static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
2918 tcg_target_ulong val, TCGLifeData arg_life)
e8996ee0 2919{
e8996ee0 2920 if (ots->fixed_reg) {
59d7c14e 2921 /* For fixed registers, we do not do any constant propagation. */
e8996ee0 2922 tcg_out_movi(s, ots->type, ots->reg, val);
59d7c14e 2923 return;
e8996ee0 2924 }
59d7c14e
RH
2925
2926 /* The movi is not explicitly generated here. */
2927 if (ots->val_type == TEMP_VAL_REG) {
2928 s->reg_to_temp[ots->reg] = NULL;
ec7a869d 2929 }
59d7c14e
RH
2930 ots->val_type = TEMP_VAL_CONST;
2931 ots->val = val;
2932 ots->mem_coherent = 0;
2933 if (NEED_SYNC_ARG(0)) {
2934 temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
2935 } else if (IS_DEAD_ARG(0)) {
f8bf00f1 2936 temp_dead(s, ots);
4c4e1ab2 2937 }
e8996ee0
FB
2938}
2939
dd186292 2940static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
0fe4fca4 2941{
43439139 2942 TCGTemp *ots = arg_temp(op->args[0]);
dd186292 2943 tcg_target_ulong val = op->args[1];
0fe4fca4 2944
dd186292 2945 tcg_reg_alloc_do_movi(s, ots, val, op->life);
0fe4fca4
PB
2946}
2947
dd186292 2948static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
c896fe29 2949{
dd186292 2950 const TCGLifeData arg_life = op->life;
c29c1d7e 2951 TCGRegSet allocated_regs;
c896fe29 2952 TCGTemp *ts, *ots;
450445d5 2953 TCGType otype, itype;
c896fe29 2954
d21369f5 2955 allocated_regs = s->reserved_regs;
43439139
RH
2956 ots = arg_temp(op->args[0]);
2957 ts = arg_temp(op->args[1]);
450445d5
RH
2958
2959 /* Note that otype != itype for no-op truncation. */
2960 otype = ots->type;
2961 itype = ts->type;
c29c1d7e 2962
0fe4fca4
PB
2963 if (ts->val_type == TEMP_VAL_CONST) {
2964 /* propagate constant or generate sti */
2965 tcg_target_ulong val = ts->val;
2966 if (IS_DEAD_ARG(1)) {
2967 temp_dead(s, ts);
2968 }
2969 tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2970 return;
2971 }
2972
2973 /* If the source value is in memory we're going to be forced
2974 to have it in a register in order to perform the copy. Copy
2975 the SOURCE value into its own register first, that way we
2976 don't have to reload SOURCE the next time it is used. */
2977 if (ts->val_type == TEMP_VAL_MEM) {
40ae5c62 2978 temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
c29c1d7e 2979 }
c896fe29 2980
0fe4fca4 2981 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
c29c1d7e
AJ
2982 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2983 /* mov to a non-saved dead register makes no sense (even with
2984 liveness analysis disabled). */
eabb7b91 2985 tcg_debug_assert(NEED_SYNC_ARG(0));
c29c1d7e 2986 if (!ots->mem_allocated) {
2272e4a7 2987 temp_allocate_frame(s, ots);
c29c1d7e 2988 }
b3a62939 2989 tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
c29c1d7e 2990 if (IS_DEAD_ARG(1)) {
f8bf00f1 2991 temp_dead(s, ts);
c29c1d7e 2992 }
f8bf00f1 2993 temp_dead(s, ots);
c29c1d7e 2994 } else {
866cb6cb 2995 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
c896fe29 2996 /* the mov can be suppressed */
c29c1d7e 2997 if (ots->val_type == TEMP_VAL_REG) {
f8b2f202 2998 s->reg_to_temp[ots->reg] = NULL;
c29c1d7e
AJ
2999 }
3000 ots->reg = ts->reg;
f8bf00f1 3001 temp_dead(s, ts);
c896fe29 3002 } else {
c29c1d7e
AJ
3003 if (ots->val_type != TEMP_VAL_REG) {
3004 /* When allocating a new register, make sure to not spill the
3005 input one. */
3006 tcg_regset_set_reg(allocated_regs, ts->reg);
450445d5 3007 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
91478cef 3008 allocated_regs, ots->indirect_base);
c896fe29 3009 }
450445d5 3010 tcg_out_mov(s, otype, ots->reg, ts->reg);
c896fe29 3011 }
c29c1d7e
AJ
3012 ots->val_type = TEMP_VAL_REG;
3013 ots->mem_coherent = 0;
f8b2f202 3014 s->reg_to_temp[ots->reg] = ots;
c29c1d7e 3015 if (NEED_SYNC_ARG(0)) {
59d7c14e 3016 temp_sync(s, ots, allocated_regs, 0);
c896fe29 3017 }
ec7a869d 3018 }
c896fe29
FB
3019}
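/* Note on the "mov can be suppressed" path above: when the source dies
   at the mov and neither temp is a fixed register, no host instruction
   is emitted at all -- the destination simply inherits the source's
   register, so e.g. "mov_i32 tmp1,tmp0" with tmp0 dead is free. */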
3020
dd186292 3021static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 3022{
dd186292
RH
3023 const TCGLifeData arg_life = op->life;
3024 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
3025 TCGRegSet i_allocated_regs;
3026 TCGRegSet o_allocated_regs;
b6638662
RH
3027 int i, k, nb_iargs, nb_oargs;
3028 TCGReg reg;
c896fe29
FB
3029 TCGArg arg;
3030 const TCGArgConstraint *arg_ct;
3031 TCGTemp *ts;
3032 TCGArg new_args[TCG_MAX_OP_ARGS];
3033 int const_args[TCG_MAX_OP_ARGS];
3034
3035 nb_oargs = def->nb_oargs;
3036 nb_iargs = def->nb_iargs;
3037
3038 /* copy constants */
3039 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 3040 op->args + nb_oargs + nb_iargs,
c896fe29
FB
3041 sizeof(TCGArg) * def->nb_cargs);
3042
d21369f5
RH
3043 i_allocated_regs = s->reserved_regs;
3044 o_allocated_regs = s->reserved_regs;
82790a87 3045
c896fe29 3046 /* satisfy input constraints */
dd186292 3047 for (k = 0; k < nb_iargs; k++) {
c896fe29 3048 i = def->sorted_args[nb_oargs + k];
dd186292 3049 arg = op->args[i];
c896fe29 3050 arg_ct = &def->args_ct[i];
43439139 3051 ts = arg_temp(arg);
40ae5c62
RH
3052
3053 if (ts->val_type == TEMP_VAL_CONST
3054 && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
3055 /* constant is OK for instruction */
3056 const_args[i] = 1;
3057 new_args[i] = ts->val;
3058 goto iarg_end;
c896fe29 3059 }
40ae5c62 3060
82790a87 3061 temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
40ae5c62 3062
5ff9d6a4
FB
3063 if (arg_ct->ct & TCG_CT_IALIAS) {
3064 if (ts->fixed_reg) {
3065 /* if fixed register, we must allocate a new register
3066 if the alias is not the same register */
dd186292 3067 if (arg != op->args[arg_ct->alias_index])
5ff9d6a4
FB
3068 goto allocate_in_reg;
3069 } else {
3070 /* if the input is aliased to an output and if it is
3071 not dead after the instruction, we must allocate
3072 a new register and move it */
866cb6cb 3073 if (!IS_DEAD_ARG(i)) {
5ff9d6a4 3074 goto allocate_in_reg;
866cb6cb 3075 }
7e1df267
AJ
3076 /* check if the current register has already been allocated
3077 for another input aliased to an output */
3078 int k2, i2;
3079 for (k2 = 0 ; k2 < k ; k2++) {
3080 i2 = def->sorted_args[nb_oargs + k2];
3081 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
3082 (new_args[i2] == ts->reg)) {
3083 goto allocate_in_reg;
3084 }
3085 }
5ff9d6a4 3086 }
c896fe29
FB
3087 }
3088 reg = ts->reg;
3089 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
3090 /* nothing to do : the constraint is satisfied */
3091 } else {
3092 allocate_in_reg:
3093 /* allocate a new register matching the constraint
3094 and move the temporary register into it */
82790a87 3095 reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
91478cef 3096 ts->indirect_base);
3b6dac34 3097 tcg_out_mov(s, ts->type, reg, ts->reg);
c896fe29 3098 }
c896fe29
FB
3099 new_args[i] = reg;
3100 const_args[i] = 0;
82790a87 3101 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29
FB
3102 iarg_end: ;
3103 }
3104
a52ad07e
AJ
3105 /* mark dead temporaries and free the associated registers */
3106 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3107 if (IS_DEAD_ARG(i)) {
43439139 3108 temp_dead(s, arg_temp(op->args[i]));
a52ad07e
AJ
3109 }
3110 }
3111
e8996ee0 3112 if (def->flags & TCG_OPF_BB_END) {
82790a87 3113 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 3114 } else {
e8996ee0
FB
3115 if (def->flags & TCG_OPF_CALL_CLOBBER) {
3116 /* XXX: permit generic clobber register list ? */
c8074023
RH
3117 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3118 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 3119 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 3120 }
c896fe29 3121 }
3d5c5f87
AJ
3122 }
3123 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3124 /* sync globals if the op has side effects and might trigger
3125 an exception. */
82790a87 3126 sync_globals(s, i_allocated_regs);
c896fe29 3127 }
e8996ee0
FB
3128
3129 /* satisfy the output constraints */
e8996ee0
FB
3130 for(k = 0; k < nb_oargs; k++) {
3131 i = def->sorted_args[k];
dd186292 3132 arg = op->args[i];
e8996ee0 3133 arg_ct = &def->args_ct[i];
43439139 3134 ts = arg_temp(arg);
17280ff4
RH
3135 if ((arg_ct->ct & TCG_CT_ALIAS)
3136 && !const_args[arg_ct->alias_index]) {
e8996ee0 3137 reg = new_args[arg_ct->alias_index];
82790a87
RH
3138 } else if (arg_ct->ct & TCG_CT_NEWREG) {
3139 reg = tcg_reg_alloc(s, arg_ct->u.regs,
3140 i_allocated_regs | o_allocated_regs,
3141 ts->indirect_base);
e8996ee0
FB
3142 } else {
3143 /* if fixed register, we try to use it */
3144 reg = ts->reg;
3145 if (ts->fixed_reg &&
3146 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
3147 goto oarg_end;
3148 }
82790a87 3149 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
91478cef 3150 ts->indirect_base);
c896fe29 3151 }
82790a87 3152 tcg_regset_set_reg(o_allocated_regs, reg);
e8996ee0
FB
3153 /* if a fixed register is used, then a move will be done afterwards */
3154 if (!ts->fixed_reg) {
ec7a869d 3155 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 3156 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
3157 }
3158 ts->val_type = TEMP_VAL_REG;
3159 ts->reg = reg;
3160 /* temp value is modified, so the value kept in memory is
3161 potentially not the same */
3162 ts->mem_coherent = 0;
f8b2f202 3163 s->reg_to_temp[reg] = ts;
e8996ee0
FB
3164 }
3165 oarg_end:
3166 new_args[i] = reg;
c896fe29 3167 }
c896fe29
FB
3168 }
3169
c896fe29 3170 /* emit instruction */
d2fd745f
RH
3171 if (def->flags & TCG_OPF_VECTOR) {
3172 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
3173 new_args, const_args);
3174 } else {
3175 tcg_out_op(s, op->opc, new_args, const_args);
3176 }
3177
c896fe29
FB
3178 /* move the outputs in the correct register if needed */
3179 for(i = 0; i < nb_oargs; i++) {
43439139 3180 ts = arg_temp(op->args[i]);
c896fe29
FB
3181 reg = new_args[i];
3182 if (ts->fixed_reg && ts->reg != reg) {
3b6dac34 3183 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29 3184 }
ec7a869d 3185 if (NEED_SYNC_ARG(i)) {
82790a87 3186 temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
59d7c14e 3187 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 3188 temp_dead(s, ts);
ec7a869d 3189 }
c896fe29
FB
3190 }
3191}
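/* Putting tcg_reg_alloc_op() together for a hypothetical
   "sub_i32 d, a, b" constrained as { "r", "0", "ri" }: b may be passed
   through as an immediate, a is loaded into a register, d aliases a's
   register (copying a first if it is still live), the backend's
   tcg_out_op() emits the host instruction, and the dead/sync bits in
   op->life then retire or spill the outputs. */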
3192
b03cce8e
FB
3193#ifdef TCG_TARGET_STACK_GROWSUP
3194#define STACK_DIR(x) (-(x))
3195#else
3196#define STACK_DIR(x) (x)
3197#endif
3198
dd186292 3199static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
c896fe29 3200{
cd9090aa
RH
3201 const int nb_oargs = TCGOP_CALLO(op);
3202 const int nb_iargs = TCGOP_CALLI(op);
dd186292 3203 const TCGLifeData arg_life = op->life;
b6638662
RH
3204 int flags, nb_regs, i;
3205 TCGReg reg;
cf066674 3206 TCGArg arg;
c896fe29 3207 TCGTemp *ts;
d3452f1f
RH
3208 intptr_t stack_offset;
3209 size_t call_stack_size;
cf066674
RH
3210 tcg_insn_unit *func_addr;
3211 int allocate_args;
c896fe29 3212 TCGRegSet allocated_regs;
c896fe29 3213
dd186292
RH
3214 func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3215 flags = op->args[nb_oargs + nb_iargs + 1];
c896fe29 3216
6e17d0c5 3217 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
c45cb8bb
RH
3218 if (nb_regs > nb_iargs) {
3219 nb_regs = nb_iargs;
cf066674 3220 }
c896fe29
FB
3221
3222 /* assign stack slots first */
c45cb8bb 3223 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
c896fe29
FB
3224 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3225 ~(TCG_TARGET_STACK_ALIGN - 1);
b03cce8e
FB
3226 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3227 if (allocate_args) {
345649c0
BS
3228 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3229 preallocate call stack */
3230 tcg_abort();
b03cce8e 3231 }
39cf05d3
FB
3232
3233 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
dd186292
RH
3234 for (i = nb_regs; i < nb_iargs; i++) {
3235 arg = op->args[nb_oargs + i];
39cf05d3
FB
3236#ifdef TCG_TARGET_STACK_GROWSUP
3237 stack_offset -= sizeof(tcg_target_long);
3238#endif
3239 if (arg != TCG_CALL_DUMMY_ARG) {
43439139 3240 ts = arg_temp(arg);
40ae5c62
RH
3241 temp_load(s, ts, tcg_target_available_regs[ts->type],
3242 s->reserved_regs);
3243 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
c896fe29 3244 }
39cf05d3
FB
3245#ifndef TCG_TARGET_STACK_GROWSUP
3246 stack_offset += sizeof(tcg_target_long);
3247#endif
c896fe29
FB
3248 }
3249
3250 /* assign input registers */
d21369f5 3251 allocated_regs = s->reserved_regs;
dd186292
RH
3252 for (i = 0; i < nb_regs; i++) {
3253 arg = op->args[nb_oargs + i];
39cf05d3 3254 if (arg != TCG_CALL_DUMMY_ARG) {
43439139 3255 ts = arg_temp(arg);
39cf05d3 3256 reg = tcg_target_call_iarg_regs[i];
b3915dbb 3257 tcg_reg_free(s, reg, allocated_regs);
40ae5c62 3258
39cf05d3
FB
3259 if (ts->val_type == TEMP_VAL_REG) {
3260 if (ts->reg != reg) {
3b6dac34 3261 tcg_out_mov(s, ts->type, reg, ts->reg);
39cf05d3 3262 }
39cf05d3 3263 } else {
ccb1bb66 3264 TCGRegSet arg_set = 0;
40ae5c62 3265
40ae5c62
RH
3266 tcg_regset_set_reg(arg_set, reg);
3267 temp_load(s, ts, arg_set, allocated_regs);
c896fe29 3268 }
40ae5c62 3269
39cf05d3 3270 tcg_regset_set_reg(allocated_regs, reg);
c896fe29 3271 }
c896fe29
FB
3272 }
3273
c896fe29 3274 /* mark dead temporaries and free the associated registers */
dd186292 3275 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 3276 if (IS_DEAD_ARG(i)) {
43439139 3277 temp_dead(s, arg_temp(op->args[i]));
c896fe29
FB
3278 }
3279 }
3280
3281 /* clobber call registers */
c8074023
RH
3282 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3283 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 3284 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
3285 }
3286 }
78505279
AJ
3287
3288 /* Save globals if they might be written by the helper, sync them if
3289 they might be read. */
3290 if (flags & TCG_CALL_NO_READ_GLOBALS) {
3291 /* Nothing to do */
3292 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3293 sync_globals(s, allocated_regs);
3294 } else {
b9c18f56
AJ
3295 save_globals(s, allocated_regs);
3296 }
c896fe29 3297
cf066674 3298 tcg_out_call(s, func_addr);
c896fe29
FB
3299
3300 /* assign output registers and emit moves if needed */
3301 for(i = 0; i < nb_oargs; i++) {
dd186292 3302 arg = op->args[i];
43439139 3303 ts = arg_temp(arg);
c896fe29 3304 reg = tcg_target_call_oarg_regs[i];
eabb7b91 3305 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
34b1a49c 3306
c896fe29
FB
3307 if (ts->fixed_reg) {
3308 if (ts->reg != reg) {
3b6dac34 3309 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29
FB
3310 }
3311 } else {
ec7a869d 3312 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 3313 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
3314 }
3315 ts->val_type = TEMP_VAL_REG;
3316 ts->reg = reg;
3317 ts->mem_coherent = 0;
f8b2f202 3318 s->reg_to_temp[reg] = ts;
ec7a869d 3319 if (NEED_SYNC_ARG(i)) {
59d7c14e
RH
3320 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
3321 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 3322 temp_dead(s, ts);
8c11ad25 3323 }
c896fe29
FB
3324 }
3325 }
c896fe29
FB
3326}
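/* Shape of a call emitted by tcg_reg_alloc_call(), in order: stack
   slots are filled for arguments beyond tcg_target_call_iarg_regs[],
   the remaining arguments are moved or loaded into the target's
   argument registers, dying inputs are retired, every call-clobbered
   register is freed, globals are saved or synced according to the
   TCG_CALL_NO_* flags, and after tcg_out_call() the results are bound
   to tcg_target_call_oarg_regs[]. */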
3327
3328#ifdef CONFIG_PROFILER
3329
c3fac113
EC
3330/* avoid copy/paste errors */
3331#define PROF_ADD(to, from, field) \
3332 do { \
3333 (to)->field += atomic_read(&((from)->field)); \
3334 } while (0)
3335
3336#define PROF_MAX(to, from, field) \
3337 do { \
3338 typeof((from)->field) val__ = atomic_read(&((from)->field)); \
3339 if (val__ > (to)->field) { \
3340 (to)->field = val__; \
3341 } \
3342 } while (0)
3343
3344/* Pass in a zeroed @prof */
3345static inline
3346void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
3347{
3468b59e 3348 unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
c3fac113
EC
3349 unsigned int i;
3350
3468b59e
EC
3351 for (i = 0; i < n_ctxs; i++) {
3352 TCGContext *s = atomic_read(&tcg_ctxs[i]);
3353 const TCGProfile *orig = &s->prof;
c3fac113
EC
3354
3355 if (counters) {
3356 PROF_ADD(prof, orig, tb_count1);
3357 PROF_ADD(prof, orig, tb_count);
3358 PROF_ADD(prof, orig, op_count);
3359 PROF_MAX(prof, orig, op_count_max);
3360 PROF_ADD(prof, orig, temp_count);
3361 PROF_MAX(prof, orig, temp_count_max);
3362 PROF_ADD(prof, orig, del_op_count);
3363 PROF_ADD(prof, orig, code_in_len);
3364 PROF_ADD(prof, orig, code_out_len);
3365 PROF_ADD(prof, orig, search_out_len);
3366 PROF_ADD(prof, orig, interm_time);
3367 PROF_ADD(prof, orig, code_time);
3368 PROF_ADD(prof, orig, la_time);
3369 PROF_ADD(prof, orig, opt_time);
3370 PROF_ADD(prof, orig, restore_count);
3371 PROF_ADD(prof, orig, restore_time);
3372 }
3373 if (table) {
3374 int i;
3375
3376 for (i = 0; i < NB_OPS; i++) {
3377 PROF_ADD(prof, orig, table_op_count[i]);
3378 }
3379 }
3380 }
3381}
3382
3383#undef PROF_ADD
3384#undef PROF_MAX
3385
3386static void tcg_profile_snapshot_counters(TCGProfile *prof)
3387{
3388 tcg_profile_snapshot(prof, true, false);
3389}
3390
3391static void tcg_profile_snapshot_table(TCGProfile *prof)
3392{
3393 tcg_profile_snapshot(prof, false, true);
3394}
c896fe29 3395
246ae24d 3396void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
c896fe29 3397{
c3fac113 3398 TCGProfile prof = {};
c896fe29 3399 int i;
d70724ce 3400
c3fac113 3401 tcg_profile_snapshot_table(&prof);
15fc7daa 3402 for (i = 0; i < NB_OPS; i++) {
246ae24d 3403 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
c3fac113 3404 prof.table_op_count[i]);
c896fe29 3405 }
c896fe29 3406}
246ae24d
MF
3407#else
3408void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
3409{
3410 cpu_fprintf(f, "[TCG profiler not compiled]\n");
3411}
c896fe29
FB
3412#endif
3413
3414
5bd2ec3d 3415int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
c896fe29 3416{
c3fac113
EC
3417#ifdef CONFIG_PROFILER
3418 TCGProfile *prof = &s->prof;
3419#endif
15fa08f8
RH
3420 int i, num_insns;
3421 TCGOp *op;
c896fe29 3422
04fe6400
RH
3423#ifdef CONFIG_PROFILER
3424 {
3425 int n = 0;
3426
15fa08f8
RH
3427 QTAILQ_FOREACH(op, &s->ops, link) {
3428 n++;
3429 }
c3fac113
EC
3430 atomic_set(&prof->op_count, prof->op_count + n);
3431 if (n > prof->op_count_max) {
3432 atomic_set(&prof->op_count_max, n);
04fe6400
RH
3433 }
3434
3435 n = s->nb_temps;
c3fac113
EC
3436 atomic_set(&prof->temp_count, prof->temp_count + n);
3437 if (n > prof->temp_count_max) {
3438 atomic_set(&prof->temp_count_max, n);
04fe6400
RH
3439 }
3440 }
3441#endif
3442
c896fe29 3443#ifdef DEBUG_DISAS
d977e1c2
AB
3444 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
3445 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 3446 qemu_log_lock();
93fcfe39 3447 qemu_log("OP:\n");
eeacee4d 3448 tcg_dump_ops(s);
93fcfe39 3449 qemu_log("\n");
1ee73216 3450 qemu_log_unlock();
c896fe29
FB
3451 }
3452#endif
3453
c5cc28ff 3454#ifdef CONFIG_PROFILER
c3fac113 3455 atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
c5cc28ff
AJ
3456#endif
3457
8f2e8c07 3458#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 3459 tcg_optimize(s);
8f2e8c07
KB
3460#endif
3461
a23a9ec6 3462#ifdef CONFIG_PROFILER
c3fac113
EC
3463 atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
3464 atomic_set(&prof->la_time, prof->la_time - profile_getclock());
a23a9ec6 3465#endif
c5cc28ff 3466
b83eabea 3467 liveness_pass_1(s);
5a18407f 3468
b83eabea 3469 if (s->nb_indirects > 0) {
5a18407f 3470#ifdef DEBUG_DISAS
b83eabea
RH
3471 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
3472 && qemu_log_in_addr_range(tb->pc))) {
3473 qemu_log_lock();
3474 qemu_log("OP before indirect lowering:\n");
3475 tcg_dump_ops(s);
3476 qemu_log("\n");
3477 qemu_log_unlock();
3478 }
5a18407f 3479#endif
b83eabea
RH
3480 /* Replace indirect temps with direct temps. */
3481 if (liveness_pass_2(s)) {
3482 /* If changes were made, re-run liveness. */
3483 liveness_pass_1(s);
5a18407f
RH
3484 }
3485 }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif
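
    /* The ldst and pool label lists initialized above accumulate the
       out-of-line slow paths and constant-pool entries emitted while
       generating code in the loop below; they are flushed by the
       tcg_out_ldst_finalize()/tcg_out_pool_finalize() calls after it. */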
    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
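                /* Illustrative note: for a 64-bit guest on a 32-bit host,
                   each insn-start word arrives split across two 32-bit
                   args and deposit64() above recombines them, e.g.
                   deposit64(0xdeadbeef, 32, 32, 0x1234)
                   == 0x00001234deadbeef. */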
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
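        /* Illustration (assumption, not established in this file): the
           code buffer is allocated with a slack region at its end, i.e.
           code_gen_highwater ~= code_gen_buffer + size - TCG_HIGHWATER,
           so one op may cross the mark but cannot cross the real end. */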
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    if (!tcg_out_ldst_finalize(s)) {
        return -1;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    if (!tcg_out_pool_finalize(s)) {
        return -1;
    }
#endif

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}

#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, "  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.

   An illustrative sketch of such a backend follows this comment.  */
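
/* Illustrative sketch only, deliberately not compiled (and not part of
   the original file): what a hypothetical backend provides for steps
   (1)-(3).  The DebugFrame layout below is invented for the example;
   a real backend defines its own structure whose first member is a
   DebugFrameHeader describing its actual prologue. */
#if 0
/* (1) somewhere in the backend:  #define ELF_HOST_MACHINE  EM_X86_64 */

typedef struct {
    DebugFrameHeader h;        /* common CIE + FDE header */
    uint8_t fde_def_cfa[4];    /* DW_CFA opcodes describing the prologue */
    uint8_t fde_reg_ofs[14];
} DebugFrame;

static const DebugFrame debug_frame = {
    /* ... CIE/FDE fields and call-frame instructions ... */
};

/* (2) + (3): hand the unwind info to tcg_register_jit_int. */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif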
3676/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
3677typedef enum {
3678 JIT_NOACTION = 0,
3679 JIT_REGISTER_FN,
3680 JIT_UNREGISTER_FN
3681} jit_actions_t;
3682
3683struct jit_code_entry {
3684 struct jit_code_entry *next_entry;
3685 struct jit_code_entry *prev_entry;
3686 const void *symfile_addr;
3687 uint64_t symfile_size;
3688};
3689
3690struct jit_descriptor {
3691 uint32_t version;
3692 uint32_t action_flag;
3693 struct jit_code_entry *relevant_entry;
3694 struct jit_code_entry *first_entry;
3695};
3696
3697void __jit_debug_register_code(void) __attribute__((noinline));
3698void __jit_debug_register_code(void)
3699{
3700 asm("");
3701}
3702
3703/* Must statically initialize the version, because GDB may check
3704 the version before we can set it. */
3705struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
3706
3707/* End GDB interface. */

/* Return the offset of STR within the string table STRTAB.  The table
   begins with an empty string, so the search starts at offset 1; STR
   must be present, or the loop runs off the end of the table. */
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
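
/* Example (illustrative): with the .str table used below,
   "\0" ".text\0" ".debug_info\0" ...,
   find_string(str, ".text") == 1 and find_string(str, ".debug_info") == 7. */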

static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* e_ehsize is the ELF header size; the original used
               sizeof(ElfW(Shdr)), which only happens to coincide
               on 64-bit hosts.  */
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
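        /* The abbreviations in .da must stay in sync with the struct
           DebugInfo layout above: abbrev 1 describes cu_lang, cu_low_pc
           and cu_high_pc; abbrev 2 describes fn_name, fn_low_pc and
           fn_high_pc. */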
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;
    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;
813da627 3891
2c90784a
RH
3892 dfh = (DebugFrameHeader *)(img + 1);
3893 memcpy(dfh, debug_frame, debug_frame_size);
3894 dfh->fde.func_start = buf;
3895 dfh->fde.func_len = buf_size;
3896
813da627
RH
3897#ifdef DEBUG_JIT
3898 /* Enable this block to be able to debug the ELF image file creation.
3899 One can use readelf, objdump, or other inspection utilities. */
3900 {
3901 FILE *f = fopen("/tmp/qemu.jit", "w+b");
3902 if (f) {
5872bbf2 3903 if (fwrite(img, img_size, 1, f) != img_size) {
813da627
RH
3904 /* Avoid stupid unused return value warning for fwrite. */
3905 }
3906 fclose(f);
3907 }
3908 }
3909#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif