]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/tcg.c
tcg: Allow wider vectors for cmp and mul
[mirror_qemu.git] / tcg / tcg.c
CommitLineData
c896fe29
FB
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
c896fe29 25/* define it to use liveness analysis (better code) */
8f2e8c07 26#define USE_TCG_OPTIMIZATIONS
c896fe29 27
757e725b 28#include "qemu/osdep.h"
cca82982 29
813da627
RH
30/* Define to dump the ELF file used to communicate with GDB. */
31#undef DEBUG_JIT
32
f348b6d1 33#include "qemu/cutils.h"
1de7afc9
PB
34#include "qemu/host-utils.h"
35#include "qemu/timer.h"
c896fe29 36
c5d3c498 37/* Note: the long term plan is to reduce the dependencies on the QEMU
c896fe29
FB
38 CPU definitions. Currently they are used for qemu_ld/st
39 instructions */
40#define NO_CPU_IO_DEFS
41#include "cpu.h"
c896fe29 42
63c91552
PB
43#include "exec/cpu-common.h"
44#include "exec/exec-all.h"
45
c896fe29 46#include "tcg-op.h"
813da627 47
edee2579 48#if UINTPTR_MAX == UINT32_MAX
813da627 49# define ELF_CLASS ELFCLASS32
edee2579
RH
50#else
51# define ELF_CLASS ELFCLASS64
813da627
RH
52#endif
53#ifdef HOST_WORDS_BIGENDIAN
54# define ELF_DATA ELFDATA2MSB
55#else
56# define ELF_DATA ELFDATA2LSB
57#endif
58
c896fe29 59#include "elf.h"
508127e2 60#include "exec/log.h"
3468b59e 61#include "sysemu/sysemu.h"
c896fe29 62
ce151109
PM
63/* Forward declarations for functions declared in tcg-target.inc.c and
64 used here. */
e4d58b41 65static void tcg_target_init(TCGContext *s);
f69d277e 66static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
e4d58b41 67static void tcg_target_qemu_prologue(TCGContext *s);
1813e175 68static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 69 intptr_t value, intptr_t addend);
c896fe29 70
497a22eb
RH
71/* The CIE and FDE header definitions will be common to all hosts. */
72typedef struct {
73 uint32_t len __attribute__((aligned((sizeof(void *)))));
74 uint32_t id;
75 uint8_t version;
76 char augmentation[1];
77 uint8_t code_align;
78 uint8_t data_align;
79 uint8_t return_column;
80} DebugFrameCIE;
81
82typedef struct QEMU_PACKED {
83 uint32_t len __attribute__((aligned((sizeof(void *)))));
84 uint32_t cie_offset;
edee2579
RH
85 uintptr_t func_start;
86 uintptr_t func_len;
497a22eb
RH
87} DebugFrameFDEHeader;
88
2c90784a
RH
89typedef struct QEMU_PACKED {
90 DebugFrameCIE cie;
91 DebugFrameFDEHeader fde;
92} DebugFrameHeader;
93
813da627 94static void tcg_register_jit_int(void *buf, size_t size,
2c90784a
RH
95 const void *debug_frame,
96 size_t debug_frame_size)
813da627
RH
97 __attribute__((unused));
98
ce151109 99/* Forward declarations for functions declared and used in tcg-target.inc.c. */
069ea736
RH
100static const char *target_parse_constraint(TCGArgConstraint *ct,
101 const char *ct_str, TCGType type);
2a534aff 102static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
a05b5b9b 103 intptr_t arg2);
2a534aff 104static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
c0ad3001 105static void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 106 TCGReg ret, tcg_target_long arg);
c0ad3001
SW
107static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
108 const int *const_args);
d2fd745f
RH
109#if TCG_TARGET_MAYBE_vec
110static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
111 unsigned vece, const TCGArg *args,
112 const int *const_args);
113#else
114static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
115 unsigned vece, const TCGArg *args,
116 const int *const_args)
117{
118 g_assert_not_reached();
119}
120#endif
2a534aff 121static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
a05b5b9b 122 intptr_t arg2);
59d7c14e
RH
123static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
124 TCGReg base, intptr_t ofs);
cf066674 125static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
f6c6afc1 126static int tcg_target_const_match(tcg_target_long val, TCGType type,
c0ad3001 127 const TCGArgConstraint *arg_ct);
659ef5cb
RH
128#ifdef TCG_TARGET_NEED_LDST_LABELS
129static bool tcg_out_ldst_finalize(TCGContext *s);
130#endif
c896fe29 131
a505785c
EC
132#define TCG_HIGHWATER 1024
133
df2cce29
EC
134static TCGContext **tcg_ctxs;
135static unsigned int n_tcg_ctxs;
1c2adb95 136TCGv_env cpu_env = 0;
df2cce29 137
e8feb96f
EC
138/*
139 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
140 * dynamically allocate from as demand dictates. Given appropriate region
141 * sizing, this minimizes flushes even when some TCG threads generate a lot
142 * more code than others.
143 */
144struct tcg_region_state {
145 QemuMutex lock;
146
147 /* fields set at init time */
148 void *start;
149 void *start_aligned;
150 void *end;
151 size_t n;
152 size_t size; /* size of one region */
153 size_t stride; /* .size + guard size */
154
155 /* fields protected by the lock */
156 size_t current; /* current region index */
157 size_t agg_size_full; /* aggregate size of full regions */
158};
159
160static struct tcg_region_state region;
d2fd745f 161static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
b1d8e52e 162static TCGRegSet tcg_target_call_clobber_regs;
c896fe29 163
#if TCG_TARGET_INSN_UNIT_SIZE == 1
/* Append one byte to the generated code stream. */
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

/* Overwrite one byte at a previously emitted code location. */
static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
5c53bb81 176
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
/* Append a 16-bit value to the generated code stream.  When the insn
   unit is smaller than 16 bits, memcpy avoids alignment assumptions. */
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a 16-bit value at a previously emitted code location. */
static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
5c53bb81 199
#if TCG_TARGET_INSN_UNIT_SIZE <= 4
/* Append a 32-bit value to the generated code stream.  When the insn
   unit is smaller than 32 bits, memcpy avoids alignment assumptions. */
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a 32-bit value at a previously emitted code location. */
static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
5c53bb81 222
#if TCG_TARGET_INSN_UNIT_SIZE <= 8
/* Append a 64-bit value to the generated code stream.  When the insn
   unit is smaller than 64 bits, memcpy avoids alignment assumptions. */
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a 64-bit value at a previously emitted code location. */
static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
5c53bb81 245
c896fe29
FB
246/* label relocation processing */
247
/* Record (or immediately resolve) a relocation of @code_ptr against
   label @l.  If the label already has a value the instruction is patched
   in place; otherwise a pending TCGRelocation is queued on the label so
   tcg_out_label() can patch it later. */
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value. */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}
268
/* Bind label @l to code address @ptr and patch every relocation that
   was queued against it while its value was still unknown.  Must be
   called at most once per label (asserted). */
static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    /* Resolve all pending relocations recorded by tcg_out_reloc(). */
    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    /* Note: value_ptr overlays first_reloc in the union; the reloc list
       is consumed above before it is overwritten. */
    l->has_value = 1;
    l->u.value_ptr = ptr;
}
283
42a268c2 284TCGLabel *gen_new_label(void)
c896fe29 285{
b1311c4a 286 TCGContext *s = tcg_ctx;
51e3972c 287 TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
c896fe29 288
51e3972c
RH
289 *l = (TCGLabel){
290 .id = s->nb_labels++
291 };
42a268c2
RH
292
293 return l;
c896fe29
FB
294}
295
ce151109 296#include "tcg-target.inc.c"
c896fe29 297
e8feb96f
EC
298static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
299{
300 void *start, *end;
301
302 start = region.start_aligned + curr_region * region.stride;
303 end = start + region.size;
304
305 if (curr_region == 0) {
306 start = region.start;
307 }
308 if (curr_region == region.n - 1) {
309 end = region.end;
310 }
311
312 *pstart = start;
313 *pend = end;
314}
315
/* Point context @s's code generation buffer at region @curr_region,
   leaving TCG_HIGHWATER bytes of slack below the end. */
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
327
/* Assign the next free region to context @s.  Caller holds region.lock.
   Returns true when all regions are exhausted (error). */
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
337
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        /* Account the just-retired region as full (minus highwater slack). */
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}
356
/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
365
/* Call from a safe-work context.  Rewinds the region allocator and gives
   every registered TCG context a fresh initial region. */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        /* Cannot fail: we just reset region.current to 0 above. */
        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}
384
3468b59e
EC
#ifdef CONFIG_USER_ONLY
/* User-mode shares one context among all threads, so one region suffices. */
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
422
e8feb96f
EC
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * that the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        /* The page after each region is made inaccessible to catch
           overruns of the code buffer. */
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
508
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_ctx_init, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size() the same for both softmmu and user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    /* Start from a copy of the fully initialized parent context. */
    *s = tcg_init_ctx;

    /* Relink mem_base: the copied temps still point into the parent's
       temps array; redirect them into our own copy. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
e8feb96f
EC
559
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    /* Retired (full) regions plus the in-use portion of each live region. */
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
586
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    /* Each region loses its guard page and its highwater slack. */
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
602
c896fe29
FB
603/* pool based memory allocation */
604void *tcg_malloc_internal(TCGContext *s, int size)
605{
606 TCGPool *p;
607 int pool_size;
608
609 if (size > TCG_POOL_CHUNK_SIZE) {
610 /* big malloc: insert a new pool (XXX: could optimize) */
7267c094 611 p = g_malloc(sizeof(TCGPool) + size);
c896fe29 612 p->size = size;
4055299e
KB
613 p->next = s->pool_first_large;
614 s->pool_first_large = p;
615 return p->data;
c896fe29
FB
616 } else {
617 p = s->pool_current;
618 if (!p) {
619 p = s->pool_first;
620 if (!p)
621 goto new_pool;
622 } else {
623 if (!p->next) {
624 new_pool:
625 pool_size = TCG_POOL_CHUNK_SIZE;
7267c094 626 p = g_malloc(sizeof(TCGPool) + pool_size);
c896fe29
FB
627 p->size = pool_size;
628 p->next = NULL;
629 if (s->pool_current)
630 s->pool_current->next = p;
631 else
632 s->pool_first = p;
633 } else {
634 p = p->next;
635 }
636 }
637 }
638 s->pool_current = p;
639 s->pool_cur = p->data + size;
640 s->pool_end = p->data + p->size;
641 return p->data;
642}
643
644void tcg_pool_reset(TCGContext *s)
645{
4055299e
KB
646 TCGPool *p, *t;
647 for (p = s->pool_first_large; p; p = t) {
648 t = p->next;
649 g_free(p);
650 }
651 s->pool_first_large = NULL;
c896fe29
FB
652 s->pool_cur = s->pool_end = NULL;
653 s->pool_current = NULL;
654}
655
100b5e01
RH
656typedef struct TCGHelperInfo {
657 void *func;
658 const char *name;
afb49896
RH
659 unsigned flags;
660 unsigned sizemask;
100b5e01
RH
661} TCGHelperInfo;
662
2ef6175a
RH
663#include "exec/helper-proto.h"
664
100b5e01 665static const TCGHelperInfo all_helpers[] = {
2ef6175a 666#include "exec/helper-tcg.h"
100b5e01 667};
619205fd 668static GHashTable *helper_table;
100b5e01 669
91478cef 670static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
f69d277e 671static void process_op_defs(TCGContext *s);
1c2adb95
RH
672static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
673 TCGReg reg, const char *name);
91478cef 674
c896fe29
FB
/*
 * One-time initialization of the parent TCG context @s: sizes and wires
 * up the per-opcode constraint arrays, registers the helper table,
 * performs target-specific init, computes the indirect register
 * allocation order, publishes @s as tcg_ctx, and creates the "env"
 * global pointing at TCG_AREG0.
 */
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    /* Carve per-opcode slices out of the two shared arrays. */
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation tcg_region_init() for the
     * reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
b03cce8e 752
6e3b2bfd
EC
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        /* Region exhausted: try to grab a fresh one and retry; NULL
           tells the caller to flush the whole code cache. */
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
777
9002ec79
RH
/*
 * Generate the host prologue/epilogue at the start of code_gen_buffer,
 * then shrink the buffer so translated code is placed after it.  Also
 * registers the resulting code range with the JIT debug interface and,
 * when enabled, logs a disassembly of the prologue.
 */
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            /* Disassemble only the code part; dump the constant pool
               that follows it as raw .quad/.long data. */
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
860
c896fe29
FB
/* Reset per-translation state of context @s before translating a new
   TB: drop pool allocations, discard non-global temps, and empty the
   op lists. */
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}
879
7ca4b752
RH
880static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
881{
882 int n = s->nb_temps++;
883 tcg_debug_assert(n < TCG_MAX_TEMPS);
884 return memset(&s->temps[n], 0, sizeof(TCGTemp));
885}
886
/* Allocate a new global temp.  Only valid while all temps are globals
   (i.e. before any translation-local temps exist), which is asserted. */
static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}
898
085272b3
RH
/* Create a global temp permanently bound to host register @reg, and
   reserve that register from the allocator.  @name must outlive the
   context (it is stored, not copied). */
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    /* A 32-bit host register cannot hold a 64-bit (or wider) global. */
    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
918
/* Record the TCG spill frame [start, start+size) and create the fixed
   "_frame" global bound to base register @reg. */
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
926
085272b3
RH
/*
 * Create a global temp backed by memory at @base + @offset.
 *
 * On a 32-bit host a 64-bit global is split into two consecutive 32-bit
 * temps named "<name>_0"/"<name>_1", with the halves' offsets chosen by
 * host endianness.  If @base is not a fixed register the global is
 * marked indirect (its base must itself be loaded first).
 */
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        /* Low half on little-endian hosts, high half on big-endian. */
        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        /* The second half must occupy the adjacent temp slot. */
        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
982
/*
 * Allocate a translation-local temp of @type, preferring to reuse a
 * previously freed temp of matching type/locality from the free_temps
 * bitmap.  @temp_local selects a temp that survives across basic-block
 * boundaries.  On 32-bit hosts a 64-bit temp occupies two adjacent
 * 32-bit slots.
 */
static TCGTemp *tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    /* Free-list index combines base type and locality. */
    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            /* The second half must occupy the adjacent temp slot. */
            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
1027
a7812ae4
PB
1028TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
1029{
085272b3
RH
1030 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
1031 return temp_tcgv_i32(t);
a7812ae4
PB
1032}
1033
1034TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
1035{
085272b3
RH
1036 TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
1037 return temp_tcgv_i64(t);
a7812ae4
PB
1038}
1039
d2fd745f
RH
1040TCGv_vec tcg_temp_new_vec(TCGType type)
1041{
1042 TCGTemp *t;
1043
1044#ifdef CONFIG_DEBUG_TCG
1045 switch (type) {
1046 case TCG_TYPE_V64:
1047 assert(TCG_TARGET_HAS_v64);
1048 break;
1049 case TCG_TYPE_V128:
1050 assert(TCG_TARGET_HAS_v128);
1051 break;
1052 case TCG_TYPE_V256:
1053 assert(TCG_TARGET_HAS_v256);
1054 break;
1055 default:
1056 g_assert_not_reached();
1057 }
1058#endif
1059
1060 t = tcg_temp_new_internal(type, 0);
1061 return temp_tcgv_vec(t);
1062}
1063
1064/* Create a new temp of the same type as an existing temp. */
1065TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
1066{
1067 TCGTemp *t = tcgv_vec_temp(match);
1068
1069 tcg_debug_assert(t->temp_allocated != 0);
1070
1071 t = tcg_temp_new_internal(t->base_type, 0);
1072 return temp_tcgv_vec(t);
1073}
1074
085272b3 1075static void tcg_temp_free_internal(TCGTemp *ts)
c896fe29 1076{
b1311c4a 1077 TCGContext *s = tcg_ctx;
085272b3 1078 int k, idx;
c896fe29 1079
27bfd83c
PM
1080#if defined(CONFIG_DEBUG_TCG)
1081 s->temps_in_use--;
1082 if (s->temps_in_use < 0) {
1083 fprintf(stderr, "More temporaries freed than allocated!\n");
1084 }
1085#endif
1086
085272b3 1087 tcg_debug_assert(ts->temp_global == 0);
eabb7b91 1088 tcg_debug_assert(ts->temp_allocated != 0);
e8996ee0 1089 ts->temp_allocated = 0;
0ec9eabc 1090
085272b3 1091 idx = temp_idx(ts);
18d13fa2 1092 k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
0ec9eabc 1093 set_bit(idx, s->free_temps[k].l);
c896fe29
FB
1094}
1095
a7812ae4
PB
1096void tcg_temp_free_i32(TCGv_i32 arg)
1097{
085272b3 1098 tcg_temp_free_internal(tcgv_i32_temp(arg));
a7812ae4
PB
1099}
1100
1101void tcg_temp_free_i64(TCGv_i64 arg)
1102{
085272b3 1103 tcg_temp_free_internal(tcgv_i64_temp(arg));
a7812ae4 1104}
e8996ee0 1105
d2fd745f
RH
1106void tcg_temp_free_vec(TCGv_vec arg)
1107{
1108 tcg_temp_free_internal(tcgv_vec_temp(arg));
1109}
1110
a7812ae4 1111TCGv_i32 tcg_const_i32(int32_t val)
c896fe29 1112{
a7812ae4
PB
1113 TCGv_i32 t0;
1114 t0 = tcg_temp_new_i32();
e8996ee0
FB
1115 tcg_gen_movi_i32(t0, val);
1116 return t0;
1117}
c896fe29 1118
a7812ae4 1119TCGv_i64 tcg_const_i64(int64_t val)
e8996ee0 1120{
a7812ae4
PB
1121 TCGv_i64 t0;
1122 t0 = tcg_temp_new_i64();
e8996ee0
FB
1123 tcg_gen_movi_i64(t0, val);
1124 return t0;
c896fe29
FB
1125}
1126
a7812ae4 1127TCGv_i32 tcg_const_local_i32(int32_t val)
bdffd4a9 1128{
a7812ae4
PB
1129 TCGv_i32 t0;
1130 t0 = tcg_temp_local_new_i32();
bdffd4a9
AJ
1131 tcg_gen_movi_i32(t0, val);
1132 return t0;
1133}
1134
a7812ae4 1135TCGv_i64 tcg_const_local_i64(int64_t val)
bdffd4a9 1136{
a7812ae4
PB
1137 TCGv_i64 t0;
1138 t0 = tcg_temp_local_new_i64();
bdffd4a9
AJ
1139 tcg_gen_movi_i64(t0, val);
1140 return t0;
1141}
1142
#if defined(CONFIG_DEBUG_TCG)
/* Reset the live-temporary counter; used before translating a TB so that
   leaks are reported per translation rather than accumulating.  */
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

/* Return 1 if any temporaries allocated during translation were not
   freed (a temp leak in a frontend), 0 otherwise.  */
int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
1163
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    /* Any vector support at all implies the basic vector ops below.  */
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    /* Structural opcodes that every backend must handle.  */
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    /* Mandatory 32-bit integer opcodes.  */
    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    /* Optional 32-bit opcodes, gated on per-backend TCG_TARGET_HAS_*.  */
    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    /* Double-word compare/branch only exist on 32-bit hosts.  */
    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    /* 64-bit integer opcodes require a 64-bit host.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    /* Optional 64-bit opcodes.  */
    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    /* Basic vector opcodes, available whenever any vector type is.  */
    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        /* Backend-specific opcodes are assumed present by construction.  */
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
1426
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
/* Emit an INDEX_op_call op invoking FUNC with NARGS arguments ARGS and
   optional return temp RET.  The helper's flags and argument-size mask
   are looked up in helper_table, so FUNC must have been registered there.
   NOTE(review): sizemask appears to encode, per argument slot, whether the
   value is 64-bit and signed -- confirm against the DEF_HELPER macros.  */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* The backend wants 32-bit arguments explicitly extended to 64 bits;
       allocate extension temps, freed again at the bottom.  */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    /* Fill op->args: outputs first, then inputs, then func and flags.  */
    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
            /* A 64-bit return on a 32-bit host uses two consecutive
               temps, ordered by host endianness.  */
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create. */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* Free the extension temps allocated above.  */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
c896fe29 1598
8fcd3692 1599static void tcg_reg_alloc_start(TCGContext *s)
c896fe29 1600{
ac3b8891 1601 int i, n;
c896fe29 1602 TCGTemp *ts;
ac3b8891
RH
1603
1604 for (i = 0, n = s->nb_globals; i < n; i++) {
c896fe29 1605 ts = &s->temps[i];
ac3b8891 1606 ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
c896fe29 1607 }
ac3b8891 1608 for (n = s->nb_temps; i < n; i++) {
e8996ee0 1609 ts = &s->temps[i];
ac3b8891 1610 ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
e8996ee0
FB
1611 ts->mem_allocated = 0;
1612 ts->fixed_reg = 0;
1613 }
f8b2f202
RH
1614
1615 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
c896fe29
FB
1616}
1617
f8b2f202
RH
1618static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1619 TCGTemp *ts)
c896fe29 1620{
1807f4c4 1621 int idx = temp_idx(ts);
ac56dd48 1622
fa477d25 1623 if (ts->temp_global) {
ac56dd48 1624 pstrcpy(buf, buf_size, ts->name);
f8b2f202
RH
1625 } else if (ts->temp_local) {
1626 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
c896fe29 1627 } else {
f8b2f202 1628 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
c896fe29
FB
1629 }
1630 return buf;
1631}
1632
43439139
RH
1633static char *tcg_get_arg_str(TCGContext *s, char *buf,
1634 int buf_size, TCGArg arg)
f8b2f202 1635{
43439139 1636 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
f8b2f202
RH
1637}
1638
6e085f72
RH
1639/* Find helper name. */
1640static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
4dc81f28 1641{
6e085f72 1642 const char *ret = NULL;
619205fd
EC
1643 if (helper_table) {
1644 TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
72866e82
RH
1645 if (info) {
1646 ret = info->name;
1647 }
4dc81f28 1648 }
6e085f72 1649 return ret;
4dc81f28
FB
1650}
1651
/* Printable names for TCGCond values, indexed by condition code;
   used when dumping ops.  */
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

/* Printable names for the MO_* size/sign/endian combinations used by
   qemu_ld/st ops.  */
static const char * const ldst_name[] =
{
    [MO_UB] = "ub",
    [MO_SB] = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ] = "beq",
};

/* Printable prefixes for the MO_AMASK alignment bits.  With ALIGNED_ONLY
   the default is aligned, so only the unaligned case is annotated;
   otherwise the default is unaligned and alignment is annotated.  */
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT] = "un+",
    [MO_ALIGN >> MO_ASHIFT] = "",
#else
    [MO_UNALN >> MO_ASHIFT] = "",
    [MO_ALIGN >> MO_ASHIFT] = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
1699
/* Dump the current opcode stream of S to the qemu log, one op per line,
   including liveness annotations when available.  */
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;    /* running output column, for liveness alignment */

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            /* Guest instruction boundary: print the saved PC words.  */
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* Each target-long value is stored as two host words.  */
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                /* Vector ops carry width and element size in the op.  */
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            /* Print output then input arguments; K indexes op->args.  */
            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            /* Decode the first constant argument symbolically where the
               opcode gives it a known meaning; I counts consumed cargs.  */
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    /* NOTE(review): this inner 'op' shadows the outer
                       'TCGOp *op'; consider renaming (e.g. 'mop').  */
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        /* Unrecognized bits: print raw.  */
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            /* Branch-like ops carry a label as the next constant arg.  */
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            /* Remaining constant arguments, printed raw.  */
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            /* Liveness annotations, padded to a fixed column.  */
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log(" sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log(" dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
1862
1863/* we give more priority to constraints with less registers */
1864static int get_constraint_priority(const TCGOpDef *def, int k)
1865{
1866 const TCGArgConstraint *arg_ct;
1867
1868 int i, n;
1869 arg_ct = &def->args_ct[k];
1870 if (arg_ct->ct & TCG_CT_ALIAS) {
1871 /* an alias is equivalent to a single register */
1872 n = 1;
1873 } else {
1874 if (!(arg_ct->ct & TCG_CT_REG))
1875 return 0;
1876 n = 0;
1877 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1878 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1879 n++;
1880 }
1881 }
1882 return TCG_TARGET_NB_REGS - n + 1;
1883}
1884
1885/* sort from highest priority to lowest */
1886static void sort_constraints(TCGOpDef *def, int start, int n)
1887{
1888 int i, j, p1, p2, tmp;
1889
1890 for(i = 0; i < n; i++)
1891 def->sorted_args[start + i] = start + i;
1892 if (n <= 1)
1893 return;
1894 for(i = 0; i < n - 1; i++) {
1895 for(j = i + 1; j < n; j++) {
1896 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1897 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1898 if (p1 < p2) {
1899 tmp = def->sorted_args[start + i];
1900 def->sorted_args[start + i] = def->sorted_args[start + j];
1901 def->sorted_args[start + j] = tmp;
1902 }
1903 }
1904 }
1905}
1906
/* Parse the backend's textual operand-constraint strings for every
   opcode into the args_ct structures, then sort the constraints by
   priority for the register allocator.  */
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            /* Each character of the string adds a constraint.  */
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    /* A digit makes this input alias output arg N.  */
                    {
                        int oarg = *ct_str - '0';
                        /* An alias must be the whole constraint string.  */
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    /* Output must not overlap any input register.  */
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    /* Immediate constants are acceptable.  */
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    /* Backend-specific constraint letter.  */
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
1981
/* Unlink OP from the active opcode stream and put it on the free list
   so tcg_op_alloc can reuse the storage.  */
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);

#ifdef CONFIG_PROFILER
    /* Statistical counter only; plain read-modify-write is acceptable.  */
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
1991
/* Allocate a TCGOp with opcode OPC, preferring a recycled op from the
   free list over a fresh arena allocation.  The op is zeroed up to
   (but not including) its list 'link' field, which is about to be
   overwritten by the caller's list insertion anyway.  */
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;

    return op;
}
2008
2009TCGOp *tcg_emit_op(TCGOpcode opc)
2010{
2011 TCGOp *op = tcg_op_alloc(opc);
2012 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2013 return op;
2014}
5a18407f 2015
15fa08f8
RH
/* Allocate an op with opcode OPC and insert it immediately before
   OLD_OP in the instruction stream.  NOTE(review): 'nargs' is unused
   here — the TCGOp argument array is fixed-size in this version.  */
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}
2023
/* Allocate an op with opcode OPC and insert it immediately after
   OLD_OP in the instruction stream.  NOTE(review): 'nargs' is unused
   here — the TCGOp argument array is fixed-size in this version.  */
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
2031
c70fbf0a
RH
2032#define TS_DEAD 1
2033#define TS_MEM 2
2034
5a18407f
RH
2035#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2036#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2037
9c43b68d
AJ
2038/* liveness analysis: end of function: all temps are dead, and globals
2039 should be in memory. */
b83eabea 2040static void tcg_la_func_end(TCGContext *s)
c896fe29 2041{
b83eabea
RH
2042 int ng = s->nb_globals;
2043 int nt = s->nb_temps;
2044 int i;
2045
2046 for (i = 0; i < ng; ++i) {
2047 s->temps[i].state = TS_DEAD | TS_MEM;
2048 }
2049 for (i = ng; i < nt; ++i) {
2050 s->temps[i].state = TS_DEAD;
2051 }
c896fe29
FB
2052}
2053
9c43b68d
AJ
2054/* liveness analysis: end of basic block: all temps are dead, globals
2055 and local temps should be in memory. */
b83eabea 2056static void tcg_la_bb_end(TCGContext *s)
641d5fbe 2057{
b83eabea
RH
2058 int ng = s->nb_globals;
2059 int nt = s->nb_temps;
2060 int i;
641d5fbe 2061
b83eabea
RH
2062 for (i = 0; i < ng; ++i) {
2063 s->temps[i].state = TS_DEAD | TS_MEM;
2064 }
2065 for (i = ng; i < nt; ++i) {
2066 s->temps[i].state = (s->temps[i].temp_local
2067 ? TS_DEAD | TS_MEM
2068 : TS_DEAD);
641d5fbe
FB
2069 }
2070}
2071
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed.

   The pass walks the op list BACKWARD (QTAILQ_FOREACH_REVERSE_SAFE),
   propagating TS_DEAD/TS_MEM per-temp state from later ops to earlier
   ones, and records the result in each op's 'life' bitmask
   (DEAD_ARG/SYNC_ARG bits, consumed via IS_DEAD_ARG/NEED_SYNC_ARG).
   Dead pure ops and dead pure helper calls are deleted; double-word
   add/sub/mul ops whose unused half is dead are narrowed in place to
   the corresponding single-word opcode.  */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    TCGOp *op, *op_prev;

    /* Everything is dead (globals synced) at the end of the function.  */
    tcg_la_func_end(s);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *arg_ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (arg_ts->state & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        arg_ts->state = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state = TS_DEAD | TS_MEM;
                        }
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            s->temps[i].state |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    /* NULL temp args (e.g. TCG_CALL_DUMMY_ARG) are
                       skipped by the arg_ts check.  */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts && arg_ts->state & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg_ts = arg_temp(op->args[i]);
                        if (arg_ts) {
                            arg_ts->state &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            arg_temp(op->args[0])->state = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part. The result can be optimized to a simple
               add or sub. This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                for (i = 0; i < nb_oargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (arg_ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    arg_ts->state = TS_DEAD;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    for (i = 0; i < nb_globals; i++) {
                        s->temps[i].state |= TS_MEM;
                    }
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_ts = arg_temp(op->args[i]);
                    if (arg_ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg_temp(op->args[i])->state &= ~TS_DEAD;
                }
            }
            break;
        }
        op->life = arg_life;
    }
}
c896fe29 2296
/* Liveness analysis: Convert indirect regs to direct temporaries.

   Runs FORWARD over the op list.  Each indirect global gets a shadow
   direct temporary (state_ptr); uses of the global are rewritten to
   the shadow temp, with explicit ld ops inserted before the first use
   after a kill point and st ops inserted after the last write when a
   sync is required (NEED_SYNC_ARG).  Returns true iff any op argument
   was rewritten, so the caller knows the op list changed.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead. */
        its->state = TS_DEAD;
    }
    /* Non-globals (continuing from i == nb_globals) have no shadow.  */
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  If the shadow
           temp is dead here, emit a load from the global's memory slot
           just before this op.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory. */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available. */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified. */
            arg_ts->state = 0;

            /* Sync outputs upon their last write. */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead. */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
2449
8d8fdbae 2450#ifdef CONFIG_DEBUG_TCG
c896fe29
FB
/* Debug helper (CONFIG_DEBUG_TCG only): print the location of every
   temp (register, memory slot, constant, or dead) followed by the
   reverse register -> temp mapping, to stdout.  */
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
2489
/* Debug helper (CONFIG_DEBUG_TCG only): verify that reg_to_temp[] and
   each temp's (val_type, reg) agree in both directions; on mismatch,
   dump the register state and abort.  Note the 'fail' label in the
   second loop is also the target of the goto in the first loop.  */
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    /* Every register claimed by reg_to_temp must point to a temp that
       claims that same register.  */
    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    /* Every register-resident, non-fixed temp must be recorded in
       reg_to_temp.  */
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
2520#endif
2521
/* Assign TS a slot in the TCG stack frame, aligning the running frame
   offset to the natural register size first.  Aborts if the frame is
   exhausted.  */
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
2539
b3915dbb
RH
2540static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2541
59d7c14e
RH
2542/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2543 mark it free; otherwise mark it dead. */
2544static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 2545{
59d7c14e
RH
2546 if (ts->fixed_reg) {
2547 return;
2548 }
2549 if (ts->val_type == TEMP_VAL_REG) {
2550 s->reg_to_temp[ts->reg] = NULL;
2551 }
2552 ts->val_type = (free_or_dead < 0
2553 || ts->temp_local
fa477d25 2554 || ts->temp_global
59d7c14e
RH
2555 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2556}
7f6ceedf 2557
59d7c14e
RH
/* Mark a temporary as dead (convenience wrapper for
   temp_free_or_dead with free_or_dead == 1).  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
2563
/* Sync a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. If
   'free_or_dead' is non-zero, subsequently release the temporary; if
   it is positive, the temp is dead; if it is negative, the temp is
   free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            /* Direct store not supported: materialize the constant in
               a register and fall through to the register store.  */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
2610
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        /* -1: sync to memory and mark the temp free.  */
        temp_sync(s, ts, allocated_regs, -1);
    }
}
2619
/* Allocate a register belonging to reg1 & ~reg2.
   'rev' selects the allocation order: the indirect order when true,
   the target's default order otherwise.  Free registers are preferred;
   failing that, an in-use register is spilled.  Aborts if no register
   in the desired set is available at all.  */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    reg_ct = desired_regs & ~allocated_regs;
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
            return reg;
    }

    /* XXX: do better spill choice */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}
2650
40ae5c62
RH
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  Constants are materialized with
   a movi (leaving memory incoherent); memory values are loaded (leaving
   memory coherent).  A dead temp is a caller bug and aborts.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
2679
59d7c14e
RH
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant.
   NOTE(review): in this version the body is assert-only — liveness is
   expected to have already put the temp back in memory.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}
2688
9814dd27 2689/* save globals to their canonical location and assume they can be
e8996ee0
FB
2690 modified be the following code. 'allocated_regs' is used in case a
2691 temporary registers needs to be allocated to store a constant. */
2692static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 2693{
ac3b8891 2694 int i, n;
c896fe29 2695
ac3b8891 2696 for (i = 0, n = s->nb_globals; i < n; i++) {
b13eb728 2697 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 2698 }
e5097dc8
FB
2699}
2700
3d5c5f87
AJ
2701/* sync globals to their canonical location and assume they can be
2702 read by the following code. 'allocated_regs' is used in case a
2703 temporary registers needs to be allocated to store a constant. */
2704static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2705{
ac3b8891 2706 int i, n;
3d5c5f87 2707
ac3b8891 2708 for (i = 0, n = s->nb_globals; i < n; i++) {
12b9b11a 2709 TCGTemp *ts = &s->temps[i];
5a18407f
RH
2710 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2711 || ts->fixed_reg
2712 || ts->mem_coherent);
3d5c5f87
AJ
2713 }
2714}
2715
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    /* Local temps must be saved; plain temps must already be dead.  */
    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep an tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}
2735
/* Assign constant VAL to output temp OTS, honoring the op's life data.
   For non-fixed temps the movi is not emitted here: the constant is
   only propagated into OTS and will be materialized on demand (or
   stored directly by temp_sync).  */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation. */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here. */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
2758
dd186292 2759static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
0fe4fca4 2760{
43439139 2761 TCGTemp *ots = arg_temp(op->args[0]);
dd186292 2762 tcg_target_ulong val = op->args[1];
0fe4fca4 2763
dd186292 2764 tcg_reg_alloc_do_movi(s, ots, val, op->life);
0fe4fca4
PB
2765}
2766
/* Register-allocate an INDEX_op_mov: args[0] = dest, args[1] = source.
   Handles constant propagation, mov elision (stealing the source's
   register when the source dies here), store-only movs to dead-but-
   synced outputs, and the general register-to-register copy.  */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy. Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        /* Output dies here but must be synced: store straight from the
           source register to the output's memory slot.  */
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            /* Steal the dying source's register for the output.  */
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
2839
dd186292 2840static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 2841{
dd186292
RH
2842 const TCGLifeData arg_life = op->life;
2843 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
2844 TCGRegSet i_allocated_regs;
2845 TCGRegSet o_allocated_regs;
b6638662
RH
2846 int i, k, nb_iargs, nb_oargs;
2847 TCGReg reg;
c896fe29
FB
2848 TCGArg arg;
2849 const TCGArgConstraint *arg_ct;
2850 TCGTemp *ts;
2851 TCGArg new_args[TCG_MAX_OP_ARGS];
2852 int const_args[TCG_MAX_OP_ARGS];
2853
2854 nb_oargs = def->nb_oargs;
2855 nb_iargs = def->nb_iargs;
2856
2857 /* copy constants */
2858 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 2859 op->args + nb_oargs + nb_iargs,
c896fe29
FB
2860 sizeof(TCGArg) * def->nb_cargs);
2861
d21369f5
RH
2862 i_allocated_regs = s->reserved_regs;
2863 o_allocated_regs = s->reserved_regs;
82790a87 2864
c896fe29 2865 /* satisfy input constraints */
dd186292 2866 for (k = 0; k < nb_iargs; k++) {
c896fe29 2867 i = def->sorted_args[nb_oargs + k];
dd186292 2868 arg = op->args[i];
c896fe29 2869 arg_ct = &def->args_ct[i];
43439139 2870 ts = arg_temp(arg);
40ae5c62
RH
2871
2872 if (ts->val_type == TEMP_VAL_CONST
2873 && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2874 /* constant is OK for instruction */
2875 const_args[i] = 1;
2876 new_args[i] = ts->val;
2877 goto iarg_end;
c896fe29 2878 }
40ae5c62 2879
82790a87 2880 temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
40ae5c62 2881
5ff9d6a4
FB
2882 if (arg_ct->ct & TCG_CT_IALIAS) {
2883 if (ts->fixed_reg) {
2884 /* if fixed register, we must allocate a new register
2885 if the alias is not the same register */
dd186292 2886 if (arg != op->args[arg_ct->alias_index])
5ff9d6a4
FB
2887 goto allocate_in_reg;
2888 } else {
2889 /* if the input is aliased to an output and if it is
2890 not dead after the instruction, we must allocate
2891 a new register and move it */
866cb6cb 2892 if (!IS_DEAD_ARG(i)) {
5ff9d6a4 2893 goto allocate_in_reg;
866cb6cb 2894 }
7e1df267
AJ
2895 /* check if the current register has already been allocated
2896 for another input aliased to an output */
2897 int k2, i2;
2898 for (k2 = 0 ; k2 < k ; k2++) {
2899 i2 = def->sorted_args[nb_oargs + k2];
2900 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2901 (new_args[i2] == ts->reg)) {
2902 goto allocate_in_reg;
2903 }
2904 }
5ff9d6a4 2905 }
c896fe29
FB
2906 }
2907 reg = ts->reg;
2908 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2909 /* nothing to do : the constraint is satisfied */
2910 } else {
2911 allocate_in_reg:
2912 /* allocate a new register matching the constraint
2913 and move the temporary register into it */
82790a87 2914 reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
91478cef 2915 ts->indirect_base);
3b6dac34 2916 tcg_out_mov(s, ts->type, reg, ts->reg);
c896fe29 2917 }
c896fe29
FB
2918 new_args[i] = reg;
2919 const_args[i] = 0;
82790a87 2920 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29
FB
2921 iarg_end: ;
2922 }
2923
a52ad07e
AJ
2924 /* mark dead temporaries and free the associated registers */
2925 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2926 if (IS_DEAD_ARG(i)) {
43439139 2927 temp_dead(s, arg_temp(op->args[i]));
a52ad07e
AJ
2928 }
2929 }
2930
e8996ee0 2931 if (def->flags & TCG_OPF_BB_END) {
82790a87 2932 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 2933 } else {
e8996ee0
FB
2934 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2935 /* XXX: permit generic clobber register list ? */
c8074023
RH
2936 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2937 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 2938 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 2939 }
c896fe29 2940 }
3d5c5f87
AJ
2941 }
2942 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2943 /* sync globals if the op has side effects and might trigger
2944 an exception. */
82790a87 2945 sync_globals(s, i_allocated_regs);
c896fe29 2946 }
e8996ee0
FB
2947
2948 /* satisfy the output constraints */
e8996ee0
FB
2949 for(k = 0; k < nb_oargs; k++) {
2950 i = def->sorted_args[k];
dd186292 2951 arg = op->args[i];
e8996ee0 2952 arg_ct = &def->args_ct[i];
43439139 2953 ts = arg_temp(arg);
17280ff4
RH
2954 if ((arg_ct->ct & TCG_CT_ALIAS)
2955 && !const_args[arg_ct->alias_index]) {
e8996ee0 2956 reg = new_args[arg_ct->alias_index];
82790a87
RH
2957 } else if (arg_ct->ct & TCG_CT_NEWREG) {
2958 reg = tcg_reg_alloc(s, arg_ct->u.regs,
2959 i_allocated_regs | o_allocated_regs,
2960 ts->indirect_base);
e8996ee0
FB
2961 } else {
2962 /* if fixed register, we try to use it */
2963 reg = ts->reg;
2964 if (ts->fixed_reg &&
2965 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2966 goto oarg_end;
2967 }
82790a87 2968 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
91478cef 2969 ts->indirect_base);
c896fe29 2970 }
82790a87 2971 tcg_regset_set_reg(o_allocated_regs, reg);
e8996ee0
FB
2972 /* if a fixed register is used, then a move will be done afterwards */
2973 if (!ts->fixed_reg) {
ec7a869d 2974 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 2975 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
2976 }
2977 ts->val_type = TEMP_VAL_REG;
2978 ts->reg = reg;
2979 /* temp value is modified, so the value kept in memory is
2980 potentially not the same */
2981 ts->mem_coherent = 0;
f8b2f202 2982 s->reg_to_temp[reg] = ts;
e8996ee0
FB
2983 }
2984 oarg_end:
2985 new_args[i] = reg;
c896fe29 2986 }
c896fe29
FB
2987 }
2988
c896fe29 2989 /* emit instruction */
d2fd745f
RH
2990 if (def->flags & TCG_OPF_VECTOR) {
2991 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
2992 new_args, const_args);
2993 } else {
2994 tcg_out_op(s, op->opc, new_args, const_args);
2995 }
2996
c896fe29
FB
2997 /* move the outputs in the correct register if needed */
2998 for(i = 0; i < nb_oargs; i++) {
43439139 2999 ts = arg_temp(op->args[i]);
c896fe29
FB
3000 reg = new_args[i];
3001 if (ts->fixed_reg && ts->reg != reg) {
3b6dac34 3002 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29 3003 }
ec7a869d 3004 if (NEED_SYNC_ARG(i)) {
82790a87 3005 temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
59d7c14e 3006 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 3007 temp_dead(s, ts);
ec7a869d 3008 }
c896fe29
FB
3009 }
3010}
3011
b03cce8e
FB
3012#ifdef TCG_TARGET_STACK_GROWSUP
3013#define STACK_DIR(x) (-(x))
3014#else
3015#define STACK_DIR(x) (x)
3016#endif
3017
dd186292 3018static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
c896fe29 3019{
cd9090aa
RH
3020 const int nb_oargs = TCGOP_CALLO(op);
3021 const int nb_iargs = TCGOP_CALLI(op);
dd186292 3022 const TCGLifeData arg_life = op->life;
b6638662
RH
3023 int flags, nb_regs, i;
3024 TCGReg reg;
cf066674 3025 TCGArg arg;
c896fe29 3026 TCGTemp *ts;
d3452f1f
RH
3027 intptr_t stack_offset;
3028 size_t call_stack_size;
cf066674
RH
3029 tcg_insn_unit *func_addr;
3030 int allocate_args;
c896fe29 3031 TCGRegSet allocated_regs;
c896fe29 3032
dd186292
RH
3033 func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3034 flags = op->args[nb_oargs + nb_iargs + 1];
c896fe29 3035
6e17d0c5 3036 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
c45cb8bb
RH
3037 if (nb_regs > nb_iargs) {
3038 nb_regs = nb_iargs;
cf066674 3039 }
c896fe29
FB
3040
3041 /* assign stack slots first */
c45cb8bb 3042 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
c896fe29
FB
3043 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3044 ~(TCG_TARGET_STACK_ALIGN - 1);
b03cce8e
FB
3045 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3046 if (allocate_args) {
345649c0
BS
3047 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3048 preallocate call stack */
3049 tcg_abort();
b03cce8e 3050 }
39cf05d3
FB
3051
3052 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
dd186292
RH
3053 for (i = nb_regs; i < nb_iargs; i++) {
3054 arg = op->args[nb_oargs + i];
39cf05d3
FB
3055#ifdef TCG_TARGET_STACK_GROWSUP
3056 stack_offset -= sizeof(tcg_target_long);
3057#endif
3058 if (arg != TCG_CALL_DUMMY_ARG) {
43439139 3059 ts = arg_temp(arg);
40ae5c62
RH
3060 temp_load(s, ts, tcg_target_available_regs[ts->type],
3061 s->reserved_regs);
3062 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
c896fe29 3063 }
39cf05d3
FB
3064#ifndef TCG_TARGET_STACK_GROWSUP
3065 stack_offset += sizeof(tcg_target_long);
3066#endif
c896fe29
FB
3067 }
3068
3069 /* assign input registers */
d21369f5 3070 allocated_regs = s->reserved_regs;
dd186292
RH
3071 for (i = 0; i < nb_regs; i++) {
3072 arg = op->args[nb_oargs + i];
39cf05d3 3073 if (arg != TCG_CALL_DUMMY_ARG) {
43439139 3074 ts = arg_temp(arg);
39cf05d3 3075 reg = tcg_target_call_iarg_regs[i];
b3915dbb 3076 tcg_reg_free(s, reg, allocated_regs);
40ae5c62 3077
39cf05d3
FB
3078 if (ts->val_type == TEMP_VAL_REG) {
3079 if (ts->reg != reg) {
3b6dac34 3080 tcg_out_mov(s, ts->type, reg, ts->reg);
39cf05d3 3081 }
39cf05d3 3082 } else {
ccb1bb66 3083 TCGRegSet arg_set = 0;
40ae5c62 3084
40ae5c62
RH
3085 tcg_regset_set_reg(arg_set, reg);
3086 temp_load(s, ts, arg_set, allocated_regs);
c896fe29 3087 }
40ae5c62 3088
39cf05d3 3089 tcg_regset_set_reg(allocated_regs, reg);
c896fe29 3090 }
c896fe29
FB
3091 }
3092
c896fe29 3093 /* mark dead temporaries and free the associated registers */
dd186292 3094 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 3095 if (IS_DEAD_ARG(i)) {
43439139 3096 temp_dead(s, arg_temp(op->args[i]));
c896fe29
FB
3097 }
3098 }
3099
3100 /* clobber call registers */
c8074023
RH
3101 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3102 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 3103 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
3104 }
3105 }
78505279
AJ
3106
3107 /* Save globals if they might be written by the helper, sync them if
3108 they might be read. */
3109 if (flags & TCG_CALL_NO_READ_GLOBALS) {
3110 /* Nothing to do */
3111 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3112 sync_globals(s, allocated_regs);
3113 } else {
b9c18f56
AJ
3114 save_globals(s, allocated_regs);
3115 }
c896fe29 3116
cf066674 3117 tcg_out_call(s, func_addr);
c896fe29
FB
3118
3119 /* assign output registers and emit moves if needed */
3120 for(i = 0; i < nb_oargs; i++) {
dd186292 3121 arg = op->args[i];
43439139 3122 ts = arg_temp(arg);
c896fe29 3123 reg = tcg_target_call_oarg_regs[i];
eabb7b91 3124 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
34b1a49c 3125
c896fe29
FB
3126 if (ts->fixed_reg) {
3127 if (ts->reg != reg) {
3b6dac34 3128 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29
FB
3129 }
3130 } else {
ec7a869d 3131 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 3132 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
3133 }
3134 ts->val_type = TEMP_VAL_REG;
3135 ts->reg = reg;
3136 ts->mem_coherent = 0;
f8b2f202 3137 s->reg_to_temp[reg] = ts;
ec7a869d 3138 if (NEED_SYNC_ARG(i)) {
59d7c14e
RH
3139 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
3140 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 3141 temp_dead(s, ts);
8c11ad25 3142 }
c896fe29
FB
3143 }
3144 }
c896fe29
FB
3145}
3146
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                                       \
    do {                                                                \
        (to)->field += atomic_read(&((from)->field));                   \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
/* Accumulate the profiling data of every TCG context into @prof.
   @counters selects the scalar counters, @table the per-opcode table;
   reads are individually atomic, so the snapshot is only approximately
   consistent while other threads keep translating.  */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;   /* NOTE: intentionally shadows the outer loop index */

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

/* Snapshot only the scalar counters.  */
static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

/* Snapshot only the per-opcode count table.  */
static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

/* Print the number of times each TCG opcode was emitted.  */
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}
#else
/* Stub when the profiler is compiled out.  */
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
3232
3233
5bd2ec3d 3234int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
c896fe29 3235{
c3fac113
EC
3236#ifdef CONFIG_PROFILER
3237 TCGProfile *prof = &s->prof;
3238#endif
15fa08f8
RH
3239 int i, num_insns;
3240 TCGOp *op;
c896fe29 3241
04fe6400
RH
3242#ifdef CONFIG_PROFILER
3243 {
3244 int n;
3245
15fa08f8
RH
3246 QTAILQ_FOREACH(op, &s->ops, link) {
3247 n++;
3248 }
c3fac113
EC
3249 atomic_set(&prof->op_count, prof->op_count + n);
3250 if (n > prof->op_count_max) {
3251 atomic_set(&prof->op_count_max, n);
04fe6400
RH
3252 }
3253
3254 n = s->nb_temps;
c3fac113
EC
3255 atomic_set(&prof->temp_count, prof->temp_count + n);
3256 if (n > prof->temp_count_max) {
3257 atomic_set(&prof->temp_count_max, n);
04fe6400
RH
3258 }
3259 }
3260#endif
3261
c896fe29 3262#ifdef DEBUG_DISAS
d977e1c2
AB
3263 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
3264 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 3265 qemu_log_lock();
93fcfe39 3266 qemu_log("OP:\n");
eeacee4d 3267 tcg_dump_ops(s);
93fcfe39 3268 qemu_log("\n");
1ee73216 3269 qemu_log_unlock();
c896fe29
FB
3270 }
3271#endif
3272
c5cc28ff 3273#ifdef CONFIG_PROFILER
c3fac113 3274 atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
c5cc28ff
AJ
3275#endif
3276
8f2e8c07 3277#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 3278 tcg_optimize(s);
8f2e8c07
KB
3279#endif
3280
a23a9ec6 3281#ifdef CONFIG_PROFILER
c3fac113
EC
3282 atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
3283 atomic_set(&prof->la_time, prof->la_time - profile_getclock());
a23a9ec6 3284#endif
c5cc28ff 3285
b83eabea 3286 liveness_pass_1(s);
5a18407f 3287
b83eabea 3288 if (s->nb_indirects > 0) {
5a18407f 3289#ifdef DEBUG_DISAS
b83eabea
RH
3290 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
3291 && qemu_log_in_addr_range(tb->pc))) {
3292 qemu_log_lock();
3293 qemu_log("OP before indirect lowering:\n");
3294 tcg_dump_ops(s);
3295 qemu_log("\n");
3296 qemu_log_unlock();
3297 }
5a18407f 3298#endif
b83eabea
RH
3299 /* Replace indirect temps with direct temps. */
3300 if (liveness_pass_2(s)) {
3301 /* If changes were made, re-run liveness. */
3302 liveness_pass_1(s);
5a18407f
RH
3303 }
3304 }
c5cc28ff 3305
a23a9ec6 3306#ifdef CONFIG_PROFILER
c3fac113 3307 atomic_set(&prof->la_time, prof->la_time + profile_getclock());
a23a9ec6 3308#endif
c896fe29
FB
3309
3310#ifdef DEBUG_DISAS
d977e1c2
AB
3311 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
3312 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 3313 qemu_log_lock();
c5cc28ff 3314 qemu_log("OP after optimization and liveness analysis:\n");
eeacee4d 3315 tcg_dump_ops(s);
93fcfe39 3316 qemu_log("\n");
1ee73216 3317 qemu_log_unlock();
c896fe29
FB
3318 }
3319#endif
3320
3321 tcg_reg_alloc_start(s);
3322
e7e168f4
EC
3323 s->code_buf = tb->tc.ptr;
3324 s->code_ptr = tb->tc.ptr;
c896fe29 3325
659ef5cb
RH
3326#ifdef TCG_TARGET_NEED_LDST_LABELS
3327 s->ldst_labels = NULL;
3328#endif
57a26946
RH
3329#ifdef TCG_TARGET_NEED_POOL_LABELS
3330 s->pool_labels = NULL;
3331#endif
9ecefc84 3332
fca8a500 3333 num_insns = -1;
15fa08f8 3334 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb 3335 TCGOpcode opc = op->opc;
b3db8758 3336
c896fe29 3337#ifdef CONFIG_PROFILER
c3fac113 3338 atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
c896fe29 3339#endif
c45cb8bb
RH
3340
3341 switch (opc) {
c896fe29 3342 case INDEX_op_mov_i32:
c896fe29 3343 case INDEX_op_mov_i64:
d2fd745f 3344 case INDEX_op_mov_vec:
dd186292 3345 tcg_reg_alloc_mov(s, op);
c896fe29 3346 break;
e8996ee0 3347 case INDEX_op_movi_i32:
e8996ee0 3348 case INDEX_op_movi_i64:
d2fd745f 3349 case INDEX_op_dupi_vec:
dd186292 3350 tcg_reg_alloc_movi(s, op);
e8996ee0 3351 break;
765b842a 3352 case INDEX_op_insn_start:
fca8a500
RH
3353 if (num_insns >= 0) {
3354 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
3355 }
3356 num_insns++;
bad729e2
RH
3357 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
3358 target_ulong a;
3359#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 3360 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
bad729e2 3361#else
efee3746 3362 a = op->args[i];
bad729e2 3363#endif
fca8a500 3364 s->gen_insn_data[num_insns][i] = a;
bad729e2 3365 }
c896fe29 3366 break;
5ff9d6a4 3367 case INDEX_op_discard:
43439139 3368 temp_dead(s, arg_temp(op->args[0]));
5ff9d6a4 3369 break;
c896fe29 3370 case INDEX_op_set_label:
e8996ee0 3371 tcg_reg_alloc_bb_end(s, s->reserved_regs);
efee3746 3372 tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
c896fe29
FB
3373 break;
3374 case INDEX_op_call:
dd186292 3375 tcg_reg_alloc_call(s, op);
c45cb8bb 3376 break;
c896fe29 3377 default:
25c4d9cc 3378 /* Sanity check that we've not introduced any unhandled opcodes. */
be0f34b5 3379 tcg_debug_assert(tcg_op_supported(opc));
c896fe29
FB
3380 /* Note: in order to speed up the code, it would be much
3381 faster to have specialized register allocator functions for
3382 some common argument patterns */
dd186292 3383 tcg_reg_alloc_op(s, op);
c896fe29
FB
3384 break;
3385 }
8d8fdbae 3386#ifdef CONFIG_DEBUG_TCG
c896fe29
FB
3387 check_regs(s);
3388#endif
b125f9dc
RH
3389 /* Test for (pending) buffer overflow. The assumption is that any
3390 one operation beginning below the high water mark cannot overrun
3391 the buffer completely. Thus we can test for overflow after
3392 generating code without having to check during generation. */
644da9b3 3393 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
b125f9dc
RH
3394 return -1;
3395 }
c896fe29 3396 }
fca8a500
RH
3397 tcg_debug_assert(num_insns >= 0);
3398 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
c45cb8bb 3399
b76f0d8c 3400 /* Generate TB finalization at the end of block */
659ef5cb
RH
3401#ifdef TCG_TARGET_NEED_LDST_LABELS
3402 if (!tcg_out_ldst_finalize(s)) {
23dceda6
RH
3403 return -1;
3404 }
659ef5cb 3405#endif
57a26946
RH
3406#ifdef TCG_TARGET_NEED_POOL_LABELS
3407 if (!tcg_out_pool_finalize(s)) {
3408 return -1;
3409 }
3410#endif
c896fe29
FB
3411
3412 /* flush instruction cache */
1813e175 3413 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2aeabc08 3414
1813e175 3415 return tcg_current_code_size(s);
c896fe29
FB
3416}
3417
#ifdef CONFIG_PROFILER
/* Print aggregated JIT profiling statistics (counters snapshot) to @f.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    /* Avoid division by zero below when nothing has been translated yet.  */
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    /* The 2.4 GHz figure is an arbitrary reference clock for converting
       cycle counts to seconds.  */
    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    /* Per-unit cost ratios; each guards against a zero denominator.  */
    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
/* Stub when the profiler is compiled out.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
813da627
RH
3481
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

/* One registered in-memory symbol file, on a doubly-linked list.  */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

/* The root descriptor the debugger inspects.  */
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
/* Notification hook for the debugger; per the GDB JIT interface the
   debugger traps calls to this function.  The empty asm plus noinline
   keep the call from being optimized away.  */
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */
3527
/* Return the byte offset of @str within the ELF string table @strtab.
   A string table begins with a NUL byte, so the scan starts at offset 1.
   The caller guarantees @str is present: there is no "not found" result,
   and an absent string would walk past the end of the table.  */
static int find_string(const char *strtab, const char *str)
{
    for (const char *cursor = strtab + 1; ; cursor += strlen(cursor) + 1) {
        if (strcmp(cursor, str) == 0) {
            return cursor - strtab;
        }
    }
}
3539
5872bbf2 3540static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2c90784a
RH
3541 const void *debug_frame,
3542 size_t debug_frame_size)
813da627 3543{
5872bbf2
RH
3544 struct __attribute__((packed)) DebugInfo {
3545 uint32_t len;
3546 uint16_t version;
3547 uint32_t abbrev;
3548 uint8_t ptr_size;
3549 uint8_t cu_die;
3550 uint16_t cu_lang;
3551 uintptr_t cu_low_pc;
3552 uintptr_t cu_high_pc;
3553 uint8_t fn_die;
3554 char fn_name[16];
3555 uintptr_t fn_low_pc;
3556 uintptr_t fn_high_pc;
3557 uint8_t cu_eoc;
3558 };
813da627
RH
3559
3560 struct ElfImage {
3561 ElfW(Ehdr) ehdr;
3562 ElfW(Phdr) phdr;
5872bbf2
RH
3563 ElfW(Shdr) shdr[7];
3564 ElfW(Sym) sym[2];
3565 struct DebugInfo di;
3566 uint8_t da[24];
3567 char str[80];
3568 };
3569
3570 struct ElfImage *img;
3571
3572 static const struct ElfImage img_template = {
3573 .ehdr = {
3574 .e_ident[EI_MAG0] = ELFMAG0,
3575 .e_ident[EI_MAG1] = ELFMAG1,
3576 .e_ident[EI_MAG2] = ELFMAG2,
3577 .e_ident[EI_MAG3] = ELFMAG3,
3578 .e_ident[EI_CLASS] = ELF_CLASS,
3579 .e_ident[EI_DATA] = ELF_DATA,
3580 .e_ident[EI_VERSION] = EV_CURRENT,
3581 .e_type = ET_EXEC,
3582 .e_machine = ELF_HOST_MACHINE,
3583 .e_version = EV_CURRENT,
3584 .e_phoff = offsetof(struct ElfImage, phdr),
3585 .e_shoff = offsetof(struct ElfImage, shdr),
3586 .e_ehsize = sizeof(ElfW(Shdr)),
3587 .e_phentsize = sizeof(ElfW(Phdr)),
3588 .e_phnum = 1,
3589 .e_shentsize = sizeof(ElfW(Shdr)),
3590 .e_shnum = ARRAY_SIZE(img->shdr),
3591 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
abbb3eae
RH
3592#ifdef ELF_HOST_FLAGS
3593 .e_flags = ELF_HOST_FLAGS,
3594#endif
3595#ifdef ELF_OSABI
3596 .e_ident[EI_OSABI] = ELF_OSABI,
3597#endif
5872bbf2
RH
3598 },
3599 .phdr = {
3600 .p_type = PT_LOAD,
3601 .p_flags = PF_X,
3602 },
3603 .shdr = {
3604 [0] = { .sh_type = SHT_NULL },
3605 /* Trick: The contents of code_gen_buffer are not present in
3606 this fake ELF file; that got allocated elsewhere. Therefore
3607 we mark .text as SHT_NOBITS (similar to .bss) so that readers
3608 will not look for contents. We can record any address. */
3609 [1] = { /* .text */
3610 .sh_type = SHT_NOBITS,
3611 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
3612 },
3613 [2] = { /* .debug_info */
3614 .sh_type = SHT_PROGBITS,
3615 .sh_offset = offsetof(struct ElfImage, di),
3616 .sh_size = sizeof(struct DebugInfo),
3617 },
3618 [3] = { /* .debug_abbrev */
3619 .sh_type = SHT_PROGBITS,
3620 .sh_offset = offsetof(struct ElfImage, da),
3621 .sh_size = sizeof(img->da),
3622 },
3623 [4] = { /* .debug_frame */
3624 .sh_type = SHT_PROGBITS,
3625 .sh_offset = sizeof(struct ElfImage),
3626 },
3627 [5] = { /* .symtab */
3628 .sh_type = SHT_SYMTAB,
3629 .sh_offset = offsetof(struct ElfImage, sym),
3630 .sh_size = sizeof(img->sym),
3631 .sh_info = 1,
3632 .sh_link = ARRAY_SIZE(img->shdr) - 1,
3633 .sh_entsize = sizeof(ElfW(Sym)),
3634 },
3635 [6] = { /* .strtab */
3636 .sh_type = SHT_STRTAB,
3637 .sh_offset = offsetof(struct ElfImage, str),
3638 .sh_size = sizeof(img->str),
3639 }
3640 },
3641 .sym = {
3642 [1] = { /* code_gen_buffer */
3643 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
3644 .st_shndx = 1,
3645 }
3646 },
3647 .di = {
3648 .len = sizeof(struct DebugInfo) - 4,
3649 .version = 2,
3650 .ptr_size = sizeof(void *),
3651 .cu_die = 1,
3652 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
3653 .fn_die = 2,
3654 .fn_name = "code_gen_buffer"
3655 },
3656 .da = {
3657 1, /* abbrev number (the cu) */
3658 0x11, 1, /* DW_TAG_compile_unit, has children */
3659 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
3660 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3661 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3662 0, 0, /* end of abbrev */
3663 2, /* abbrev number (the fn) */
3664 0x2e, 0, /* DW_TAG_subprogram, no children */
3665 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
3666 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3667 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3668 0, 0, /* end of abbrev */
3669 0 /* no more abbrev */
3670 },
3671 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
3672 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
813da627
RH
3673 };
3674
3675 /* We only need a single jit entry; statically allocate it. */
3676 static struct jit_code_entry one_entry;
3677
5872bbf2 3678 uintptr_t buf = (uintptr_t)buf_ptr;
813da627 3679 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2c90784a 3680 DebugFrameHeader *dfh;
813da627 3681
5872bbf2
RH
3682 img = g_malloc(img_size);
3683 *img = img_template;
813da627 3684
5872bbf2
RH
3685 img->phdr.p_vaddr = buf;
3686 img->phdr.p_paddr = buf;
3687 img->phdr.p_memsz = buf_size;
813da627 3688
813da627 3689 img->shdr[1].sh_name = find_string(img->str, ".text");
5872bbf2 3690 img->shdr[1].sh_addr = buf;
813da627
RH
3691 img->shdr[1].sh_size = buf_size;
3692
5872bbf2
RH
3693 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
3694 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
3695
3696 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
3697 img->shdr[4].sh_size = debug_frame_size;
3698
3699 img->shdr[5].sh_name = find_string(img->str, ".symtab");
3700 img->shdr[6].sh_name = find_string(img->str, ".strtab");
3701
3702 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
3703 img->sym[1].st_value = buf;
3704 img->sym[1].st_size = buf_size;
813da627 3705
5872bbf2 3706 img->di.cu_low_pc = buf;
45aba097 3707 img->di.cu_high_pc = buf + buf_size;
5872bbf2 3708 img->di.fn_low_pc = buf;
45aba097 3709 img->di.fn_high_pc = buf + buf_size;
813da627 3710
2c90784a
RH
3711 dfh = (DebugFrameHeader *)(img + 1);
3712 memcpy(dfh, debug_frame, debug_frame_size);
3713 dfh->fde.func_start = buf;
3714 dfh->fde.func_len = buf_size;
3715
813da627
RH
3716#ifdef DEBUG_JIT
3717 /* Enable this block to be able to debug the ELF image file creation.
3718 One can use readelf, objdump, or other inspection utilities. */
3719 {
3720 FILE *f = fopen("/tmp/qemu.jit", "w+b");
3721 if (f) {
5872bbf2 3722 if (fwrite(img, img_size, 1, f) != img_size) {
813da627
RH
3723 /* Avoid stupid unused return value warning for fwrite. */
3724 }
3725 fclose(f);
3726 }
3727 }
3728#endif
3729
3730 one_entry.symfile_addr = img;
3731 one_entry.symfile_size = img_size;
3732
3733 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
3734 __jit_debug_descriptor.relevant_entry = &one_entry;
3735 __jit_debug_descriptor.first_entry = &one_entry;
3736 __jit_debug_register_code();
3737}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

/* No-op: without ELF_HOST_MACHINE there is nothing to register.  */
static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

/* No-op stub keeping the public interface available to callers.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */
db432672
RH
3752
#if !TCG_TARGET_MAYBE_vec
/* Fallback when the backend has no vector support: expansion of a vector
   op must never be requested, so reaching here is a programming error.  */
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif