/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
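
/*
 * Informal usage sketch (not part of the original file): a backend emitting
 * a branch to a label whose address is not yet known records a relocation
 * against it, roughly:
 *
 *     if (l->has_value) {
 *         tcg_out_branch_to(s, l->u.value_ptr);            // hypothetical helper
 *     } else {
 *         tcg_out_reloc(s, s->code_ptr, R_MY_BRANCH, l, 0); // placeholder type
 *         tcg_out_branch_placeholder(s);                   // hypothetical helper
 *     }
 *
 * Once the label is emitted via tcg_out_label() its address is known, and
 * tcg_resolve_relocs() walks each label's list so that patch_reloc() can
 * rewrite the recorded instructions.
 */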

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
}
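
/*
 * Informal note (not part of the original file): a backend's goto_tb
 * expansion is expected to call set_jmp_insn_offset() at the point of the
 * patchable direct jump and set_jmp_reset_offset() at the address execution
 * continues from once the jump has been reset, roughly:
 *
 *     set_jmp_insn_offset(s, which);    // record the patchable jump insn
 *     tcg_out_patchable_jump(s);        // hypothetical backend helper
 *     set_jmp_reset_offset(s, which);   // record the fall-through target
 *
 * "which" is the TB's jump slot (0 or 1); both offsets must fit within the
 * UINT16_MAX bound mentioned in the comments above.
 */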

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;
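
/*
 * Illustration (informal, not part of the original file): with the macros
 * above, an entry such as C_O1_I2(r, r, ri) in tcg-target-con-set.h expands
 * to the enumerator c_o1_i2_r_r_ri.  The same header is included a second
 * time below, with different macro definitions, to build the matching
 * constraint_sets[] entry, and the macros are redefined a third time so
 * tcg_target_op_def() in tcg-target.c.inc can return the enumerator.  The
 * constraint letters ("r", "ri") are backend-specific examples.
 */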

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};


#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
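
/*
 * Informal note (not part of the original file): tcg_malloc() in tcg.h is a
 * small inline fast path that bumps pool_cur within the current chunk and
 * only falls back to tcg_malloc_internal() when the chunk is exhausted.
 * tcg_pool_reset() is called from tcg_func_start(), so per-translation
 * allocations (labels, relocations, ...) are recycled in bulk between TBs.
 */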

#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    }
    g_assert_not_reached();
}

static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t. */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);
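        /*
         * Worked example (informal, not part of the original file): for a
         * helper with two arguments, the second argument's 3-bit typecode
         * occupies bits 3..5 of (typemask >> 3), so 32 - clz32(...) yields
         * a value between 4 and 6, and DIV_ROUND_UP(..., 3) == 2.
         */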

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */
22f15579 630
39004a71
RH
631typedef struct TCGCumulativeArgs {
632 int arg_idx; /* tcg_gen_callN args[] */
633 int info_in_idx; /* TCGHelperInfo in[] */
634 int arg_slot; /* regs+stack slot */
635 int ref_slot; /* stack slots for references */
636} TCGCumulativeArgs;
637
638static void layout_arg_even(TCGCumulativeArgs *cum)
639{
640 cum->arg_slot += cum->arg_slot & 1;
641}
642
643static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
644 TCGCallArgumentKind kind)
645{
646 TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
647
648 *loc = (TCGCallArgumentLoc){
649 .kind = kind,
650 .arg_idx = cum->arg_idx,
651 .arg_slot = cum->arg_slot,
652 };
653 cum->info_in_idx++;
654 cum->arg_slot++;
655}
656
657static void layout_arg_normal_n(TCGCumulativeArgs *cum,
658 TCGHelperInfo *info, int n)
659{
660 TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
661
662 for (int i = 0; i < n; ++i) {
663 /* Layout all using the same arg_idx, adjusting the subindex. */
664 loc[i] = (TCGCallArgumentLoc){
665 .kind = TCG_CALL_ARG_NORMAL,
666 .arg_idx = cum->arg_idx,
667 .tmp_subindex = i,
668 .arg_slot = cum->arg_slot + i,
669 };
670 }
671 cum->info_in_idx += n;
672 cum->arg_slot += n;
673}
674
675static void init_call_layout(TCGHelperInfo *info)
676{
677 int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
678 int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
679 unsigned typemask = info->typemask;
680 unsigned typecode;
681 TCGCumulativeArgs cum = { };
682
683 /*
684 * Parse and place any function return value.
685 */
686 typecode = typemask & 7;
687 switch (typecode) {
688 case dh_typecode_void:
689 info->nr_out = 0;
690 break;
691 case dh_typecode_i32:
692 case dh_typecode_s32:
693 case dh_typecode_ptr:
694 info->nr_out = 1;
695 info->out_kind = TCG_CALL_RET_NORMAL;
696 break;
697 case dh_typecode_i64:
698 case dh_typecode_s64:
699 info->nr_out = 64 / TCG_TARGET_REG_BITS;
700 info->out_kind = TCG_CALL_RET_NORMAL;
701 break;
702 default:
703 g_assert_not_reached();
704 }
705 assert(info->nr_out <= ARRAY_SIZE(tcg_target_call_oarg_regs));
706
707 /*
708 * Parse and place function arguments.
709 */
710 for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
711 TCGCallArgumentKind kind;
712 TCGType type;
713
714 typecode = typemask & 7;
715 switch (typecode) {
716 case dh_typecode_i32:
717 case dh_typecode_s32:
718 type = TCG_TYPE_I32;
719 break;
720 case dh_typecode_i64:
721 case dh_typecode_s64:
722 type = TCG_TYPE_I64;
723 break;
724 case dh_typecode_ptr:
725 type = TCG_TYPE_PTR;
726 break;
727 default:
728 g_assert_not_reached();
729 }
730
731 switch (type) {
732 case TCG_TYPE_I32:
733 switch (TCG_TARGET_CALL_ARG_I32) {
734 case TCG_CALL_ARG_EVEN:
735 layout_arg_even(&cum);
736 /* fall through */
737 case TCG_CALL_ARG_NORMAL:
738 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
739 break;
740 case TCG_CALL_ARG_EXTEND:
741 kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
742 layout_arg_1(&cum, info, kind);
743 break;
744 default:
745 qemu_build_not_reached();
746 }
747 break;
748
749 case TCG_TYPE_I64:
750 switch (TCG_TARGET_CALL_ARG_I64) {
751 case TCG_CALL_ARG_EVEN:
752 layout_arg_even(&cum);
753 /* fall through */
754 case TCG_CALL_ARG_NORMAL:
755 if (TCG_TARGET_REG_BITS == 32) {
756 layout_arg_normal_n(&cum, info, 2);
757 } else {
758 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
759 }
760 break;
761 default:
762 qemu_build_not_reached();
763 }
764 break;
765
766 default:
767 g_assert_not_reached();
768 }
769 }
770 info->nr_in = cum.info_in_idx;
771
772 /* Validate that we didn't overrun the input array. */
773 assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
774 /* Validate the backend has enough argument space. */
775 assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
776 assert(cum.ref_slot <= max_stk_slots);
777}
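
/*
 * Worked example (informal, not part of the original file): on a 32-bit
 * host with TCG_CALL_ARG_I64 == TCG_CALL_ARG_EVEN, a helper taking
 * (i32, i64) is laid out as follows: the i32 argument takes slot 0 via
 * layout_arg_1(); layout_arg_even() then rounds the cumulative slot up to 2,
 * and layout_arg_normal_n(..., 2) spreads the i64 over slots 2 and 3.
 * The result is info->nr_in == 3 TCGCallArgumentLoc entries.
 */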
778
91478cef 779static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
f69d277e 780static void process_op_defs(TCGContext *s);
1c2adb95
RH
781static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
782 TCGReg reg, const char *name);
91478cef 783
43b972b7 784static void tcg_context_init(unsigned max_cpus)
c896fe29 785{
a76aabd3 786 TCGContext *s = &tcg_init_ctx;
100b5e01 787 int op, total_args, n, i;
c896fe29
FB
788 TCGOpDef *def;
789 TCGArgConstraint *args_ct;
1c2adb95 790 TCGTemp *ts;
c896fe29
FB
791
792 memset(s, 0, sizeof(*s));
c896fe29 793 s->nb_globals = 0;
c70fbf0a 794
c896fe29
FB
795 /* Count total number of arguments and allocate the corresponding
796 space */
797 total_args = 0;
798 for(op = 0; op < NB_OPS; op++) {
799 def = &tcg_op_defs[op];
800 n = def->nb_iargs + def->nb_oargs;
801 total_args += n;
802 }
803
bc2b17e6 804 args_ct = g_new0(TCGArgConstraint, total_args);
c896fe29
FB
805
806 for(op = 0; op < NB_OPS; op++) {
807 def = &tcg_op_defs[op];
808 def->args_ct = args_ct;
c896fe29 809 n = def->nb_iargs + def->nb_oargs;
c896fe29
FB
810 args_ct += n;
811 }
5cd8f621
RH
812
813 /* Register helpers. */
84fd9dd3 814 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
619205fd 815 helper_table = g_hash_table_new(NULL, NULL);
84fd9dd3 816
100b5e01 817 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
39004a71 818 init_call_layout(&all_helpers[i]);
84fd9dd3 819 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
72866e82 820 (gpointer)&all_helpers[i]);
100b5e01 821 }
5cd8f621 822
22f15579 823#ifdef CONFIG_TCG_INTERPRETER
0c22e176 824 init_ffi_layouts();
22f15579
RH
825#endif
826
c896fe29 827 tcg_target_init(s);
f69d277e 828 process_op_defs(s);
91478cef
RH
829
830 /* Reverse the order of the saved registers, assuming they're all at
831 the start of tcg_target_reg_alloc_order. */
832 for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
833 int r = tcg_target_reg_alloc_order[n];
834 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
835 break;
836 }
837 }
838 for (i = 0; i < n; ++i) {
839 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
840 }
841 for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
842 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
843 }
b1311c4a 844
38b47b19
EC
845 alloc_tcg_plugin_context(s);
846
b1311c4a 847 tcg_ctx = s;
3468b59e
EC
848 /*
849 * In user-mode we simply share the init context among threads, since we
850 * use a single region. See the documentation tcg_region_init() for the
851 * reasoning behind this.
852 * In softmmu we will have at most max_cpus TCG threads.
853 */
854#ifdef CONFIG_USER_ONLY
df2cce29 855 tcg_ctxs = &tcg_ctx;
0e2d61cf
RH
856 tcg_cur_ctxs = 1;
857 tcg_max_ctxs = 1;
3468b59e 858#else
0e2d61cf
RH
859 tcg_max_ctxs = max_cpus;
860 tcg_ctxs = g_new0(TCGContext *, max_cpus);
3468b59e 861#endif
1c2adb95
RH
862
863 tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
864 ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
865 cpu_env = temp_tcgv_ptr(ts);
9002ec79 866}
b03cce8e 867
43b972b7 868void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
a76aabd3 869{
43b972b7
RH
870 tcg_context_init(max_cpus);
871 tcg_region_init(tb_size, splitwx, max_cpus);
a76aabd3
RH
872}
873
6e3b2bfd
EC
874/*
875 * Allocate TBs right before their corresponding translated code, making
876 * sure that TBs and code are on different cache lines.
877 */
878TranslationBlock *tcg_tb_alloc(TCGContext *s)
879{
880 uintptr_t align = qemu_icache_linesize;
881 TranslationBlock *tb;
882 void *next;
883
e8feb96f 884 retry:
6e3b2bfd
EC
885 tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
886 next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);
887
888 if (unlikely(next > s->code_gen_highwater)) {
e8feb96f
EC
889 if (tcg_region_alloc(s)) {
890 return NULL;
891 }
892 goto retry;
6e3b2bfd 893 }
d73415a3 894 qatomic_set(&s->code_gen_ptr, next);
57a26946 895 s->data_gen_ptr = NULL;
6e3b2bfd
EC
896 return tb;
897}
898
9002ec79
RH
899void tcg_prologue_init(TCGContext *s)
900{
b0a0794a 901 size_t prologue_size;
8163b749 902
b0a0794a
RH
903 s->code_ptr = s->code_gen_ptr;
904 s->code_buf = s->code_gen_ptr;
5b38ee31 905 s->data_gen_ptr = NULL;
b91ccb31
RH
906
907#ifndef CONFIG_TCG_INTERPRETER
b0a0794a 908 tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
b91ccb31 909#endif
8163b749 910
5b38ee31
RH
911#ifdef TCG_TARGET_NEED_POOL_LABELS
912 s->pool_labels = NULL;
913#endif
914
653b87eb 915 qemu_thread_jit_write();
8163b749 916 /* Generate the prologue. */
b03cce8e 917 tcg_target_qemu_prologue(s);
5b38ee31
RH
918
919#ifdef TCG_TARGET_NEED_POOL_LABELS
920 /* Allow the prologue to put e.g. guest_base into a pool entry. */
921 {
1768987b
RH
922 int result = tcg_out_pool_finalize(s);
923 tcg_debug_assert(result == 0);
5b38ee31
RH
924 }
925#endif
926
b0a0794a 927 prologue_size = tcg_current_code_size(s);
5584e2db 928 perf_report_prologue(s->code_gen_ptr, prologue_size);
b0a0794a 929
df5d2b16 930#ifndef CONFIG_TCG_INTERPRETER
b0a0794a
RH
931 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
932 (uintptr_t)s->code_buf, prologue_size);
df5d2b16 933#endif
8163b749 934
d6b64b2b
RH
935#ifdef DEBUG_DISAS
936 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
c60f599b 937 FILE *logfile = qemu_log_trylock();
78b54858
RH
938 if (logfile) {
939 fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
940 if (s->data_gen_ptr) {
941 size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
942 size_t data_size = prologue_size - code_size;
943 size_t i;
944
945 disas(logfile, s->code_gen_ptr, code_size);
946
947 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
948 if (sizeof(tcg_target_ulong) == 8) {
949 fprintf(logfile,
950 "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
951 (uintptr_t)s->data_gen_ptr + i,
952 *(uint64_t *)(s->data_gen_ptr + i));
953 } else {
954 fprintf(logfile,
955 "0x%08" PRIxPTR ": .long 0x%08x\n",
956 (uintptr_t)s->data_gen_ptr + i,
957 *(uint32_t *)(s->data_gen_ptr + i));
958 }
5b38ee31 959 }
78b54858
RH
960 } else {
961 disas(logfile, s->code_gen_ptr, prologue_size);
5b38ee31 962 }
78b54858 963 fprintf(logfile, "\n");
78b54858 964 qemu_log_unlock(logfile);
5b38ee31 965 }
d6b64b2b
RH
966 }
967#endif
cedbcb01 968
6eea0434
RH
969#ifndef CONFIG_TCG_INTERPRETER
970 /*
971 * Assert that goto_ptr is implemented completely, setting an epilogue.
972 * For tci, we use NULL as the signal to return from the interpreter,
973 * so skip this check.
974 */
f4e01e30 975 tcg_debug_assert(tcg_code_gen_epilogue != NULL);
6eea0434 976#endif
d1c74ab3
RH
977
978 tcg_region_prologue_set(s);
c896fe29
FB
979}
980
c896fe29
FB
981void tcg_func_start(TCGContext *s)
982{
983 tcg_pool_reset(s);
984 s->nb_temps = s->nb_globals;
0ec9eabc
RH
985
986 /* No temps have been previously allocated for size or locality. */
987 memset(s->free_temps, 0, sizeof(s->free_temps));
988
c0522136
RH
989 /* No constant temps have been previously allocated. */
990 for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
991 if (s->const_table[i]) {
992 g_hash_table_remove_all(s->const_table[i]);
993 }
994 }
995
abebf925 996 s->nb_ops = 0;
c896fe29
FB
997 s->nb_labels = 0;
998 s->current_frame_offset = s->frame_start;
999
0a209d4b
RH
1000#ifdef CONFIG_DEBUG_TCG
1001 s->goto_tb_issue_mask = 0;
1002#endif
1003
15fa08f8
RH
1004 QTAILQ_INIT(&s->ops);
1005 QTAILQ_INIT(&s->free_ops);
bef16ab4 1006 QSIMPLEQ_INIT(&s->labels);
c896fe29
FB
1007}
1008
ae30e866 1009static TCGTemp *tcg_temp_alloc(TCGContext *s)
7ca4b752
RH
1010{
1011 int n = s->nb_temps++;
ae30e866
RH
1012
1013 if (n >= TCG_MAX_TEMPS) {
db6b7d0c 1014 tcg_raise_tb_overflow(s);
ae30e866 1015 }
7ca4b752
RH
1016 return memset(&s->temps[n], 0, sizeof(TCGTemp));
1017}
1018
ae30e866 1019static TCGTemp *tcg_global_alloc(TCGContext *s)
7ca4b752 1020{
fa477d25
RH
1021 TCGTemp *ts;
1022
7ca4b752 1023 tcg_debug_assert(s->nb_globals == s->nb_temps);
ae30e866 1024 tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
7ca4b752 1025 s->nb_globals++;
fa477d25 1026 ts = tcg_temp_alloc(s);
ee17db83 1027 ts->kind = TEMP_GLOBAL;
fa477d25
RH
1028
1029 return ts;
c896fe29
FB
1030}
1031
085272b3
RH
1032static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
1033 TCGReg reg, const char *name)
c896fe29 1034{
c896fe29 1035 TCGTemp *ts;
c896fe29 1036
b3a62939 1037 if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
c896fe29 1038 tcg_abort();
b3a62939 1039 }
7ca4b752
RH
1040
1041 ts = tcg_global_alloc(s);
c896fe29
FB
1042 ts->base_type = type;
1043 ts->type = type;
ee17db83 1044 ts->kind = TEMP_FIXED;
c896fe29 1045 ts->reg = reg;
c896fe29 1046 ts->name = name;
c896fe29 1047 tcg_regset_set_reg(s->reserved_regs, reg);
7ca4b752 1048
085272b3 1049 return ts;
a7812ae4
PB
1050}
1051
b6638662 1052void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
b3a62939 1053{
b3a62939
RH
1054 s->frame_start = start;
1055 s->frame_end = start + size;
085272b3
RH
1056 s->frame_temp
1057 = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
b3a62939
RH
1058}
1059
085272b3
RH
1060TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
1061 intptr_t offset, const char *name)
c896fe29 1062{
b1311c4a 1063 TCGContext *s = tcg_ctx;
dc41aa7d 1064 TCGTemp *base_ts = tcgv_ptr_temp(base);
7ca4b752 1065 TCGTemp *ts = tcg_global_alloc(s);
aef85402 1066 int indirect_reg = 0;
c896fe29 1067
c0522136
RH
1068 switch (base_ts->kind) {
1069 case TEMP_FIXED:
1070 break;
1071 case TEMP_GLOBAL:
5a18407f
RH
1072 /* We do not support double-indirect registers. */
1073 tcg_debug_assert(!base_ts->indirect_reg);
b3915dbb 1074 base_ts->indirect_base = 1;
5a18407f
RH
1075 s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
1076 ? 2 : 1);
1077 indirect_reg = 1;
c0522136
RH
1078 break;
1079 default:
1080 g_assert_not_reached();
b3915dbb
RH
1081 }
1082
7ca4b752
RH
1083 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1084 TCGTemp *ts2 = tcg_global_alloc(s);
c896fe29 1085 char buf[64];
7ca4b752
RH
1086
1087 ts->base_type = TCG_TYPE_I64;
c896fe29 1088 ts->type = TCG_TYPE_I32;
b3915dbb 1089 ts->indirect_reg = indirect_reg;
c896fe29 1090 ts->mem_allocated = 1;
b3a62939 1091 ts->mem_base = base_ts;
aef85402 1092 ts->mem_offset = offset;
c896fe29
FB
1093 pstrcpy(buf, sizeof(buf), name);
1094 pstrcat(buf, sizeof(buf), "_0");
1095 ts->name = strdup(buf);
c896fe29 1096
7ca4b752
RH
1097 tcg_debug_assert(ts2 == ts + 1);
1098 ts2->base_type = TCG_TYPE_I64;
1099 ts2->type = TCG_TYPE_I32;
b3915dbb 1100 ts2->indirect_reg = indirect_reg;
7ca4b752
RH
1101 ts2->mem_allocated = 1;
1102 ts2->mem_base = base_ts;
aef85402 1103 ts2->mem_offset = offset + 4;
fac87bd2 1104 ts2->temp_subindex = 1;
c896fe29
FB
1105 pstrcpy(buf, sizeof(buf), name);
1106 pstrcat(buf, sizeof(buf), "_1");
120c1084 1107 ts2->name = strdup(buf);
7ca4b752 1108 } else {
c896fe29
FB
1109 ts->base_type = type;
1110 ts->type = type;
b3915dbb 1111 ts->indirect_reg = indirect_reg;
c896fe29 1112 ts->mem_allocated = 1;
b3a62939 1113 ts->mem_base = base_ts;
c896fe29 1114 ts->mem_offset = offset;
c896fe29 1115 ts->name = name;
c896fe29 1116 }
085272b3 1117 return ts;
a7812ae4
PB
1118}
1119
5bfa8034 1120TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
c896fe29 1121{
b1311c4a 1122 TCGContext *s = tcg_ctx;
ee17db83 1123 TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
c896fe29 1124 TCGTemp *ts;
641d5fbe 1125 int idx, k;
c896fe29 1126
0ec9eabc
RH
1127 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
1128 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
1129 if (idx < TCG_MAX_TEMPS) {
1130 /* There is already an available temp with the right type. */
1131 clear_bit(idx, s->free_temps[k].l);
1132
e8996ee0 1133 ts = &s->temps[idx];
e8996ee0 1134 ts->temp_allocated = 1;
7ca4b752 1135 tcg_debug_assert(ts->base_type == type);
ee17db83 1136 tcg_debug_assert(ts->kind == kind);
e8996ee0 1137 } else {
7ca4b752
RH
1138 ts = tcg_temp_alloc(s);
1139 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1140 TCGTemp *ts2 = tcg_temp_alloc(s);
1141
f6aa2f7d 1142 ts->base_type = type;
e8996ee0
FB
1143 ts->type = TCG_TYPE_I32;
1144 ts->temp_allocated = 1;
ee17db83 1145 ts->kind = kind;
7ca4b752
RH
1146
1147 tcg_debug_assert(ts2 == ts + 1);
1148 ts2->base_type = TCG_TYPE_I64;
1149 ts2->type = TCG_TYPE_I32;
1150 ts2->temp_allocated = 1;
fac87bd2 1151 ts2->temp_subindex = 1;
ee17db83 1152 ts2->kind = kind;
7ca4b752 1153 } else {
e8996ee0
FB
1154 ts->base_type = type;
1155 ts->type = type;
1156 ts->temp_allocated = 1;
ee17db83 1157 ts->kind = kind;
e8996ee0 1158 }
c896fe29 1159 }
27bfd83c
PM
1160
1161#if defined(CONFIG_DEBUG_TCG)
1162 s->temps_in_use++;
1163#endif
085272b3 1164 return ts;
c896fe29
FB
1165}
1166
d2fd745f
RH
1167TCGv_vec tcg_temp_new_vec(TCGType type)
1168{
1169 TCGTemp *t;
1170
1171#ifdef CONFIG_DEBUG_TCG
1172 switch (type) {
1173 case TCG_TYPE_V64:
1174 assert(TCG_TARGET_HAS_v64);
1175 break;
1176 case TCG_TYPE_V128:
1177 assert(TCG_TARGET_HAS_v128);
1178 break;
1179 case TCG_TYPE_V256:
1180 assert(TCG_TARGET_HAS_v256);
1181 break;
1182 default:
1183 g_assert_not_reached();
1184 }
1185#endif
1186
1187 t = tcg_temp_new_internal(type, 0);
1188 return temp_tcgv_vec(t);
1189}
1190
1191/* Create a new temp of the same type as an existing temp. */
1192TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
1193{
1194 TCGTemp *t = tcgv_vec_temp(match);
1195
1196 tcg_debug_assert(t->temp_allocated != 0);
1197
1198 t = tcg_temp_new_internal(t->base_type, 0);
1199 return temp_tcgv_vec(t);
1200}
1201
5bfa8034 1202void tcg_temp_free_internal(TCGTemp *ts)
c896fe29 1203{
b1311c4a 1204 TCGContext *s = tcg_ctx;
085272b3 1205 int k, idx;
c896fe29 1206
c7482438
RH
1207 switch (ts->kind) {
1208 case TEMP_CONST:
1209 /*
1210 * In order to simplify users of tcg_constant_*,
1211 * silently ignore free.
1212 */
c0522136 1213 return;
c7482438
RH
1214 case TEMP_NORMAL:
1215 case TEMP_LOCAL:
1216 break;
1217 default:
1218 g_assert_not_reached();
c0522136
RH
1219 }
1220
27bfd83c
PM
1221#if defined(CONFIG_DEBUG_TCG)
1222 s->temps_in_use--;
1223 if (s->temps_in_use < 0) {
1224 fprintf(stderr, "More temporaries freed than allocated!\n");
1225 }
1226#endif
1227
eabb7b91 1228 tcg_debug_assert(ts->temp_allocated != 0);
e8996ee0 1229 ts->temp_allocated = 0;
0ec9eabc 1230
085272b3 1231 idx = temp_idx(ts);
ee17db83 1232 k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
0ec9eabc 1233 set_bit(idx, s->free_temps[k].l);
c896fe29
FB
1234}
1235
c0522136
RH
1236TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
1237{
1238 TCGContext *s = tcg_ctx;
1239 GHashTable *h = s->const_table[type];
1240 TCGTemp *ts;
1241
1242 if (h == NULL) {
1243 h = g_hash_table_new(g_int64_hash, g_int64_equal);
1244 s->const_table[type] = h;
1245 }
1246
1247 ts = g_hash_table_lookup(h, &val);
1248 if (ts == NULL) {
aef85402
RH
1249 int64_t *val_ptr;
1250
c0522136
RH
1251 ts = tcg_temp_alloc(s);
1252
1253 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1254 TCGTemp *ts2 = tcg_temp_alloc(s);
1255
aef85402
RH
1256 tcg_debug_assert(ts2 == ts + 1);
1257
c0522136
RH
1258 ts->base_type = TCG_TYPE_I64;
1259 ts->type = TCG_TYPE_I32;
1260 ts->kind = TEMP_CONST;
1261 ts->temp_allocated = 1;
c0522136 1262
c0522136
RH
1263 ts2->base_type = TCG_TYPE_I64;
1264 ts2->type = TCG_TYPE_I32;
1265 ts2->kind = TEMP_CONST;
1266 ts2->temp_allocated = 1;
fac87bd2 1267 ts2->temp_subindex = 1;
aef85402
RH
1268
1269 /*
1270 * Retain the full value of the 64-bit constant in the low
1271 * part, so that the hash table works. Actual uses will
1272 * truncate the value to the low part.
1273 */
1274 ts[HOST_BIG_ENDIAN].val = val;
1275 ts[!HOST_BIG_ENDIAN].val = val >> 32;
1276 val_ptr = &ts[HOST_BIG_ENDIAN].val;
c0522136
RH
1277 } else {
1278 ts->base_type = type;
1279 ts->type = type;
1280 ts->kind = TEMP_CONST;
1281 ts->temp_allocated = 1;
1282 ts->val = val;
aef85402 1283 val_ptr = &ts->val;
c0522136 1284 }
aef85402 1285 g_hash_table_insert(h, val_ptr, ts);
c0522136
RH
1286 }
1287
1288 return ts;
1289}
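
/*
 * Informal note (not part of the original file): callers normally reach this
 * through typed wrappers such as tcg_constant_i32()/tcg_constant_i64(), which
 * hand out interned TEMP_CONST temporaries.  These must not be freed by the
 * caller; tcg_temp_free_internal() above silently ignores them for exactly
 * that reason.
 */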
1290
1291TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
1292{
1293 val = dup_const(vece, val);
1294 return temp_tcgv_vec(tcg_constant_internal(type, val));
1295}
1296
88d4005b
RH
1297TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
1298{
1299 TCGTemp *t = tcgv_vec_temp(match);
1300
1301 tcg_debug_assert(t->temp_allocated != 0);
1302 return tcg_constant_vec(t->base_type, vece, val);
1303}
1304
a7812ae4 1305TCGv_i32 tcg_const_i32(int32_t val)
c896fe29 1306{
a7812ae4
PB
1307 TCGv_i32 t0;
1308 t0 = tcg_temp_new_i32();
e8996ee0
FB
1309 tcg_gen_movi_i32(t0, val);
1310 return t0;
1311}
c896fe29 1312
a7812ae4 1313TCGv_i64 tcg_const_i64(int64_t val)
e8996ee0 1314{
a7812ae4
PB
1315 TCGv_i64 t0;
1316 t0 = tcg_temp_new_i64();
e8996ee0
FB
1317 tcg_gen_movi_i64(t0, val);
1318 return t0;
c896fe29
FB
1319}
1320
a7812ae4 1321TCGv_i32 tcg_const_local_i32(int32_t val)
bdffd4a9 1322{
a7812ae4
PB
1323 TCGv_i32 t0;
1324 t0 = tcg_temp_local_new_i32();
bdffd4a9
AJ
1325 tcg_gen_movi_i32(t0, val);
1326 return t0;
1327}
1328
a7812ae4 1329TCGv_i64 tcg_const_local_i64(int64_t val)
bdffd4a9 1330{
a7812ae4
PB
1331 TCGv_i64 t0;
1332 t0 = tcg_temp_local_new_i64();
bdffd4a9
AJ
1333 tcg_gen_movi_i64(t0, val);
1334 return t0;
1335}
1336
27bfd83c
PM
1337#if defined(CONFIG_DEBUG_TCG)
1338void tcg_clear_temp_count(void)
1339{
b1311c4a 1340 TCGContext *s = tcg_ctx;
27bfd83c
PM
1341 s->temps_in_use = 0;
1342}
1343
1344int tcg_check_temp_count(void)
1345{
b1311c4a 1346 TCGContext *s = tcg_ctx;
27bfd83c
PM
1347 if (s->temps_in_use) {
1348 /* Clear the count so that we don't give another
1349 * warning immediately next time around.
1350 */
1351 s->temps_in_use = 0;
1352 return 1;
1353 }
1354 return 0;
1355}
1356#endif
1357
be0f34b5
RH
1358/* Return true if OP may appear in the opcode stream.
1359 Test the runtime variable that controls each opcode. */
1360bool tcg_op_supported(TCGOpcode op)
1361{
d2fd745f
RH
1362 const bool have_vec
1363 = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
1364
be0f34b5
RH
1365 switch (op) {
1366 case INDEX_op_discard:
1367 case INDEX_op_set_label:
1368 case INDEX_op_call:
1369 case INDEX_op_br:
1370 case INDEX_op_mb:
1371 case INDEX_op_insn_start:
1372 case INDEX_op_exit_tb:
1373 case INDEX_op_goto_tb:
f4e01e30 1374 case INDEX_op_goto_ptr:
be0f34b5
RH
1375 case INDEX_op_qemu_ld_i32:
1376 case INDEX_op_qemu_st_i32:
1377 case INDEX_op_qemu_ld_i64:
1378 case INDEX_op_qemu_st_i64:
1379 return true;
1380
07ce0b05
RH
1381 case INDEX_op_qemu_st8_i32:
1382 return TCG_TARGET_HAS_qemu_st8_i32;
1383
be0f34b5 1384 case INDEX_op_mov_i32:
be0f34b5
RH
1385 case INDEX_op_setcond_i32:
1386 case INDEX_op_brcond_i32:
1387 case INDEX_op_ld8u_i32:
1388 case INDEX_op_ld8s_i32:
1389 case INDEX_op_ld16u_i32:
1390 case INDEX_op_ld16s_i32:
1391 case INDEX_op_ld_i32:
1392 case INDEX_op_st8_i32:
1393 case INDEX_op_st16_i32:
1394 case INDEX_op_st_i32:
1395 case INDEX_op_add_i32:
1396 case INDEX_op_sub_i32:
1397 case INDEX_op_mul_i32:
1398 case INDEX_op_and_i32:
1399 case INDEX_op_or_i32:
1400 case INDEX_op_xor_i32:
1401 case INDEX_op_shl_i32:
1402 case INDEX_op_shr_i32:
1403 case INDEX_op_sar_i32:
1404 return true;
1405
1406 case INDEX_op_movcond_i32:
1407 return TCG_TARGET_HAS_movcond_i32;
1408 case INDEX_op_div_i32:
1409 case INDEX_op_divu_i32:
1410 return TCG_TARGET_HAS_div_i32;
1411 case INDEX_op_rem_i32:
1412 case INDEX_op_remu_i32:
1413 return TCG_TARGET_HAS_rem_i32;
1414 case INDEX_op_div2_i32:
1415 case INDEX_op_divu2_i32:
1416 return TCG_TARGET_HAS_div2_i32;
1417 case INDEX_op_rotl_i32:
1418 case INDEX_op_rotr_i32:
1419 return TCG_TARGET_HAS_rot_i32;
1420 case INDEX_op_deposit_i32:
1421 return TCG_TARGET_HAS_deposit_i32;
1422 case INDEX_op_extract_i32:
1423 return TCG_TARGET_HAS_extract_i32;
1424 case INDEX_op_sextract_i32:
1425 return TCG_TARGET_HAS_sextract_i32;
fce1296f
RH
1426 case INDEX_op_extract2_i32:
1427 return TCG_TARGET_HAS_extract2_i32;
be0f34b5
RH
1428 case INDEX_op_add2_i32:
1429 return TCG_TARGET_HAS_add2_i32;
1430 case INDEX_op_sub2_i32:
1431 return TCG_TARGET_HAS_sub2_i32;
1432 case INDEX_op_mulu2_i32:
1433 return TCG_TARGET_HAS_mulu2_i32;
1434 case INDEX_op_muls2_i32:
1435 return TCG_TARGET_HAS_muls2_i32;
1436 case INDEX_op_muluh_i32:
1437 return TCG_TARGET_HAS_muluh_i32;
1438 case INDEX_op_mulsh_i32:
1439 return TCG_TARGET_HAS_mulsh_i32;
1440 case INDEX_op_ext8s_i32:
1441 return TCG_TARGET_HAS_ext8s_i32;
1442 case INDEX_op_ext16s_i32:
1443 return TCG_TARGET_HAS_ext16s_i32;
1444 case INDEX_op_ext8u_i32:
1445 return TCG_TARGET_HAS_ext8u_i32;
1446 case INDEX_op_ext16u_i32:
1447 return TCG_TARGET_HAS_ext16u_i32;
1448 case INDEX_op_bswap16_i32:
1449 return TCG_TARGET_HAS_bswap16_i32;
1450 case INDEX_op_bswap32_i32:
1451 return TCG_TARGET_HAS_bswap32_i32;
1452 case INDEX_op_not_i32:
1453 return TCG_TARGET_HAS_not_i32;
1454 case INDEX_op_neg_i32:
1455 return TCG_TARGET_HAS_neg_i32;
1456 case INDEX_op_andc_i32:
1457 return TCG_TARGET_HAS_andc_i32;
1458 case INDEX_op_orc_i32:
1459 return TCG_TARGET_HAS_orc_i32;
1460 case INDEX_op_eqv_i32:
1461 return TCG_TARGET_HAS_eqv_i32;
1462 case INDEX_op_nand_i32:
1463 return TCG_TARGET_HAS_nand_i32;
1464 case INDEX_op_nor_i32:
1465 return TCG_TARGET_HAS_nor_i32;
1466 case INDEX_op_clz_i32:
1467 return TCG_TARGET_HAS_clz_i32;
1468 case INDEX_op_ctz_i32:
1469 return TCG_TARGET_HAS_ctz_i32;
1470 case INDEX_op_ctpop_i32:
1471 return TCG_TARGET_HAS_ctpop_i32;
1472
1473 case INDEX_op_brcond2_i32:
1474 case INDEX_op_setcond2_i32:
1475 return TCG_TARGET_REG_BITS == 32;
1476
1477 case INDEX_op_mov_i64:
be0f34b5
RH
1478 case INDEX_op_setcond_i64:
1479 case INDEX_op_brcond_i64:
1480 case INDEX_op_ld8u_i64:
1481 case INDEX_op_ld8s_i64:
1482 case INDEX_op_ld16u_i64:
1483 case INDEX_op_ld16s_i64:
1484 case INDEX_op_ld32u_i64:
1485 case INDEX_op_ld32s_i64:
1486 case INDEX_op_ld_i64:
1487 case INDEX_op_st8_i64:
1488 case INDEX_op_st16_i64:
1489 case INDEX_op_st32_i64:
1490 case INDEX_op_st_i64:
1491 case INDEX_op_add_i64:
1492 case INDEX_op_sub_i64:
1493 case INDEX_op_mul_i64:
1494 case INDEX_op_and_i64:
1495 case INDEX_op_or_i64:
1496 case INDEX_op_xor_i64:
1497 case INDEX_op_shl_i64:
1498 case INDEX_op_shr_i64:
1499 case INDEX_op_sar_i64:
1500 case INDEX_op_ext_i32_i64:
1501 case INDEX_op_extu_i32_i64:
1502 return TCG_TARGET_REG_BITS == 64;
1503
1504 case INDEX_op_movcond_i64:
1505 return TCG_TARGET_HAS_movcond_i64;
1506 case INDEX_op_div_i64:
1507 case INDEX_op_divu_i64:
1508 return TCG_TARGET_HAS_div_i64;
1509 case INDEX_op_rem_i64:
1510 case INDEX_op_remu_i64:
1511 return TCG_TARGET_HAS_rem_i64;
1512 case INDEX_op_div2_i64:
1513 case INDEX_op_divu2_i64:
1514 return TCG_TARGET_HAS_div2_i64;
1515 case INDEX_op_rotl_i64:
1516 case INDEX_op_rotr_i64:
1517 return TCG_TARGET_HAS_rot_i64;
1518 case INDEX_op_deposit_i64:
1519 return TCG_TARGET_HAS_deposit_i64;
1520 case INDEX_op_extract_i64:
1521 return TCG_TARGET_HAS_extract_i64;
1522 case INDEX_op_sextract_i64:
1523 return TCG_TARGET_HAS_sextract_i64;
fce1296f
RH
1524 case INDEX_op_extract2_i64:
1525 return TCG_TARGET_HAS_extract2_i64;
be0f34b5
RH
1526 case INDEX_op_extrl_i64_i32:
1527 return TCG_TARGET_HAS_extrl_i64_i32;
1528 case INDEX_op_extrh_i64_i32:
1529 return TCG_TARGET_HAS_extrh_i64_i32;
1530 case INDEX_op_ext8s_i64:
1531 return TCG_TARGET_HAS_ext8s_i64;
1532 case INDEX_op_ext16s_i64:
1533 return TCG_TARGET_HAS_ext16s_i64;
1534 case INDEX_op_ext32s_i64:
1535 return TCG_TARGET_HAS_ext32s_i64;
1536 case INDEX_op_ext8u_i64:
1537 return TCG_TARGET_HAS_ext8u_i64;
1538 case INDEX_op_ext16u_i64:
1539 return TCG_TARGET_HAS_ext16u_i64;
1540 case INDEX_op_ext32u_i64:
1541 return TCG_TARGET_HAS_ext32u_i64;
1542 case INDEX_op_bswap16_i64:
1543 return TCG_TARGET_HAS_bswap16_i64;
1544 case INDEX_op_bswap32_i64:
1545 return TCG_TARGET_HAS_bswap32_i64;
1546 case INDEX_op_bswap64_i64:
1547 return TCG_TARGET_HAS_bswap64_i64;
1548 case INDEX_op_not_i64:
1549 return TCG_TARGET_HAS_not_i64;
1550 case INDEX_op_neg_i64:
1551 return TCG_TARGET_HAS_neg_i64;
1552 case INDEX_op_andc_i64:
1553 return TCG_TARGET_HAS_andc_i64;
1554 case INDEX_op_orc_i64:
1555 return TCG_TARGET_HAS_orc_i64;
1556 case INDEX_op_eqv_i64:
1557 return TCG_TARGET_HAS_eqv_i64;
1558 case INDEX_op_nand_i64:
1559 return TCG_TARGET_HAS_nand_i64;
1560 case INDEX_op_nor_i64:
1561 return TCG_TARGET_HAS_nor_i64;
1562 case INDEX_op_clz_i64:
1563 return TCG_TARGET_HAS_clz_i64;
1564 case INDEX_op_ctz_i64:
1565 return TCG_TARGET_HAS_ctz_i64;
1566 case INDEX_op_ctpop_i64:
1567 return TCG_TARGET_HAS_ctpop_i64;
1568 case INDEX_op_add2_i64:
1569 return TCG_TARGET_HAS_add2_i64;
1570 case INDEX_op_sub2_i64:
1571 return TCG_TARGET_HAS_sub2_i64;
1572 case INDEX_op_mulu2_i64:
1573 return TCG_TARGET_HAS_mulu2_i64;
1574 case INDEX_op_muls2_i64:
1575 return TCG_TARGET_HAS_muls2_i64;
1576 case INDEX_op_muluh_i64:
1577 return TCG_TARGET_HAS_muluh_i64;
1578 case INDEX_op_mulsh_i64:
1579 return TCG_TARGET_HAS_mulsh_i64;
1580
d2fd745f
RH
1581 case INDEX_op_mov_vec:
1582 case INDEX_op_dup_vec:
37ee55a0 1583 case INDEX_op_dupm_vec:
d2fd745f
RH
1584 case INDEX_op_ld_vec:
1585 case INDEX_op_st_vec:
1586 case INDEX_op_add_vec:
1587 case INDEX_op_sub_vec:
1588 case INDEX_op_and_vec:
1589 case INDEX_op_or_vec:
1590 case INDEX_op_xor_vec:
212be173 1591 case INDEX_op_cmp_vec:
d2fd745f
RH
1592 return have_vec;
1593 case INDEX_op_dup2_vec:
1594 return have_vec && TCG_TARGET_REG_BITS == 32;
1595 case INDEX_op_not_vec:
1596 return have_vec && TCG_TARGET_HAS_not_vec;
1597 case INDEX_op_neg_vec:
1598 return have_vec && TCG_TARGET_HAS_neg_vec;
bcefc902
RH
1599 case INDEX_op_abs_vec:
1600 return have_vec && TCG_TARGET_HAS_abs_vec;
d2fd745f
RH
1601 case INDEX_op_andc_vec:
1602 return have_vec && TCG_TARGET_HAS_andc_vec;
1603 case INDEX_op_orc_vec:
1604 return have_vec && TCG_TARGET_HAS_orc_vec;
ed523473
RH
1605 case INDEX_op_nand_vec:
1606 return have_vec && TCG_TARGET_HAS_nand_vec;
1607 case INDEX_op_nor_vec:
1608 return have_vec && TCG_TARGET_HAS_nor_vec;
1609 case INDEX_op_eqv_vec:
1610 return have_vec && TCG_TARGET_HAS_eqv_vec;
3774030a
RH
1611 case INDEX_op_mul_vec:
1612 return have_vec && TCG_TARGET_HAS_mul_vec;
d0ec9796
RH
1613 case INDEX_op_shli_vec:
1614 case INDEX_op_shri_vec:
1615 case INDEX_op_sari_vec:
1616 return have_vec && TCG_TARGET_HAS_shi_vec;
1617 case INDEX_op_shls_vec:
1618 case INDEX_op_shrs_vec:
1619 case INDEX_op_sars_vec:
1620 return have_vec && TCG_TARGET_HAS_shs_vec;
1621 case INDEX_op_shlv_vec:
1622 case INDEX_op_shrv_vec:
1623 case INDEX_op_sarv_vec:
1624 return have_vec && TCG_TARGET_HAS_shv_vec;
b0f7e744
RH
1625 case INDEX_op_rotli_vec:
1626 return have_vec && TCG_TARGET_HAS_roti_vec;
23850a74
RH
1627 case INDEX_op_rotls_vec:
1628 return have_vec && TCG_TARGET_HAS_rots_vec;
5d0ceda9
RH
1629 case INDEX_op_rotlv_vec:
1630 case INDEX_op_rotrv_vec:
1631 return have_vec && TCG_TARGET_HAS_rotv_vec;
8afaf050
RH
1632 case INDEX_op_ssadd_vec:
1633 case INDEX_op_usadd_vec:
1634 case INDEX_op_sssub_vec:
1635 case INDEX_op_ussub_vec:
1636 return have_vec && TCG_TARGET_HAS_sat_vec;
dd0a0fcd
RH
1637 case INDEX_op_smin_vec:
1638 case INDEX_op_umin_vec:
1639 case INDEX_op_smax_vec:
1640 case INDEX_op_umax_vec:
1641 return have_vec && TCG_TARGET_HAS_minmax_vec;
38dc1294
RH
1642 case INDEX_op_bitsel_vec:
1643 return have_vec && TCG_TARGET_HAS_bitsel_vec;
f75da298
RH
1644 case INDEX_op_cmpsel_vec:
1645 return have_vec && TCG_TARGET_HAS_cmpsel_vec;
d2fd745f 1646
db432672
RH
1647 default:
1648 tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
1649 return true;
be0f34b5 1650 }
be0f34b5
RH
1651}
1652
39004a71
RH
1653static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
1654
ae8b75dc 1655void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
c896fe29 1656{
3e92aa34 1657 const TCGHelperInfo *info;
39004a71
RH
1658 TCGv_i64 extend_free[MAX_CALL_IARGS];
1659 int n_extend = 0;
75e8b9b7 1660 TCGOp *op;
39004a71 1661 int i, n, pi = 0, total_args;
afb49896 1662
619205fd 1663 info = g_hash_table_lookup(helper_table, (gpointer)func);
39004a71
RH
1664 total_args = info->nr_out + info->nr_in + 2;
1665 op = tcg_op_alloc(INDEX_op_call, total_args);
2bece2c8 1666
38b47b19
EC
1667#ifdef CONFIG_PLUGIN
1668 /* detect non-plugin helpers */
1669 if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
1670 tcg_ctx->plugin_insn->calls_helpers = true;
1671 }
1672#endif
1673
39004a71
RH
1674 TCGOP_CALLO(op) = n = info->nr_out;
1675 switch (n) {
1676 case 0:
1677 tcg_debug_assert(ret == NULL);
1678 break;
1679 case 1:
1680 tcg_debug_assert(ret != NULL);
1681 op->args[pi++] = temp_arg(ret);
1682 break;
1683 case 2:
1684 tcg_debug_assert(ret != NULL);
1685 tcg_debug_assert(ret->base_type == ret->type + 1);
1686 tcg_debug_assert(ret->temp_subindex == 0);
1687 op->args[pi++] = temp_arg(ret);
1688 op->args[pi++] = temp_arg(ret + 1);
1689 break;
1690 default:
1691 g_assert_not_reached();
1692 }
1693
1694 TCGOP_CALLI(op) = n = info->nr_in;
1695 for (i = 0; i < n; i++) {
1696 const TCGCallArgumentLoc *loc = &info->in[i];
1697 TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
1698
1699 switch (loc->kind) {
1700 case TCG_CALL_ARG_NORMAL:
1701 op->args[pi++] = temp_arg(ts);
1702 break;
eb8b0224 1703
39004a71
RH
1704 case TCG_CALL_ARG_EXTEND_U:
1705 case TCG_CALL_ARG_EXTEND_S:
1706 {
eb8b0224 1707 TCGv_i64 temp = tcg_temp_new_i64();
39004a71
RH
1708 TCGv_i32 orig = temp_tcgv_i32(ts);
1709
1710 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
eb8b0224
RH
1711 tcg_gen_ext_i32_i64(temp, orig);
1712 } else {
1713 tcg_gen_extu_i32_i64(temp, orig);
1714 }
39004a71
RH
1715 op->args[pi++] = tcgv_i64_arg(temp);
1716 extend_free[n_extend++] = temp;
2bece2c8 1717 }
e2a9dd6b 1718 break;
7b7d8b2d 1719
e2a9dd6b
RH
1720 default:
1721 g_assert_not_reached();
c896fe29
FB
1722 }
1723 }
75e8b9b7 1724 op->args[pi++] = (uintptr_t)func;
3e92aa34 1725 op->args[pi++] = (uintptr_t)info;
39004a71 1726 tcg_debug_assert(pi == total_args);
a7812ae4 1727
39004a71 1728 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
7319d83a 1729
39004a71
RH
1730 tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
1731 for (i = 0; i < n_extend; ++i) {
1732 tcg_temp_free_i64(extend_free[i]);
2bece2c8 1733 }
c896fe29 1734}
c896fe29 1735
8fcd3692 1736static void tcg_reg_alloc_start(TCGContext *s)
c896fe29 1737{
ac3b8891 1738 int i, n;
ac3b8891 1739
ee17db83
RH
1740 for (i = 0, n = s->nb_temps; i < n; i++) {
1741 TCGTemp *ts = &s->temps[i];
1742 TCGTempVal val = TEMP_VAL_MEM;
1743
1744 switch (ts->kind) {
c0522136
RH
1745 case TEMP_CONST:
1746 val = TEMP_VAL_CONST;
1747 break;
ee17db83
RH
1748 case TEMP_FIXED:
1749 val = TEMP_VAL_REG;
1750 break;
1751 case TEMP_GLOBAL:
1752 break;
1753 case TEMP_NORMAL:
c7482438 1754 case TEMP_EBB:
ee17db83
RH
1755 val = TEMP_VAL_DEAD;
1756 /* fall through */
1757 case TEMP_LOCAL:
1758 ts->mem_allocated = 0;
1759 break;
1760 default:
1761 g_assert_not_reached();
1762 }
1763 ts->val_type = val;
e8996ee0 1764 }
f8b2f202
RH
1765
1766 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
c896fe29
FB
1767}
1768
f8b2f202
RH
1769static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1770 TCGTemp *ts)
c896fe29 1771{
1807f4c4 1772 int idx = temp_idx(ts);
ac56dd48 1773
ee17db83
RH
1774 switch (ts->kind) {
1775 case TEMP_FIXED:
1776 case TEMP_GLOBAL:
ac56dd48 1777 pstrcpy(buf, buf_size, ts->name);
ee17db83
RH
1778 break;
1779 case TEMP_LOCAL:
f8b2f202 1780 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
ee17db83 1781 break;
c7482438
RH
1782 case TEMP_EBB:
1783 snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
1784 break;
ee17db83 1785 case TEMP_NORMAL:
f8b2f202 1786 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
ee17db83 1787 break;
c0522136
RH
1788 case TEMP_CONST:
1789 switch (ts->type) {
1790 case TCG_TYPE_I32:
1791 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1792 break;
1793#if TCG_TARGET_REG_BITS > 32
1794 case TCG_TYPE_I64:
1795 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1796 break;
1797#endif
1798 case TCG_TYPE_V64:
1799 case TCG_TYPE_V128:
1800 case TCG_TYPE_V256:
1801 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1802 64 << (ts->type - TCG_TYPE_V64), ts->val);
1803 break;
1804 default:
1805 g_assert_not_reached();
1806 }
1807 break;
c896fe29
FB
1808 }
1809 return buf;
1810}
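/*
 * Illustrative sketch, not part of the original file: with the naming
 * scheme above, an op dump renders temporaries roughly as follows
 * (indices are hypothetical):
 *   env         fixed/global temps are printed by name
 *   loc2        TEMP_LOCAL, numbered past the globals
 *   ebb0        TEMP_EBB
 *   tmp5        TEMP_NORMAL
 *   $0x1f       TEMP_CONST of type TCG_TYPE_I32
 *   v128$0x...  vector TEMP_CONST, width in bits encoded in the prefix
 */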
1811
43439139
RH
1812static char *tcg_get_arg_str(TCGContext *s, char *buf,
1813 int buf_size, TCGArg arg)
f8b2f202 1814{
43439139 1815 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
f8b2f202
RH
1816}
1817
f48f3ede
BS
1818static const char * const cond_name[] =
1819{
0aed257f
RH
1820 [TCG_COND_NEVER] = "never",
1821 [TCG_COND_ALWAYS] = "always",
f48f3ede
BS
1822 [TCG_COND_EQ] = "eq",
1823 [TCG_COND_NE] = "ne",
1824 [TCG_COND_LT] = "lt",
1825 [TCG_COND_GE] = "ge",
1826 [TCG_COND_LE] = "le",
1827 [TCG_COND_GT] = "gt",
1828 [TCG_COND_LTU] = "ltu",
1829 [TCG_COND_GEU] = "geu",
1830 [TCG_COND_LEU] = "leu",
1831 [TCG_COND_GTU] = "gtu"
1832};
1833
f713d6ad
RH
1834static const char * const ldst_name[] =
1835{
1836 [MO_UB] = "ub",
1837 [MO_SB] = "sb",
1838 [MO_LEUW] = "leuw",
1839 [MO_LESW] = "lesw",
1840 [MO_LEUL] = "leul",
1841 [MO_LESL] = "lesl",
fc313c64 1842 [MO_LEUQ] = "leq",
f713d6ad
RH
1843 [MO_BEUW] = "beuw",
1844 [MO_BESW] = "besw",
1845 [MO_BEUL] = "beul",
1846 [MO_BESL] = "besl",
fc313c64 1847 [MO_BEUQ] = "beq",
f713d6ad
RH
1848};
1849
1f00b27f 1850static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
52bf9771 1851#ifdef TARGET_ALIGNED_ONLY
1f00b27f
SS
1852 [MO_UNALN >> MO_ASHIFT] = "un+",
1853 [MO_ALIGN >> MO_ASHIFT] = "",
1854#else
1855 [MO_UNALN >> MO_ASHIFT] = "",
1856 [MO_ALIGN >> MO_ASHIFT] = "al+",
1857#endif
1858 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1859 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1860 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1861 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1862 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1863 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1864};
1865
587195bd
RH
1866static const char bswap_flag_name[][6] = {
1867 [TCG_BSWAP_IZ] = "iz",
1868 [TCG_BSWAP_OZ] = "oz",
1869 [TCG_BSWAP_OS] = "os",
1870 [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
1871 [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
1872};
1873
b016486e
RH
1874static inline bool tcg_regset_single(TCGRegSet d)
1875{
1876 return (d & (d - 1)) == 0;
1877}
1878
1879static inline TCGReg tcg_regset_first(TCGRegSet d)
1880{
1881 if (TCG_TARGET_NB_REGS <= 32) {
1882 return ctz32(d);
1883 } else {
1884 return ctz64(d);
1885 }
1886}
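/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above rely on standard bit tricks, assuming TCG_TARGET_NB_REGS <= 32.
 *
 *     TCGRegSet d = 0x08;                // only bit 3 set
 *     tcg_regset_single(d)  -> true      // 0x08 & 0x07 == 0
 *     tcg_regset_first(d)   -> 3         // ctz32(0x08)
 *
 *     d = 0x0c;                          // bits 2 and 3 set
 *     tcg_regset_single(d)  -> false     // 0x0c & 0x0b == 0x08
 *     tcg_regset_first(d)   -> 2         // lowest set bit
 */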
1887
b7a83ff8
RH
1888/* Return only the number of characters output -- no error return. */
1889#define ne_fprintf(...) \
1890 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
1891
1892static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
c896fe29 1893{
c896fe29 1894 char buf[128];
c45cb8bb 1895 TCGOp *op;
c45cb8bb 1896
15fa08f8 1897 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb
RH
1898 int i, k, nb_oargs, nb_iargs, nb_cargs;
1899 const TCGOpDef *def;
c45cb8bb 1900 TCGOpcode c;
bdfb460e 1901 int col = 0;
c896fe29 1902
c45cb8bb 1903 c = op->opc;
c896fe29 1904 def = &tcg_op_defs[c];
c45cb8bb 1905
765b842a 1906 if (c == INDEX_op_insn_start) {
b016486e 1907 nb_oargs = 0;
b7a83ff8 1908 col += ne_fprintf(f, "\n ----");
9aef40ed
RH
1909
1910 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1911 target_ulong a;
7e4597d7 1912#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 1913 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
7e4597d7 1914#else
efee3746 1915 a = op->args[i];
7e4597d7 1916#endif
b7a83ff8 1917 col += ne_fprintf(f, " " TARGET_FMT_lx, a);
eeacee4d 1918 }
7e4597d7 1919 } else if (c == INDEX_op_call) {
3e92aa34 1920 const TCGHelperInfo *info = tcg_call_info(op);
fa52e660 1921 void *func = tcg_call_func(op);
3e92aa34 1922
c896fe29 1923 /* variable number of arguments */
cd9090aa
RH
1924 nb_oargs = TCGOP_CALLO(op);
1925 nb_iargs = TCGOP_CALLI(op);
c896fe29 1926 nb_cargs = def->nb_cargs;
c896fe29 1927
b7a83ff8 1928 col += ne_fprintf(f, " %s ", def->name);
3e92aa34
RH
1929
1930 /*
1931 * Print the function name from TCGHelperInfo, if available.
1932 * Note that plugins have a template function for the info,
1933 * but the actual function pointer comes from the plugin.
1934 */
3e92aa34 1935 if (func == info->func) {
b7a83ff8 1936 col += ne_fprintf(f, "%s", info->name);
3e92aa34 1937 } else {
b7a83ff8 1938 col += ne_fprintf(f, "plugin(%p)", func);
3e92aa34
RH
1939 }
1940
b7a83ff8 1941 col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
cf066674 1942 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1943 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1944 op->args[i]));
b03cce8e 1945 }
cf066674 1946 for (i = 0; i < nb_iargs; i++) {
efee3746 1947 TCGArg arg = op->args[nb_oargs + i];
39004a71 1948 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
b7a83ff8 1949 col += ne_fprintf(f, ",%s", t);
e8996ee0 1950 }
b03cce8e 1951 } else {
b7a83ff8 1952 col += ne_fprintf(f, " %s ", def->name);
c45cb8bb
RH
1953
1954 nb_oargs = def->nb_oargs;
1955 nb_iargs = def->nb_iargs;
1956 nb_cargs = def->nb_cargs;
1957
d2fd745f 1958 if (def->flags & TCG_OPF_VECTOR) {
b7a83ff8
RH
1959 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
1960 8 << TCGOP_VECE(op));
d2fd745f
RH
1961 }
1962
b03cce8e 1963 k = 0;
c45cb8bb 1964 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1965 const char *sep = k ? "," : "";
1966 col += ne_fprintf(f, "%s%s", sep,
1967 tcg_get_arg_str(s, buf, sizeof(buf),
1968 op->args[k++]));
b03cce8e 1969 }
c45cb8bb 1970 for (i = 0; i < nb_iargs; i++) {
b7a83ff8
RH
1971 const char *sep = k ? "," : "";
1972 col += ne_fprintf(f, "%s%s", sep,
1973 tcg_get_arg_str(s, buf, sizeof(buf),
1974 op->args[k++]));
b03cce8e 1975 }
be210acb
RH
1976 switch (c) {
1977 case INDEX_op_brcond_i32:
be210acb 1978 case INDEX_op_setcond_i32:
ffc5ea09 1979 case INDEX_op_movcond_i32:
ffc5ea09 1980 case INDEX_op_brcond2_i32:
be210acb 1981 case INDEX_op_setcond2_i32:
ffc5ea09 1982 case INDEX_op_brcond_i64:
be210acb 1983 case INDEX_op_setcond_i64:
ffc5ea09 1984 case INDEX_op_movcond_i64:
212be173 1985 case INDEX_op_cmp_vec:
f75da298 1986 case INDEX_op_cmpsel_vec:
efee3746
RH
1987 if (op->args[k] < ARRAY_SIZE(cond_name)
1988 && cond_name[op->args[k]]) {
b7a83ff8 1989 col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
eeacee4d 1990 } else {
b7a83ff8 1991 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
eeacee4d 1992 }
f48f3ede 1993 i = 1;
be210acb 1994 break;
f713d6ad
RH
1995 case INDEX_op_qemu_ld_i32:
1996 case INDEX_op_qemu_st_i32:
07ce0b05 1997 case INDEX_op_qemu_st8_i32:
f713d6ad
RH
1998 case INDEX_op_qemu_ld_i64:
1999 case INDEX_op_qemu_st_i64:
59227d5d 2000 {
9002ffcb 2001 MemOpIdx oi = op->args[k++];
14776ab5 2002 MemOp op = get_memop(oi);
59227d5d
RH
2003 unsigned ix = get_mmuidx(oi);
2004
59c4b7e8 2005 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
b7a83ff8 2006 col += ne_fprintf(f, ",$0x%x,%u", op, ix);
59c4b7e8 2007 } else {
1f00b27f
SS
2008 const char *s_al, *s_op;
2009 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
59c4b7e8 2010 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
b7a83ff8 2011 col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
59227d5d
RH
2012 }
2013 i = 1;
f713d6ad 2014 }
f713d6ad 2015 break;
587195bd
RH
2016 case INDEX_op_bswap16_i32:
2017 case INDEX_op_bswap16_i64:
2018 case INDEX_op_bswap32_i32:
2019 case INDEX_op_bswap32_i64:
2020 case INDEX_op_bswap64_i64:
2021 {
2022 TCGArg flags = op->args[k];
2023 const char *name = NULL;
2024
2025 if (flags < ARRAY_SIZE(bswap_flag_name)) {
2026 name = bswap_flag_name[flags];
2027 }
2028 if (name) {
b7a83ff8 2029 col += ne_fprintf(f, ",%s", name);
587195bd 2030 } else {
b7a83ff8 2031 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
587195bd
RH
2032 }
2033 i = k = 1;
2034 }
2035 break;
be210acb 2036 default:
f48f3ede 2037 i = 0;
be210acb
RH
2038 break;
2039 }
51e3972c
RH
2040 switch (c) {
2041 case INDEX_op_set_label:
2042 case INDEX_op_br:
2043 case INDEX_op_brcond_i32:
2044 case INDEX_op_brcond_i64:
2045 case INDEX_op_brcond2_i32:
b7a83ff8
RH
2046 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2047 arg_label(op->args[k])->id);
51e3972c
RH
2048 i++, k++;
2049 break;
2050 default:
2051 break;
2052 }
2053 for (; i < nb_cargs; i++, k++) {
b7a83ff8
RH
2054 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2055 op->args[k]);
bdfb460e
RH
2056 }
2057 }
bdfb460e 2058
1894f69a 2059 if (have_prefs || op->life) {
b7a83ff8
RH
2060 for (; col < 40; ++col) {
2061 putc(' ', f);
bdfb460e 2062 }
1894f69a
RH
2063 }
2064
2065 if (op->life) {
2066 unsigned life = op->life;
bdfb460e
RH
2067
2068 if (life & (SYNC_ARG * 3)) {
b7a83ff8 2069 ne_fprintf(f, " sync:");
bdfb460e
RH
2070 for (i = 0; i < 2; ++i) {
2071 if (life & (SYNC_ARG << i)) {
b7a83ff8 2072 ne_fprintf(f, " %d", i);
bdfb460e
RH
2073 }
2074 }
2075 }
2076 life /= DEAD_ARG;
2077 if (life) {
b7a83ff8 2078 ne_fprintf(f, " dead:");
bdfb460e
RH
2079 for (i = 0; life; ++i, life >>= 1) {
2080 if (life & 1) {
b7a83ff8 2081 ne_fprintf(f, " %d", i);
bdfb460e
RH
2082 }
2083 }
b03cce8e 2084 }
c896fe29 2085 }
1894f69a
RH
2086
2087 if (have_prefs) {
2088 for (i = 0; i < nb_oargs; ++i) {
31fd884b 2089 TCGRegSet set = output_pref(op, i);
1894f69a
RH
2090
2091 if (i == 0) {
b7a83ff8 2092 ne_fprintf(f, " pref=");
1894f69a 2093 } else {
b7a83ff8 2094 ne_fprintf(f, ",");
1894f69a
RH
2095 }
2096 if (set == 0) {
b7a83ff8 2097 ne_fprintf(f, "none");
1894f69a 2098 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
b7a83ff8 2099 ne_fprintf(f, "all");
1894f69a
RH
2100#ifdef CONFIG_DEBUG_TCG
2101 } else if (tcg_regset_single(set)) {
2102 TCGReg reg = tcg_regset_first(set);
b7a83ff8 2103 ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
1894f69a
RH
2104#endif
2105 } else if (TCG_TARGET_NB_REGS <= 32) {
b7a83ff8 2106 ne_fprintf(f, "0x%x", (uint32_t)set);
1894f69a 2107 } else {
b7a83ff8 2108 ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
1894f69a
RH
2109 }
2110 }
2111 }
2112
b7a83ff8 2113 putc('\n', f);
c896fe29
FB
2114 }
2115}
2116
2117/* we give more priority to constraints with fewer registers */
2118static int get_constraint_priority(const TCGOpDef *def, int k)
2119{
74a11790 2120 const TCGArgConstraint *arg_ct = &def->args_ct[k];
29f5e925 2121 int n = ctpop64(arg_ct->regs);
c896fe29 2122
29f5e925
RH
2123 /*
2124 * Sort constraints of a single register first, which includes output
2125 * aliases (which must exactly match the input already allocated).
2126 */
2127 if (n == 1 || arg_ct->oalias) {
2128 return INT_MAX;
2129 }
2130
2131 /*
2132 * Sort register pairs next, first then second immediately after.
2133 * Arbitrarily sort multiple pairs by the index of the first reg;
2134 * there shouldn't be many pairs.
2135 */
2136 switch (arg_ct->pair) {
2137 case 1:
2138 case 3:
2139 return (k + 1) * 2;
2140 case 2:
2141 return (arg_ct->pair_index + 1) * 2 - 1;
c896fe29 2142 }
29f5e925
RH
2143
2144 /* Finally, sort by decreasing register count. */
2145 assert(n > 1);
2146 return -n;
c896fe29
FB
2147}
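/*
 * Illustrative sketch, not part of the original file: the resulting
 * ordering, assuming a hypothetical host with 16 allocatable registers:
 *   - single-register constraints and output aliases sort first (INT_MAX);
 *   - the halves of a register pair come next, the first half just ahead
 *     of the second ((k + 1) * 2 vs. (pair_index + 1) * 2 - 1);
 *   - the rest sort by decreasing restrictiveness, e.g. an 8-register
 *     class (-8) ahead of the full 16-register class (-16).
 */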
2148
2149/* sort from highest priority to lowest */
2150static void sort_constraints(TCGOpDef *def, int start, int n)
2151{
66792f90
RH
2152 int i, j;
2153 TCGArgConstraint *a = def->args_ct;
c896fe29 2154
66792f90
RH
2155 for (i = 0; i < n; i++) {
2156 a[start + i].sort_index = start + i;
2157 }
2158 if (n <= 1) {
c896fe29 2159 return;
66792f90
RH
2160 }
2161 for (i = 0; i < n - 1; i++) {
2162 for (j = i + 1; j < n; j++) {
2163 int p1 = get_constraint_priority(def, a[start + i].sort_index);
2164 int p2 = get_constraint_priority(def, a[start + j].sort_index);
c896fe29 2165 if (p1 < p2) {
66792f90
RH
2166 int tmp = a[start + i].sort_index;
2167 a[start + i].sort_index = a[start + j].sort_index;
2168 a[start + j].sort_index = tmp;
c896fe29
FB
2169 }
2170 }
2171 }
2172}
2173
f69d277e 2174static void process_op_defs(TCGContext *s)
c896fe29 2175{
a9751609 2176 TCGOpcode op;
c896fe29 2177
f69d277e
RH
2178 for (op = 0; op < NB_OPS; op++) {
2179 TCGOpDef *def = &tcg_op_defs[op];
2180 const TCGTargetOpDef *tdefs;
29f5e925
RH
2181 bool saw_alias_pair = false;
2182 int i, o, i2, o2, nb_args;
f69d277e
RH
2183
2184 if (def->flags & TCG_OPF_NOT_PRESENT) {
2185 continue;
2186 }
2187
c896fe29 2188 nb_args = def->nb_iargs + def->nb_oargs;
f69d277e
RH
2189 if (nb_args == 0) {
2190 continue;
2191 }
2192
4c22e840
RH
2193 /*
2194 * Macro magic should make it impossible, but double-check that
2195 * the array index is in range. Since the signedness of an enum
2196 * is implementation defined, force the result to unsigned.
2197 */
2198 unsigned con_set = tcg_target_op_def(op);
2199 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2200 tdefs = &constraint_sets[con_set];
f69d277e
RH
2201
2202 for (i = 0; i < nb_args; i++) {
2203 const char *ct_str = tdefs->args_ct_str[i];
8940ea0d
PMD
2204 bool input_p = i >= def->nb_oargs;
2205
f69d277e 2206 /* Incomplete TCGTargetOpDef entry. */
eabb7b91 2207 tcg_debug_assert(ct_str != NULL);
f69d277e 2208
8940ea0d
PMD
2209 switch (*ct_str) {
2210 case '0' ... '9':
2211 o = *ct_str - '0';
2212 tcg_debug_assert(input_p);
2213 tcg_debug_assert(o < def->nb_oargs);
2214 tcg_debug_assert(def->args_ct[o].regs != 0);
2215 tcg_debug_assert(!def->args_ct[o].oalias);
2216 def->args_ct[i] = def->args_ct[o];
2217 /* The output sets oalias. */
2218 def->args_ct[o].oalias = 1;
2219 def->args_ct[o].alias_index = i;
2220 /* The input sets ialias. */
2221 def->args_ct[i].ialias = 1;
2222 def->args_ct[i].alias_index = o;
29f5e925
RH
2223 if (def->args_ct[i].pair) {
2224 saw_alias_pair = true;
2225 }
8940ea0d
PMD
2226 tcg_debug_assert(ct_str[1] == '\0');
2227 continue;
2228
2229 case '&':
2230 tcg_debug_assert(!input_p);
2231 def->args_ct[i].newreg = true;
2232 ct_str++;
2233 break;
29f5e925
RH
2234
2235 case 'p': /* plus */
2236 /* Allocate to the register after the previous. */
2237 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2238 o = i - 1;
2239 tcg_debug_assert(!def->args_ct[o].pair);
2240 tcg_debug_assert(!def->args_ct[o].ct);
2241 def->args_ct[i] = (TCGArgConstraint){
2242 .pair = 2,
2243 .pair_index = o,
2244 .regs = def->args_ct[o].regs << 1,
2245 };
2246 def->args_ct[o].pair = 1;
2247 def->args_ct[o].pair_index = i;
2248 tcg_debug_assert(ct_str[1] == '\0');
2249 continue;
2250
2251 case 'm': /* minus */
2252 /* Allocate to the register before the previous. */
2253 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2254 o = i - 1;
2255 tcg_debug_assert(!def->args_ct[o].pair);
2256 tcg_debug_assert(!def->args_ct[o].ct);
2257 def->args_ct[i] = (TCGArgConstraint){
2258 .pair = 1,
2259 .pair_index = o,
2260 .regs = def->args_ct[o].regs >> 1,
2261 };
2262 def->args_ct[o].pair = 2;
2263 def->args_ct[o].pair_index = i;
2264 tcg_debug_assert(ct_str[1] == '\0');
2265 continue;
8940ea0d
PMD
2266 }
2267
2268 do {
2269 switch (*ct_str) {
17280ff4
RH
2270 case 'i':
2271 def->args_ct[i].ct |= TCG_CT_CONST;
17280ff4 2272 break;
358b4923 2273
358b4923
RH
2274 /* Include all of the target-specific constraints. */
2275
2276#undef CONST
2277#define CONST(CASE, MASK) \
8940ea0d 2278 case CASE: def->args_ct[i].ct |= MASK; break;
358b4923 2279#define REGS(CASE, MASK) \
8940ea0d 2280 case CASE: def->args_ct[i].regs |= MASK; break;
358b4923
RH
2281
2282#include "tcg-target-con-str.h"
2283
2284#undef REGS
2285#undef CONST
17280ff4 2286 default:
8940ea0d
PMD
2287 case '0' ... '9':
2288 case '&':
29f5e925
RH
2289 case 'p':
2290 case 'm':
17280ff4 2291 /* Typo in TCGTargetOpDef constraint. */
358b4923 2292 g_assert_not_reached();
c896fe29 2293 }
8940ea0d 2294 } while (*++ct_str != '\0');
c896fe29
FB
2295 }
2296
c68aaa18 2297 /* TCGTargetOpDef entry with too much information? */
eabb7b91 2298 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
c68aaa18 2299
29f5e925
RH
2300 /*
2301 * Fix up output pairs that are aliased with inputs.
2302 * When we created the alias, we copied pair from the output.
2303 * There are three cases:
2304 * (1a) Pairs of inputs alias pairs of outputs.
2305 * (1b) One input aliases the first of a pair of outputs.
2306 * (2) One input aliases the second of a pair of outputs.
2307 *
2308 * Case 1a is handled by making sure that the pair_index'es are
2309 * properly updated so that they appear the same as a pair of inputs.
2310 *
2311 * Case 1b is handled by setting the pair_index of the input to
2312 * itself, simply so it doesn't point to an unrelated argument.
2313 * Since we don't encounter the "second" during the input allocation
2314 * phase, nothing happens with the second half of the input pair.
2315 *
2316 * Case 2 is handled by setting the second input to pair=3, the
2317 * first output to pair=3, and the pair_index'es to match.
2318 */
2319 if (saw_alias_pair) {
2320 for (i = def->nb_oargs; i < nb_args; i++) {
2321 /*
2322 * Since [0-9pm] must be alone in the constraint string,
2323 * the only way they can both be set is if the pair comes
2324 * from the output alias.
2325 */
2326 if (!def->args_ct[i].ialias) {
2327 continue;
2328 }
2329 switch (def->args_ct[i].pair) {
2330 case 0:
2331 break;
2332 case 1:
2333 o = def->args_ct[i].alias_index;
2334 o2 = def->args_ct[o].pair_index;
2335 tcg_debug_assert(def->args_ct[o].pair == 1);
2336 tcg_debug_assert(def->args_ct[o2].pair == 2);
2337 if (def->args_ct[o2].oalias) {
2338 /* Case 1a */
2339 i2 = def->args_ct[o2].alias_index;
2340 tcg_debug_assert(def->args_ct[i2].pair == 2);
2341 def->args_ct[i2].pair_index = i;
2342 def->args_ct[i].pair_index = i2;
2343 } else {
2344 /* Case 1b */
2345 def->args_ct[i].pair_index = i;
2346 }
2347 break;
2348 case 2:
2349 o = def->args_ct[i].alias_index;
2350 o2 = def->args_ct[o].pair_index;
2351 tcg_debug_assert(def->args_ct[o].pair == 2);
2352 tcg_debug_assert(def->args_ct[o2].pair == 1);
2353 if (def->args_ct[o2].oalias) {
2354 /* Case 1a */
2355 i2 = def->args_ct[o2].alias_index;
2356 tcg_debug_assert(def->args_ct[i2].pair == 1);
2357 def->args_ct[i2].pair_index = i;
2358 def->args_ct[i].pair_index = i2;
2359 } else {
2360 /* Case 2 */
2361 def->args_ct[i].pair = 3;
2362 def->args_ct[o2].pair = 3;
2363 def->args_ct[i].pair_index = o2;
2364 def->args_ct[o2].pair_index = i;
2365 }
2366 break;
2367 default:
2368 g_assert_not_reached();
2369 }
2370 }
2371 }
2372
c896fe29
FB
2373 /* sort the constraints (XXX: this is just a heuristic) */
2374 sort_constraints(def, 0, def->nb_oargs);
2375 sort_constraints(def, def->nb_oargs, def->nb_iargs);
a9751609 2376 }
c896fe29
FB
2377}
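/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * constraint set { "r", "0", "ri" } for a two-input, one-output opcode
 * would be parsed above as: output 0 may use any register of class 'r';
 * input 1 ("0") is aliased to output 0, linking ialias/oalias; input 2
 * accepts a register or a constant (TCG_CT_CONST via 'i').  The 'p'/'m'
 * letters instead chain an argument to the register just after/before
 * the previous argument, forming the register pairs fixed up above.
 */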
2378
0c627cdc
RH
2379void tcg_op_remove(TCGContext *s, TCGOp *op)
2380{
d88a117e
RH
2381 TCGLabel *label;
2382
2383 switch (op->opc) {
2384 case INDEX_op_br:
2385 label = arg_label(op->args[0]);
2386 label->refs--;
2387 break;
2388 case INDEX_op_brcond_i32:
2389 case INDEX_op_brcond_i64:
2390 label = arg_label(op->args[3]);
2391 label->refs--;
2392 break;
2393 case INDEX_op_brcond2_i32:
2394 label = arg_label(op->args[5]);
2395 label->refs--;
2396 break;
2397 default:
2398 break;
2399 }
2400
15fa08f8
RH
2401 QTAILQ_REMOVE(&s->ops, op, link);
2402 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
abebf925 2403 s->nb_ops--;
0c627cdc
RH
2404
2405#ifdef CONFIG_PROFILER
d73415a3 2406 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
0c627cdc
RH
2407#endif
2408}
2409
a80cdd31
RH
2410void tcg_remove_ops_after(TCGOp *op)
2411{
2412 TCGContext *s = tcg_ctx;
2413
2414 while (true) {
2415 TCGOp *last = tcg_last_op();
2416 if (last == op) {
2417 return;
2418 }
2419 tcg_op_remove(s, last);
2420 }
2421}
2422
d4478943 2423static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
5a18407f 2424{
15fa08f8 2425 TCGContext *s = tcg_ctx;
cb10bc63
RH
2426 TCGOp *op = NULL;
2427
2428 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
2429 QTAILQ_FOREACH(op, &s->free_ops, link) {
2430 if (nargs <= op->nargs) {
2431 QTAILQ_REMOVE(&s->free_ops, op, link);
2432 nargs = op->nargs;
2433 goto found;
2434 }
2435 }
15fa08f8 2436 }
cb10bc63
RH
2437
2438 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
2439 nargs = MAX(4, nargs);
2440 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
2441
2442 found:
15fa08f8
RH
2443 memset(op, 0, offsetof(TCGOp, link));
2444 op->opc = opc;
cb10bc63
RH
2445 op->nargs = nargs;
2446
2447 /* Check for bitfield overflow. */
2448 tcg_debug_assert(op->nargs == nargs);
5a18407f 2449
cb10bc63 2450 s->nb_ops++;
15fa08f8
RH
2451 return op;
2452}
2453
d4478943 2454TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
15fa08f8 2455{
d4478943 2456 TCGOp *op = tcg_op_alloc(opc, nargs);
15fa08f8
RH
2457 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2458 return op;
2459}
5a18407f 2460
d4478943
PMD
2461TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
2462 TCGOpcode opc, unsigned nargs)
15fa08f8 2463{
d4478943 2464 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2465 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
5a18407f
RH
2466 return new_op;
2467}
2468
d4478943
PMD
2469TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
2470 TCGOpcode opc, unsigned nargs)
5a18407f 2471{
d4478943 2472 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2473 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
5a18407f
RH
2474 return new_op;
2475}
2476
b4fc67c7
RH
2477/* Reachable analysis : remove unreachable code. */
2478static void reachable_code_pass(TCGContext *s)
2479{
2480 TCGOp *op, *op_next;
2481 bool dead = false;
2482
2483 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2484 bool remove = dead;
2485 TCGLabel *label;
b4fc67c7
RH
2486
2487 switch (op->opc) {
2488 case INDEX_op_set_label:
2489 label = arg_label(op->args[0]);
2490 if (label->refs == 0) {
2491 /*
2492 * While there is an occasional backward branch, virtually
2493 * all branches generated by the translators are forward.
2494 * Which means that generally we will have already removed
2495 * all references to the label, and there is
2496 * little to be gained by iterating.
2497 */
2498 remove = true;
2499 } else {
2500 /* Once we see a label, insns become live again. */
2501 dead = false;
2502 remove = false;
2503
2504 /*
2505 * Optimization can fold conditional branches to unconditional.
2506 * If we find a label with one reference which is preceded by
2507 * an unconditional branch to it, remove both. This needed to
2508 * wait until the dead code in between them was removed.
2509 */
2510 if (label->refs == 1) {
eae3eb3e 2511 TCGOp *op_prev = QTAILQ_PREV(op, link);
b4fc67c7
RH
2512 if (op_prev->opc == INDEX_op_br &&
2513 label == arg_label(op_prev->args[0])) {
2514 tcg_op_remove(s, op_prev);
2515 remove = true;
2516 }
2517 }
2518 }
2519 break;
2520
2521 case INDEX_op_br:
2522 case INDEX_op_exit_tb:
2523 case INDEX_op_goto_ptr:
2524 /* Unconditional branches; everything following is dead. */
2525 dead = true;
2526 break;
2527
2528 case INDEX_op_call:
2529 /* Notice noreturn helper calls, raising exceptions. */
90163900 2530 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
b4fc67c7
RH
2531 dead = true;
2532 }
2533 break;
2534
2535 case INDEX_op_insn_start:
2536 /* Never remove -- we need to keep these for unwind. */
2537 remove = false;
2538 break;
2539
2540 default:
2541 break;
2542 }
2543
2544 if (remove) {
2545 tcg_op_remove(s, op);
2546 }
2547 }
2548}
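/*
 * Illustrative sketch, not part of the original file: the effect of the
 * pass above on a hypothetical op sequence.
 *
 *     br $L1                  <- e.g. a brcond folded by the optimizer
 *     mov_i32 t0, t1          <- unreachable, removed while "dead"
 *     set_label $L1           <- only remaining reference is the br above
 *
 * The mov is dropped first; the now-adjacent br/set_label pair is then
 * removed as well, leaving nothing.
 */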
2549
c70fbf0a
RH
2550#define TS_DEAD 1
2551#define TS_MEM 2
2552
5a18407f
RH
2553#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2554#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
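/*
 * Illustrative note, not part of the original file: liveness_pass_1 walks
 * the op list backwards, so for that pass the bits above read roughly as
 *   TS_DEAD          - no later op uses the value;
 *   TS_MEM           - some later point needs the value in its memory slot;
 *   TS_DEAD | TS_MEM - dead here, but must still be synced back to memory
 *                      (the usual end-of-TB state for globals).
 * liveness_pass_2 reuses the same bits with a related meaning for the
 * temporaries that shadow indirect globals.
 */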
2555
25f49c5f
RH
2556/* For liveness_pass_1, the register preferences for a given temp. */
2557static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2558{
2559 return ts->state_ptr;
2560}
2561
2562/* For liveness_pass_1, reset the preferences for a given temp to the
2563 * maximal regset for its type.
2564 */
2565static inline void la_reset_pref(TCGTemp *ts)
2566{
2567 *la_temp_pref(ts)
2568 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2569}
2570
9c43b68d
AJ
2571/* liveness analysis: end of function: all temps are dead, and globals
2572 should be in memory. */
2616c808 2573static void la_func_end(TCGContext *s, int ng, int nt)
c896fe29 2574{
b83eabea
RH
2575 int i;
2576
2577 for (i = 0; i < ng; ++i) {
2578 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f 2579 la_reset_pref(&s->temps[i]);
b83eabea
RH
2580 }
2581 for (i = ng; i < nt; ++i) {
2582 s->temps[i].state = TS_DEAD;
25f49c5f 2583 la_reset_pref(&s->temps[i]);
b83eabea 2584 }
c896fe29
FB
2585}
2586
9c43b68d
AJ
2587/* liveness analysis: end of basic block: all temps are dead, globals
2588 and local temps should be in memory. */
2616c808 2589static void la_bb_end(TCGContext *s, int ng, int nt)
641d5fbe 2590{
b83eabea 2591 int i;
641d5fbe 2592
ee17db83
RH
2593 for (i = 0; i < nt; ++i) {
2594 TCGTemp *ts = &s->temps[i];
2595 int state;
2596
2597 switch (ts->kind) {
2598 case TEMP_FIXED:
2599 case TEMP_GLOBAL:
2600 case TEMP_LOCAL:
2601 state = TS_DEAD | TS_MEM;
2602 break;
2603 case TEMP_NORMAL:
c7482438 2604 case TEMP_EBB:
c0522136 2605 case TEMP_CONST:
ee17db83
RH
2606 state = TS_DEAD;
2607 break;
2608 default:
2609 g_assert_not_reached();
2610 }
2611 ts->state = state;
2612 la_reset_pref(ts);
641d5fbe
FB
2613 }
2614}
2615
f65a061c
RH
2616/* liveness analysis: sync globals back to memory. */
2617static void la_global_sync(TCGContext *s, int ng)
2618{
2619 int i;
2620
2621 for (i = 0; i < ng; ++i) {
25f49c5f
RH
2622 int state = s->temps[i].state;
2623 s->temps[i].state = state | TS_MEM;
2624 if (state == TS_DEAD) {
2625 /* If the global was previously dead, reset prefs. */
2626 la_reset_pref(&s->temps[i]);
2627 }
f65a061c
RH
2628 }
2629}
2630
b4cb76e6 2631/*
c7482438
RH
2632 * liveness analysis: conditional branch: all temps are dead unless
2633 * explicitly live-across-conditional-branch, globals and local temps
2634 * should be synced.
b4cb76e6
RH
2635 */
2636static void la_bb_sync(TCGContext *s, int ng, int nt)
2637{
2638 la_global_sync(s, ng);
2639
2640 for (int i = ng; i < nt; ++i) {
c0522136
RH
2641 TCGTemp *ts = &s->temps[i];
2642 int state;
2643
2644 switch (ts->kind) {
2645 case TEMP_LOCAL:
2646 state = ts->state;
2647 ts->state = state | TS_MEM;
b4cb76e6
RH
2648 if (state != TS_DEAD) {
2649 continue;
2650 }
c0522136
RH
2651 break;
2652 case TEMP_NORMAL:
b4cb76e6 2653 s->temps[i].state = TS_DEAD;
c0522136 2654 break;
c7482438 2655 case TEMP_EBB:
c0522136
RH
2656 case TEMP_CONST:
2657 continue;
2658 default:
2659 g_assert_not_reached();
b4cb76e6
RH
2660 }
2661 la_reset_pref(&s->temps[i]);
2662 }
2663}
2664
f65a061c
RH
2665/* liveness analysis: sync globals back to memory and kill. */
2666static void la_global_kill(TCGContext *s, int ng)
2667{
2668 int i;
2669
2670 for (i = 0; i < ng; i++) {
2671 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f
RH
2672 la_reset_pref(&s->temps[i]);
2673 }
2674}
2675
2676/* liveness analysis: note live globals crossing calls. */
2677static void la_cross_call(TCGContext *s, int nt)
2678{
2679 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2680 int i;
2681
2682 for (i = 0; i < nt; i++) {
2683 TCGTemp *ts = &s->temps[i];
2684 if (!(ts->state & TS_DEAD)) {
2685 TCGRegSet *pset = la_temp_pref(ts);
2686 TCGRegSet set = *pset;
2687
2688 set &= mask;
2689 /* If the combination is not possible, restart. */
2690 if (set == 0) {
2691 set = tcg_target_available_regs[ts->type] & mask;
2692 }
2693 *pset = set;
2694 }
f65a061c
RH
2695 }
2696}
2697
a1b3c48d 2698/* Liveness analysis : update the opc_arg_life array to tell if a
c896fe29
FB
2699 given input argument is dead. Instructions updating dead
2700 temporaries are removed. */
b83eabea 2701static void liveness_pass_1(TCGContext *s)
c896fe29 2702{
c70fbf0a 2703 int nb_globals = s->nb_globals;
2616c808 2704 int nb_temps = s->nb_temps;
15fa08f8 2705 TCGOp *op, *op_prev;
25f49c5f
RH
2706 TCGRegSet *prefs;
2707 int i;
2708
2709 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2710 for (i = 0; i < nb_temps; ++i) {
2711 s->temps[i].state_ptr = prefs + i;
2712 }
a1b3c48d 2713
ae36a246 2714 /* ??? Should be redundant with the exit_tb that ends the TB. */
2616c808 2715 la_func_end(s, nb_globals, nb_temps);
c896fe29 2716
eae3eb3e 2717 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
25f49c5f 2718 int nb_iargs, nb_oargs;
c45cb8bb
RH
2719 TCGOpcode opc_new, opc_new2;
2720 bool have_opc_new2;
a1b3c48d 2721 TCGLifeData arg_life = 0;
25f49c5f 2722 TCGTemp *ts;
c45cb8bb
RH
2723 TCGOpcode opc = op->opc;
2724 const TCGOpDef *def = &tcg_op_defs[opc];
2725
c45cb8bb 2726 switch (opc) {
c896fe29 2727 case INDEX_op_call:
c6e113f5 2728 {
39004a71
RH
2729 const TCGHelperInfo *info = tcg_call_info(op);
2730 int call_flags = tcg_call_flags(op);
c896fe29 2731
cd9090aa
RH
2732 nb_oargs = TCGOP_CALLO(op);
2733 nb_iargs = TCGOP_CALLI(op);
c6e113f5 2734
c45cb8bb 2735 /* pure functions can be removed if their result is unused */
78505279 2736 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
cf066674 2737 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2738 ts = arg_temp(op->args[i]);
2739 if (ts->state != TS_DEAD) {
c6e113f5 2740 goto do_not_remove_call;
9c43b68d 2741 }
c6e113f5 2742 }
c45cb8bb 2743 goto do_remove;
152c35aa
RH
2744 }
2745 do_not_remove_call:
c896fe29 2746
25f49c5f 2747 /* Output args are dead. */
152c35aa 2748 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2749 ts = arg_temp(op->args[i]);
2750 if (ts->state & TS_DEAD) {
152c35aa
RH
2751 arg_life |= DEAD_ARG << i;
2752 }
25f49c5f 2753 if (ts->state & TS_MEM) {
152c35aa 2754 arg_life |= SYNC_ARG << i;
c6e113f5 2755 }
25f49c5f
RH
2756 ts->state = TS_DEAD;
2757 la_reset_pref(ts);
152c35aa 2758 }
78505279 2759
31fd884b
RH
2760 /* Not used -- it will be tcg_target_call_oarg_reg(). */
2761 memset(op->output_pref, 0, sizeof(op->output_pref));
2762
152c35aa
RH
2763 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2764 TCG_CALL_NO_READ_GLOBALS))) {
f65a061c 2765 la_global_kill(s, nb_globals);
152c35aa 2766 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
f65a061c 2767 la_global_sync(s, nb_globals);
152c35aa 2768 }
b9c18f56 2769
25f49c5f 2770 /* Record arguments that die in this helper. */
152c35aa 2771 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
25f49c5f 2772 ts = arg_temp(op->args[i]);
39004a71 2773 if (ts->state & TS_DEAD) {
152c35aa 2774 arg_life |= DEAD_ARG << i;
c6e113f5 2775 }
152c35aa 2776 }
25f49c5f
RH
2777
2778 /* For all live registers, remove call-clobbered prefs. */
2779 la_cross_call(s, nb_temps);
2780
39004a71
RH
2781 /*
2782 * Input arguments are live for preceding opcodes.
2783 *
2784 * For those arguments that die, and will be allocated in
2785 * registers, clear the register set for that arg, to be
2786 * filled in below. For args that will be on the stack,
2787 * reset to any available reg. Process arguments in reverse
2788 * order so that if a temp is used more than once, the stack
2789 * reset to max happens before the register reset to 0.
2790 */
2791 for (i = nb_iargs - 1; i >= 0; i--) {
2792 const TCGCallArgumentLoc *loc = &info->in[i];
2793 ts = arg_temp(op->args[nb_oargs + i]);
25f49c5f 2794
39004a71
RH
2795 if (ts->state & TS_DEAD) {
2796 switch (loc->kind) {
2797 case TCG_CALL_ARG_NORMAL:
2798 case TCG_CALL_ARG_EXTEND_U:
2799 case TCG_CALL_ARG_EXTEND_S:
2800 if (REG_P(loc)) {
2801 *la_temp_pref(ts) = 0;
2802 break;
2803 }
2804 /* fall through */
2805 default:
2806 *la_temp_pref(ts) =
2807 tcg_target_available_regs[ts->type];
2808 break;
2809 }
25f49c5f
RH
2810 ts->state &= ~TS_DEAD;
2811 }
2812 }
2813
39004a71
RH
2814 /*
2815 * For each input argument, add its input register to prefs.
2816 * If a temp is used once, this produces a single set bit;
2817 * if a temp is used multiple times, this produces a set.
2818 */
2819 for (i = 0; i < nb_iargs; i++) {
2820 const TCGCallArgumentLoc *loc = &info->in[i];
2821 ts = arg_temp(op->args[nb_oargs + i]);
2822
2823 switch (loc->kind) {
2824 case TCG_CALL_ARG_NORMAL:
2825 case TCG_CALL_ARG_EXTEND_U:
2826 case TCG_CALL_ARG_EXTEND_S:
2827 if (REG_P(loc)) {
2828 tcg_regset_set_reg(*la_temp_pref(ts),
2829 tcg_target_call_iarg_regs[loc->arg_slot]);
2830 }
2831 break;
2832 default:
2833 break;
c19f47bf 2834 }
c896fe29 2835 }
c896fe29 2836 }
c896fe29 2837 break;
765b842a 2838 case INDEX_op_insn_start:
c896fe29 2839 break;
5ff9d6a4 2840 case INDEX_op_discard:
5ff9d6a4 2841 /* mark the temporary as dead */
25f49c5f
RH
2842 ts = arg_temp(op->args[0]);
2843 ts->state = TS_DEAD;
2844 la_reset_pref(ts);
5ff9d6a4 2845 break;
1305c451
RH
2846
2847 case INDEX_op_add2_i32:
c45cb8bb 2848 opc_new = INDEX_op_add_i32;
f1fae40c 2849 goto do_addsub2;
1305c451 2850 case INDEX_op_sub2_i32:
c45cb8bb 2851 opc_new = INDEX_op_sub_i32;
f1fae40c
RH
2852 goto do_addsub2;
2853 case INDEX_op_add2_i64:
c45cb8bb 2854 opc_new = INDEX_op_add_i64;
f1fae40c
RH
2855 goto do_addsub2;
2856 case INDEX_op_sub2_i64:
c45cb8bb 2857 opc_new = INDEX_op_sub_i64;
f1fae40c 2858 do_addsub2:
1305c451
RH
2859 nb_iargs = 4;
2860 nb_oargs = 2;
2861 /* Test if the high part of the operation is dead, but not
2862 the low part. The result can be optimized to a simple
2863 add or sub. This often happens for an x86_64 guest when the
2864 cpu mode is set to 32 bit. */
b83eabea
RH
2865 if (arg_temp(op->args[1])->state == TS_DEAD) {
2866 if (arg_temp(op->args[0])->state == TS_DEAD) {
1305c451
RH
2867 goto do_remove;
2868 }
c45cb8bb
RH
2869 /* Replace the opcode and adjust the args in place,
2870 leaving 3 unused args at the end. */
2871 op->opc = opc = opc_new;
efee3746
RH
2872 op->args[1] = op->args[2];
2873 op->args[2] = op->args[4];
1305c451
RH
2874 /* Fall through and mark the single-word operation live. */
2875 nb_iargs = 2;
2876 nb_oargs = 1;
2877 }
2878 goto do_not_remove;
2879
1414968a 2880 case INDEX_op_mulu2_i32:
c45cb8bb
RH
2881 opc_new = INDEX_op_mul_i32;
2882 opc_new2 = INDEX_op_muluh_i32;
2883 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
03271524 2884 goto do_mul2;
f1fae40c 2885 case INDEX_op_muls2_i32:
c45cb8bb
RH
2886 opc_new = INDEX_op_mul_i32;
2887 opc_new2 = INDEX_op_mulsh_i32;
2888 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
f1fae40c
RH
2889 goto do_mul2;
2890 case INDEX_op_mulu2_i64:
c45cb8bb
RH
2891 opc_new = INDEX_op_mul_i64;
2892 opc_new2 = INDEX_op_muluh_i64;
2893 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
03271524 2894 goto do_mul2;
f1fae40c 2895 case INDEX_op_muls2_i64:
c45cb8bb
RH
2896 opc_new = INDEX_op_mul_i64;
2897 opc_new2 = INDEX_op_mulsh_i64;
2898 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
03271524 2899 goto do_mul2;
f1fae40c 2900 do_mul2:
1414968a
RH
2901 nb_iargs = 2;
2902 nb_oargs = 2;
b83eabea
RH
2903 if (arg_temp(op->args[1])->state == TS_DEAD) {
2904 if (arg_temp(op->args[0])->state == TS_DEAD) {
03271524 2905 /* Both parts of the operation are dead. */
1414968a
RH
2906 goto do_remove;
2907 }
03271524 2908 /* The high part of the operation is dead; generate the low. */
c45cb8bb 2909 op->opc = opc = opc_new;
efee3746
RH
2910 op->args[1] = op->args[2];
2911 op->args[2] = op->args[3];
b83eabea 2912 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
c45cb8bb
RH
2913 /* The low part of the operation is dead; generate the high. */
2914 op->opc = opc = opc_new2;
efee3746
RH
2915 op->args[0] = op->args[1];
2916 op->args[1] = op->args[2];
2917 op->args[2] = op->args[3];
03271524
RH
2918 } else {
2919 goto do_not_remove;
1414968a 2920 }
03271524
RH
2921 /* Mark the single-word operation live. */
2922 nb_oargs = 1;
1414968a
RH
2923 goto do_not_remove;
2924
c896fe29 2925 default:
1305c451 2926 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
49516bc0
AJ
2927 nb_iargs = def->nb_iargs;
2928 nb_oargs = def->nb_oargs;
c896fe29 2929
49516bc0
AJ
2930 /* Test if the operation can be removed because all
2931 its outputs are dead. We assume that nb_oargs == 0
2932 implies side effects */
2933 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
c45cb8bb 2934 for (i = 0; i < nb_oargs; i++) {
b83eabea 2935 if (arg_temp(op->args[i])->state != TS_DEAD) {
49516bc0 2936 goto do_not_remove;
9c43b68d 2937 }
49516bc0 2938 }
152c35aa
RH
2939 goto do_remove;
2940 }
2941 goto do_not_remove;
49516bc0 2942
152c35aa
RH
2943 do_remove:
2944 tcg_op_remove(s, op);
2945 break;
2946
2947 do_not_remove:
152c35aa 2948 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2949 ts = arg_temp(op->args[i]);
2950
2951 /* Remember the preference of the uses that followed. */
31fd884b
RH
2952 if (i < ARRAY_SIZE(op->output_pref)) {
2953 op->output_pref[i] = *la_temp_pref(ts);
2954 }
25f49c5f
RH
2955
2956 /* Output args are dead. */
2957 if (ts->state & TS_DEAD) {
152c35aa 2958 arg_life |= DEAD_ARG << i;
49516bc0 2959 }
25f49c5f 2960 if (ts->state & TS_MEM) {
152c35aa
RH
2961 arg_life |= SYNC_ARG << i;
2962 }
25f49c5f
RH
2963 ts->state = TS_DEAD;
2964 la_reset_pref(ts);
152c35aa 2965 }
49516bc0 2966
25f49c5f 2967 /* If end of basic block, update. */
ae36a246
RH
2968 if (def->flags & TCG_OPF_BB_EXIT) {
2969 la_func_end(s, nb_globals, nb_temps);
b4cb76e6
RH
2970 } else if (def->flags & TCG_OPF_COND_BRANCH) {
2971 la_bb_sync(s, nb_globals, nb_temps);
ae36a246 2972 } else if (def->flags & TCG_OPF_BB_END) {
2616c808 2973 la_bb_end(s, nb_globals, nb_temps);
152c35aa 2974 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
f65a061c 2975 la_global_sync(s, nb_globals);
25f49c5f
RH
2976 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2977 la_cross_call(s, nb_temps);
2978 }
152c35aa
RH
2979 }
2980
25f49c5f 2981 /* Record arguments that die in this opcode. */
152c35aa 2982 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
2983 ts = arg_temp(op->args[i]);
2984 if (ts->state & TS_DEAD) {
152c35aa 2985 arg_life |= DEAD_ARG << i;
c896fe29 2986 }
c896fe29 2987 }
25f49c5f
RH
2988
2989 /* Input arguments are live for preceding opcodes. */
152c35aa 2990 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
2991 ts = arg_temp(op->args[i]);
2992 if (ts->state & TS_DEAD) {
2993 /* For operands that were dead, initially allow
2994 all regs for the type. */
2995 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
2996 ts->state &= ~TS_DEAD;
2997 }
2998 }
2999
3000 /* Incorporate constraints for this operand. */
3001 switch (opc) {
3002 case INDEX_op_mov_i32:
3003 case INDEX_op_mov_i64:
3004 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3005 have proper constraints. That said, special case
3006 moves to propagate preferences backward. */
3007 if (IS_DEAD_ARG(1)) {
3008 *la_temp_pref(arg_temp(op->args[0]))
3009 = *la_temp_pref(arg_temp(op->args[1]));
3010 }
3011 break;
3012
3013 default:
3014 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3015 const TCGArgConstraint *ct = &def->args_ct[i];
3016 TCGRegSet set, *pset;
3017
3018 ts = arg_temp(op->args[i]);
3019 pset = la_temp_pref(ts);
3020 set = *pset;
3021
9be0d080 3022 set &= ct->regs;
bc2b17e6 3023 if (ct->ialias) {
31fd884b 3024 set &= output_pref(op, ct->alias_index);
25f49c5f
RH
3025 }
3026 /* If the combination is not possible, restart. */
3027 if (set == 0) {
9be0d080 3028 set = ct->regs;
25f49c5f
RH
3029 }
3030 *pset = set;
3031 }
3032 break;
152c35aa 3033 }
c896fe29
FB
3034 break;
3035 }
bee158cb 3036 op->life = arg_life;
1ff0a2c5 3037 }
c896fe29 3038}
c896fe29 3039
5a18407f 3040/* Liveness analysis: Convert indirect regs to direct temporaries. */
b83eabea 3041static bool liveness_pass_2(TCGContext *s)
5a18407f
RH
3042{
3043 int nb_globals = s->nb_globals;
15fa08f8 3044 int nb_temps, i;
5a18407f 3045 bool changes = false;
15fa08f8 3046 TCGOp *op, *op_next;
5a18407f 3047
5a18407f
RH
3048 /* Create a temporary for each indirect global. */
3049 for (i = 0; i < nb_globals; ++i) {
3050 TCGTemp *its = &s->temps[i];
3051 if (its->indirect_reg) {
3052 TCGTemp *dts = tcg_temp_alloc(s);
3053 dts->type = its->type;
3054 dts->base_type = its->base_type;
c7482438 3055 dts->kind = TEMP_EBB;
b83eabea
RH
3056 its->state_ptr = dts;
3057 } else {
3058 its->state_ptr = NULL;
5a18407f 3059 }
b83eabea
RH
3060 /* All globals begin dead. */
3061 its->state = TS_DEAD;
3062 }
3063 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3064 TCGTemp *its = &s->temps[i];
3065 its->state_ptr = NULL;
3066 its->state = TS_DEAD;
5a18407f 3067 }
5a18407f 3068
15fa08f8 3069 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
5a18407f
RH
3070 TCGOpcode opc = op->opc;
3071 const TCGOpDef *def = &tcg_op_defs[opc];
3072 TCGLifeData arg_life = op->life;
3073 int nb_iargs, nb_oargs, call_flags;
b83eabea 3074 TCGTemp *arg_ts, *dir_ts;
5a18407f 3075
5a18407f 3076 if (opc == INDEX_op_call) {
cd9090aa
RH
3077 nb_oargs = TCGOP_CALLO(op);
3078 nb_iargs = TCGOP_CALLI(op);
90163900 3079 call_flags = tcg_call_flags(op);
5a18407f
RH
3080 } else {
3081 nb_iargs = def->nb_iargs;
3082 nb_oargs = def->nb_oargs;
3083
3084 /* Set flags similar to how calls require. */
b4cb76e6
RH
3085 if (def->flags & TCG_OPF_COND_BRANCH) {
3086 /* Like reading globals: sync_globals */
3087 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3088 } else if (def->flags & TCG_OPF_BB_END) {
5a18407f
RH
3089 /* Like writing globals: save_globals */
3090 call_flags = 0;
3091 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3092 /* Like reading globals: sync_globals */
3093 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3094 } else {
3095 /* No effect on globals. */
3096 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3097 TCG_CALL_NO_WRITE_GLOBALS);
3098 }
3099 }
3100
3101 /* Make sure that input arguments are available. */
3102 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3103 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3104 dir_ts = arg_ts->state_ptr;
3105 if (dir_ts && arg_ts->state == TS_DEAD) {
3106 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3107 ? INDEX_op_ld_i32
3108 : INDEX_op_ld_i64);
3109 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3110
3111 lop->args[0] = temp_arg(dir_ts);
3112 lop->args[1] = temp_arg(arg_ts->mem_base);
3113 lop->args[2] = arg_ts->mem_offset;
3114
3115 /* Loaded, but synced with memory. */
3116 arg_ts->state = TS_MEM;
5a18407f
RH
3117 }
3118 }
3119
3120 /* Perform input replacement, and mark inputs that became dead.
3121 No action is required except keeping temp_state up to date
3122 so that we reload when needed. */
3123 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3124 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3125 dir_ts = arg_ts->state_ptr;
3126 if (dir_ts) {
3127 op->args[i] = temp_arg(dir_ts);
3128 changes = true;
3129 if (IS_DEAD_ARG(i)) {
3130 arg_ts->state = TS_DEAD;
5a18407f
RH
3131 }
3132 }
3133 }
3134
3135 /* Liveness analysis should ensure that the following are
3136 all correct, for call sites and basic block end points. */
3137 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
3138 /* Nothing to do */
3139 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
3140 for (i = 0; i < nb_globals; ++i) {
3141 /* Liveness should see that globals are synced back,
3142 that is, either TS_DEAD or TS_MEM. */
b83eabea
RH
3143 arg_ts = &s->temps[i];
3144 tcg_debug_assert(arg_ts->state_ptr == 0
3145 || arg_ts->state != 0);
5a18407f
RH
3146 }
3147 } else {
3148 for (i = 0; i < nb_globals; ++i) {
3149 /* Liveness should see that globals are saved back,
3150 that is, TS_DEAD, waiting to be reloaded. */
b83eabea
RH
3151 arg_ts = &s->temps[i];
3152 tcg_debug_assert(arg_ts->state_ptr == 0
3153 || arg_ts->state == TS_DEAD);
5a18407f
RH
3154 }
3155 }
3156
3157 /* Outputs become available. */
61f15c48
RH
3158 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
3159 arg_ts = arg_temp(op->args[0]);
b83eabea 3160 dir_ts = arg_ts->state_ptr;
61f15c48
RH
3161 if (dir_ts) {
3162 op->args[0] = temp_arg(dir_ts);
3163 changes = true;
3164
3165 /* The output is now live and modified. */
3166 arg_ts->state = 0;
3167
3168 if (NEED_SYNC_ARG(0)) {
3169 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3170 ? INDEX_op_st_i32
3171 : INDEX_op_st_i64);
d4478943 3172 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
61f15c48
RH
3173 TCGTemp *out_ts = dir_ts;
3174
3175 if (IS_DEAD_ARG(0)) {
3176 out_ts = arg_temp(op->args[1]);
3177 arg_ts->state = TS_DEAD;
3178 tcg_op_remove(s, op);
3179 } else {
3180 arg_ts->state = TS_MEM;
3181 }
3182
3183 sop->args[0] = temp_arg(out_ts);
3184 sop->args[1] = temp_arg(arg_ts->mem_base);
3185 sop->args[2] = arg_ts->mem_offset;
3186 } else {
3187 tcg_debug_assert(!IS_DEAD_ARG(0));
3188 }
5a18407f 3189 }
61f15c48
RH
3190 } else {
3191 for (i = 0; i < nb_oargs; i++) {
3192 arg_ts = arg_temp(op->args[i]);
3193 dir_ts = arg_ts->state_ptr;
3194 if (!dir_ts) {
3195 continue;
3196 }
3197 op->args[i] = temp_arg(dir_ts);
3198 changes = true;
5a18407f 3199
61f15c48
RH
3200 /* The output is now live and modified. */
3201 arg_ts->state = 0;
5a18407f 3202
61f15c48
RH
3203 /* Sync outputs upon their last write. */
3204 if (NEED_SYNC_ARG(i)) {
3205 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3206 ? INDEX_op_st_i32
3207 : INDEX_op_st_i64);
d4478943 3208 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
5a18407f 3209
61f15c48
RH
3210 sop->args[0] = temp_arg(dir_ts);
3211 sop->args[1] = temp_arg(arg_ts->mem_base);
3212 sop->args[2] = arg_ts->mem_offset;
5a18407f 3213
61f15c48
RH
3214 arg_ts->state = TS_MEM;
3215 }
3216 /* Drop outputs that are dead. */
3217 if (IS_DEAD_ARG(i)) {
3218 arg_ts->state = TS_DEAD;
3219 }
5a18407f
RH
3220 }
3221 }
3222 }
3223
3224 return changes;
3225}
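/*
 * Illustrative sketch, not part of the original file: for an indirect
 * global G shadowed by direct temp D, the pass above rewrites, e.g.,
 *
 *     add_i32 G, G, t0
 * into
 *     ld_i32  D, base, ofs     <- only if D is not already live
 *     add_i32 D, D, t0
 *     st_i32  D, base, ofs     <- only where NEED_SYNC_ARG is set
 *
 * where base/ofs stand for G's mem_base and mem_offset.  The IS_DEAD_ARG
 * and NEED_SYNC_ARG bits computed by liveness_pass_1 decide exactly where
 * the loads and stores are inserted.
 */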
3226
2272e4a7 3227static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
c896fe29 3228{
31c96417
RH
3229 int size = tcg_type_size(ts->type);
3230 int align;
3231 intptr_t off;
c1c09194
RH
3232
3233 switch (ts->type) {
3234 case TCG_TYPE_I32:
31c96417 3235 align = 4;
c1c09194
RH
3236 break;
3237 case TCG_TYPE_I64:
3238 case TCG_TYPE_V64:
31c96417 3239 align = 8;
c1c09194
RH
3240 break;
3241 case TCG_TYPE_V128:
c1c09194
RH
3242 case TCG_TYPE_V256:
3243 /* Note that we do not require aligned storage for V256. */
31c96417 3244 align = 16;
c1c09194
RH
3245 break;
3246 default:
3247 g_assert_not_reached();
b591dc59 3248 }
c1c09194 3249
b9537d59
RH
3250 /*
3251 * Assume the stack is sufficiently aligned.
3252 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
3253 * and do not require 16 byte vector alignment. This seems slightly
3254 * easier than fully parameterizing the above switch statement.
3255 */
3256 align = MIN(TCG_TARGET_STACK_ALIGN, align);
c1c09194 3257 off = ROUND_UP(s->current_frame_offset, align);
732d5897
RH
3258
3259 /* If we've exhausted the stack frame, restart with a smaller TB. */
3260 if (off + size > s->frame_end) {
3261 tcg_raise_tb_overflow(s);
3262 }
c1c09194
RH
3263 s->current_frame_offset = off + size;
3264
3265 ts->mem_offset = off;
9defd1bd
RH
3266#if defined(__sparc__)
3267 ts->mem_offset += TCG_TARGET_STACK_BIAS;
3268#endif
b3a62939 3269 ts->mem_base = s->frame_temp;
c896fe29 3270 ts->mem_allocated = 1;
c896fe29
FB
3271}
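/*
 * Illustrative sketch, not part of the original file: a worked example of
 * the arithmetic above, assuming TCG_TARGET_STACK_ALIGN == 8 and
 * current_frame_offset == 20.  A TCG_TYPE_V128 temp has size 16 and
 * natural align 16, clamped to 8:
 *     off = ROUND_UP(20, 8) = 24;  mem_offset = 24;
 *     current_frame_offset becomes 24 + 16 = 40.
 */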
3272
098859f1
RH
3273/* Assign @reg to @ts, and update reg_to_temp[]. */
3274static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
3275{
3276 if (ts->val_type == TEMP_VAL_REG) {
3277 TCGReg old = ts->reg;
3278 tcg_debug_assert(s->reg_to_temp[old] == ts);
3279 if (old == reg) {
3280 return;
3281 }
3282 s->reg_to_temp[old] = NULL;
3283 }
3284 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3285 s->reg_to_temp[reg] = ts;
3286 ts->val_type = TEMP_VAL_REG;
3287 ts->reg = reg;
3288}
3289
3290/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
3291static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
3292{
3293 tcg_debug_assert(type != TEMP_VAL_REG);
3294 if (ts->val_type == TEMP_VAL_REG) {
3295 TCGReg reg = ts->reg;
3296 tcg_debug_assert(s->reg_to_temp[reg] == ts);
3297 s->reg_to_temp[reg] = NULL;
3298 }
3299 ts->val_type = type;
3300}
3301
b722452a 3302static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
b3915dbb 3303
59d7c14e
RH
3304/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
3305 mark it free; otherwise mark it dead. */
3306static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 3307{
c0522136
RH
3308 TCGTempVal new_type;
3309
3310 switch (ts->kind) {
3311 case TEMP_FIXED:
59d7c14e 3312 return;
c0522136
RH
3313 case TEMP_GLOBAL:
3314 case TEMP_LOCAL:
3315 new_type = TEMP_VAL_MEM;
3316 break;
3317 case TEMP_NORMAL:
c7482438 3318 case TEMP_EBB:
c0522136
RH
3319 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
3320 break;
3321 case TEMP_CONST:
3322 new_type = TEMP_VAL_CONST;
3323 break;
3324 default:
3325 g_assert_not_reached();
59d7c14e 3326 }
098859f1 3327 set_temp_val_nonreg(s, ts, new_type);
59d7c14e 3328}
7f6ceedf 3329
59d7c14e
RH
3330/* Mark a temporary as dead. */
3331static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3332{
3333 temp_free_or_dead(s, ts, 1);
3334}
3335
3336/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3337 register needs to be allocated to store a constant. If 'free_or_dead'
3338 is non-zero, subsequently release the temporary; if it is positive, the
3339 temp is dead; if it is negative, the temp is free. */
98b4e186
RH
3340static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3341 TCGRegSet preferred_regs, int free_or_dead)
59d7c14e 3342{
c0522136 3343 if (!temp_readonly(ts) && !ts->mem_coherent) {
7f6ceedf 3344 if (!ts->mem_allocated) {
2272e4a7 3345 temp_allocate_frame(s, ts);
59d7c14e 3346 }
59d7c14e
RH
3347 switch (ts->val_type) {
3348 case TEMP_VAL_CONST:
3349 /* If we're going to free the temp immediately, then we won't
3350 require it later in a register, so attempt to store the
3351 constant to memory directly. */
3352 if (free_or_dead
3353 && tcg_out_sti(s, ts->type, ts->val,
3354 ts->mem_base->reg, ts->mem_offset)) {
3355 break;
3356 }
3357 temp_load(s, ts, tcg_target_available_regs[ts->type],
98b4e186 3358 allocated_regs, preferred_regs);
59d7c14e
RH
3359 /* fallthrough */
3360
3361 case TEMP_VAL_REG:
3362 tcg_out_st(s, ts->type, ts->reg,
3363 ts->mem_base->reg, ts->mem_offset);
3364 break;
3365
3366 case TEMP_VAL_MEM:
3367 break;
3368
3369 case TEMP_VAL_DEAD:
3370 default:
3371 tcg_abort();
3372 }
3373 ts->mem_coherent = 1;
3374 }
3375 if (free_or_dead) {
3376 temp_free_or_dead(s, ts, free_or_dead);
7f6ceedf 3377 }
7f6ceedf
AJ
3378}
3379
c896fe29 3380/* free register 'reg' by spilling the corresponding temporary if necessary */
b3915dbb 3381static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
c896fe29 3382{
f8b2f202 3383 TCGTemp *ts = s->reg_to_temp[reg];
f8b2f202 3384 if (ts != NULL) {
98b4e186 3385 temp_sync(s, ts, allocated_regs, 0, -1);
c896fe29
FB
3386 }
3387}
3388
b016486e
RH
3389/**
3390 * tcg_reg_alloc:
3391 * @required_regs: Set of registers in which we must allocate.
3392 * @allocated_regs: Set of registers which must be avoided.
3393 * @preferred_regs: Set of registers we should prefer.
3394 * @rev: True if we search the registers in "indirect" order.
3395 *
3396 * The allocated register must be in @required_regs & ~@allocated_regs,
3397 * but if we can put it in @preferred_regs we may save a move later.
3398 */
3399static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3400 TCGRegSet allocated_regs,
3401 TCGRegSet preferred_regs, bool rev)
c896fe29 3402{
b016486e
RH
3403 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3404 TCGRegSet reg_ct[2];
91478cef 3405 const int *order;
c896fe29 3406
b016486e
RH
3407 reg_ct[1] = required_regs & ~allocated_regs;
3408 tcg_debug_assert(reg_ct[1] != 0);
3409 reg_ct[0] = reg_ct[1] & preferred_regs;
3410
3411 /* Skip the preferred_regs option if it cannot be satisfied,
3412 or if the preference made no difference. */
3413 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3414
91478cef 3415 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
c896fe29 3416
b016486e
RH
3417 /* Try free registers, preferences first. */
3418 for (j = f; j < 2; j++) {
3419 TCGRegSet set = reg_ct[j];
3420
3421 if (tcg_regset_single(set)) {
3422 /* One register in the set. */
3423 TCGReg reg = tcg_regset_first(set);
3424 if (s->reg_to_temp[reg] == NULL) {
3425 return reg;
3426 }
3427 } else {
3428 for (i = 0; i < n; i++) {
3429 TCGReg reg = order[i];
3430 if (s->reg_to_temp[reg] == NULL &&
3431 tcg_regset_test_reg(set, reg)) {
3432 return reg;
3433 }
3434 }
3435 }
c896fe29
FB
3436 }
3437
b016486e
RH
3438 /* We must spill something. */
3439 for (j = f; j < 2; j++) {
3440 TCGRegSet set = reg_ct[j];
3441
3442 if (tcg_regset_single(set)) {
3443 /* One register in the set. */
3444 TCGReg reg = tcg_regset_first(set);
b3915dbb 3445 tcg_reg_free(s, reg, allocated_regs);
c896fe29 3446 return reg;
b016486e
RH
3447 } else {
3448 for (i = 0; i < n; i++) {
3449 TCGReg reg = order[i];
3450 if (tcg_regset_test_reg(set, reg)) {
3451 tcg_reg_free(s, reg, allocated_regs);
3452 return reg;
3453 }
3454 }
c896fe29
FB
3455 }
3456 }
3457
3458 tcg_abort();
3459}
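/*
 * Illustrative sketch, not part of the original file: the function above
 * makes at most four attempts, in order:
 *   1. a free register from required & ~allocated & preferred,
 *   2. a free register from required & ~allocated,
 *   3. spill a register from the preferred subset,
 *   4. spill any acceptable register,
 * always scanning in tcg_target_reg_alloc_order (or the indirect order
 * when @rev), so the first acceptable register in that order wins.
 */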
3460
29f5e925
RH
3461static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
3462 TCGRegSet allocated_regs,
3463 TCGRegSet preferred_regs, bool rev)
3464{
3465 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3466 TCGRegSet reg_ct[2];
3467 const int *order;
3468
3469 /* Ensure that if I is not in allocated_regs, I+1 is not either. */
3470 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
3471 tcg_debug_assert(reg_ct[1] != 0);
3472 reg_ct[0] = reg_ct[1] & preferred_regs;
3473
3474 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3475
3476 /*
3477 * Skip the preferred_regs option if it cannot be satisfied,
3478 * or if the preference made no difference.
3479 */
3480 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3481
3482 /*
3483 * Minimize the number of flushes by looking for 2 free registers first,
3484 * then a single flush, then two flushes.
3485 */
3486 for (fmin = 2; fmin >= 0; fmin--) {
3487 for (j = k; j < 2; j++) {
3488 TCGRegSet set = reg_ct[j];
3489
3490 for (i = 0; i < n; i++) {
3491 TCGReg reg = order[i];
3492
3493 if (tcg_regset_test_reg(set, reg)) {
3494 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
3495 if (f >= fmin) {
3496 tcg_reg_free(s, reg, allocated_regs);
3497 tcg_reg_free(s, reg + 1, allocated_regs);
3498 return reg;
3499 }
3500 }
3501 }
3502 }
3503 }
3504 tcg_abort();
3505}
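/*
 * Illustrative note, not part of the original file: the fmin loop above
 * first looks for a candidate where both reg and reg+1 are currently
 * unused (fmin == 2), then settles for a single spill (fmin == 1), and
 * only as a last resort spills both halves of the pair (fmin == 0).
 */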
3506
40ae5c62
RH
3507/* Make sure the temporary is in a register. If needed, allocate the register
3508 from DESIRED while avoiding ALLOCATED. */
3509static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
b722452a 3510 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
40ae5c62
RH
3511{
3512 TCGReg reg;
3513
3514 switch (ts->val_type) {
3515 case TEMP_VAL_REG:
3516 return;
3517 case TEMP_VAL_CONST:
b016486e 3518 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3519 preferred_regs, ts->indirect_base);
0a6a8bc8
RH
3520 if (ts->type <= TCG_TYPE_I64) {
3521 tcg_out_movi(s, ts->type, reg, ts->val);
3522 } else {
4e186175
RH
3523 uint64_t val = ts->val;
3524 MemOp vece = MO_64;
3525
3526 /*
3527 * Find the minimal vector element that matches the constant.
3528 * The targets will, in general, have to do this search anyway,
3529 * do this generically.
3530 */
4e186175
RH
3531 if (val == dup_const(MO_8, val)) {
3532 vece = MO_8;
3533 } else if (val == dup_const(MO_16, val)) {
3534 vece = MO_16;
0b4286dd 3535 } else if (val == dup_const(MO_32, val)) {
4e186175
RH
3536 vece = MO_32;
3537 }
3538
3539 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
0a6a8bc8 3540 }
40ae5c62
RH
3541 ts->mem_coherent = 0;
3542 break;
3543 case TEMP_VAL_MEM:
b016486e 3544 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3545 preferred_regs, ts->indirect_base);
40ae5c62
RH
3546 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3547 ts->mem_coherent = 1;
3548 break;
3549 case TEMP_VAL_DEAD:
3550 default:
3551 tcg_abort();
3552 }
098859f1 3553 set_temp_val_reg(s, ts, reg);
40ae5c62
RH
3554}
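/*
 * Illustrative sketch, not part of the original file: examples of the
 * dup_const search above picking the narrowest replicating element:
 *     0x4242424242424242 -> MO_8   (every byte equal)
 *     0x0001000100010001 -> MO_16  (every 16-bit lane equal)
 *     0x1234567812345678 -> MO_32
 *     0x0123456789abcdef -> MO_64  (no smaller replication)
 */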
3555
59d7c14e
RH
3556/* Save a temporary to memory. 'allocated_regs' is used in case a
3557 temporary register needs to be allocated to store a constant. */
3558static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1ad80729 3559{
5a18407f
RH
3560 /* The liveness analysis already ensures that globals are back
3561 in memory. Keep a tcg_debug_assert for safety. */
e01fa97d 3562 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
1ad80729
AJ
3563}
3564
9814dd27 3565/* save globals to their canonical location and assume they can be
e8996ee0
FB
3566 modified by the following code. 'allocated_regs' is used in case a
3567 temporary register needs to be allocated to store a constant. */
3568static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 3569{
ac3b8891 3570 int i, n;
c896fe29 3571
ac3b8891 3572 for (i = 0, n = s->nb_globals; i < n; i++) {
b13eb728 3573 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 3574 }
e5097dc8
FB
3575}
3576
3d5c5f87
AJ
3577/* sync globals to their canonical location and assume they can be
3578 read by the following code. 'allocated_regs' is used in case a
3579    temporary register needs to be allocated to store a constant. */
3580static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3581{
ac3b8891 3582 int i, n;
3d5c5f87 3583
ac3b8891 3584 for (i = 0, n = s->nb_globals; i < n; i++) {
12b9b11a 3585 TCGTemp *ts = &s->temps[i];
5a18407f 3586 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
ee17db83 3587 || ts->kind == TEMP_FIXED
5a18407f 3588 || ts->mem_coherent);
3d5c5f87
AJ
3589 }
3590}
3591
e5097dc8 3592/* at the end of a basic block, we assume all temporaries are dead and
e8996ee0
FB
3593 all globals are stored at their canonical location. */
3594static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
e5097dc8 3595{
e5097dc8
FB
3596 int i;
3597
b13eb728
RH
3598 for (i = s->nb_globals; i < s->nb_temps; i++) {
3599 TCGTemp *ts = &s->temps[i];
c0522136
RH
3600
3601 switch (ts->kind) {
3602 case TEMP_LOCAL:
b13eb728 3603 temp_save(s, ts, allocated_regs);
c0522136
RH
3604 break;
3605 case TEMP_NORMAL:
c7482438 3606 case TEMP_EBB:
5a18407f
RH
3607 /* The liveness analysis already ensures that temps are dead.
3608                Keep a tcg_debug_assert for safety. */
3609 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136
RH
3610 break;
3611 case TEMP_CONST:
3612 /* Similarly, we should have freed any allocated register. */
3613 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3614 break;
3615 default:
3616 g_assert_not_reached();
c896fe29
FB
3617 }
3618 }
e8996ee0
FB
3619
3620 save_globals(s, allocated_regs);
c896fe29
FB
3621}
3622
b4cb76e6 3623/*
c7482438
RH
3624 * At a conditional branch, we assume all temporaries are dead unless
3625 * explicitly live-across-conditional-branch; all globals and local
3626 * temps are synced to their location.
b4cb76e6
RH
3627 */
3628static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3629{
3630 sync_globals(s, allocated_regs);
3631
3632 for (int i = s->nb_globals; i < s->nb_temps; i++) {
3633 TCGTemp *ts = &s->temps[i];
3634 /*
3635 * The liveness analysis already ensures that temps are dead.
3636 * Keep tcg_debug_asserts for safety.
3637 */
c0522136
RH
3638 switch (ts->kind) {
3639 case TEMP_LOCAL:
b4cb76e6 3640 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
c0522136
RH
3641 break;
3642 case TEMP_NORMAL:
b4cb76e6 3643 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136 3644 break;
c7482438 3645 case TEMP_EBB:
c0522136
RH
3646 case TEMP_CONST:
3647 break;
3648 default:
3649 g_assert_not_reached();
b4cb76e6
RH
3650 }
3651 }
3652}
3653
bab1671f 3654/*
c58f4c97 3655 * Specialized code generation for INDEX_op_mov_* with a constant.
bab1671f 3656 */
0fe4fca4 3657static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
ba87719c
RH
3658 tcg_target_ulong val, TCGLifeData arg_life,
3659 TCGRegSet preferred_regs)
e8996ee0 3660{
d63e3b6e 3661 /* ENV should not be modified. */
e01fa97d 3662 tcg_debug_assert(!temp_readonly(ots));
59d7c14e
RH
3663
3664 /* The movi is not explicitly generated here. */
098859f1 3665 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
59d7c14e
RH
3666 ots->val = val;
3667 ots->mem_coherent = 0;
3668 if (NEED_SYNC_ARG(0)) {
ba87719c 3669 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
59d7c14e 3670 } else if (IS_DEAD_ARG(0)) {
f8bf00f1 3671 temp_dead(s, ots);
4c4e1ab2 3672 }
e8996ee0
FB
3673}
3674
bab1671f
RH
3675/*
3676 * Specialized code generation for INDEX_op_mov_*.
3677 */
dd186292 3678static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
c896fe29 3679{
dd186292 3680 const TCGLifeData arg_life = op->life;
69e3706d 3681 TCGRegSet allocated_regs, preferred_regs;
c896fe29 3682 TCGTemp *ts, *ots;
450445d5 3683 TCGType otype, itype;
098859f1 3684 TCGReg oreg, ireg;
c896fe29 3685
d21369f5 3686 allocated_regs = s->reserved_regs;
31fd884b 3687 preferred_regs = output_pref(op, 0);
43439139
RH
3688 ots = arg_temp(op->args[0]);
3689 ts = arg_temp(op->args[1]);
450445d5 3690
d63e3b6e 3691 /* ENV should not be modified. */
e01fa97d 3692 tcg_debug_assert(!temp_readonly(ots));
d63e3b6e 3693
450445d5
RH
3694 /* Note that otype != itype for no-op truncation. */
3695 otype = ots->type;
3696 itype = ts->type;
c29c1d7e 3697
0fe4fca4
PB
3698 if (ts->val_type == TEMP_VAL_CONST) {
3699 /* propagate constant or generate sti */
3700 tcg_target_ulong val = ts->val;
3701 if (IS_DEAD_ARG(1)) {
3702 temp_dead(s, ts);
3703 }
69e3706d 3704 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
0fe4fca4
PB
3705 return;
3706 }
3707
3708     /* If the source value is in memory, we're going to be forced
3709        to have it in a register in order to perform the copy.  Copy
3710        the SOURCE value into its own register first, so that we
3711        don't have to reload SOURCE the next time it is used. */
3712 if (ts->val_type == TEMP_VAL_MEM) {
69e3706d
RH
3713 temp_load(s, ts, tcg_target_available_regs[itype],
3714 allocated_regs, preferred_regs);
c29c1d7e 3715 }
0fe4fca4 3716 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
098859f1
RH
3717 ireg = ts->reg;
3718
d63e3b6e 3719 if (IS_DEAD_ARG(0)) {
c29c1d7e
AJ
3720 /* mov to a non-saved dead register makes no sense (even with
3721 liveness analysis disabled). */
eabb7b91 3722 tcg_debug_assert(NEED_SYNC_ARG(0));
c29c1d7e 3723 if (!ots->mem_allocated) {
2272e4a7 3724 temp_allocate_frame(s, ots);
c29c1d7e 3725 }
098859f1 3726 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
c29c1d7e 3727 if (IS_DEAD_ARG(1)) {
f8bf00f1 3728 temp_dead(s, ts);
c29c1d7e 3729 }
f8bf00f1 3730 temp_dead(s, ots);
098859f1
RH
3731 return;
3732 }
3733
3734 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3735 /*
3736 * The mov can be suppressed. Kill input first, so that it
3737 * is unlinked from reg_to_temp, then set the output to the
3738 * reg that we saved from the input.
3739 */
3740 temp_dead(s, ts);
3741 oreg = ireg;
c29c1d7e 3742 } else {
098859f1
RH
3743 if (ots->val_type == TEMP_VAL_REG) {
3744 oreg = ots->reg;
c896fe29 3745 } else {
098859f1
RH
3746 /* Make sure to not spill the input register during allocation. */
3747 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3748 allocated_regs | ((TCGRegSet)1 << ireg),
3749 preferred_regs, ots->indirect_base);
c896fe29 3750 }
098859f1
RH
3751 if (!tcg_out_mov(s, otype, oreg, ireg)) {
3752 /*
3753 * Cross register class move not supported.
3754 * Store the source register into the destination slot
3755 * and leave the destination temp as TEMP_VAL_MEM.
3756 */
3757 assert(!temp_readonly(ots));
3758 if (!ts->mem_allocated) {
3759 temp_allocate_frame(s, ots);
3760 }
3761 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
3762 set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
3763 ots->mem_coherent = 1;
3764 return;
c896fe29 3765 }
ec7a869d 3766 }
098859f1
RH
3767 set_temp_val_reg(s, ots, oreg);
3768 ots->mem_coherent = 0;
3769
3770 if (NEED_SYNC_ARG(0)) {
3771 temp_sync(s, ots, allocated_regs, 0, 0);
3772 }
c896fe29
FB
3773}
3774
bab1671f
RH
3775/*
3776 * Specialized code generation for INDEX_op_dup_vec.
3777 */
3778static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3779{
3780 const TCGLifeData arg_life = op->life;
3781 TCGRegSet dup_out_regs, dup_in_regs;
3782 TCGTemp *its, *ots;
3783 TCGType itype, vtype;
3784 unsigned vece;
31c96417 3785 int lowpart_ofs;
bab1671f
RH
3786 bool ok;
3787
3788 ots = arg_temp(op->args[0]);
3789 its = arg_temp(op->args[1]);
3790
3791 /* ENV should not be modified. */
e01fa97d 3792 tcg_debug_assert(!temp_readonly(ots));
bab1671f
RH
3793
3794 itype = its->type;
3795 vece = TCGOP_VECE(op);
3796 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3797
3798 if (its->val_type == TEMP_VAL_CONST) {
3799 /* Propagate constant via movi -> dupi. */
3800 tcg_target_ulong val = its->val;
3801 if (IS_DEAD_ARG(1)) {
3802 temp_dead(s, its);
3803 }
31fd884b 3804 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
bab1671f
RH
3805 return;
3806 }
3807
9be0d080
RH
3808 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3809 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
bab1671f
RH
3810
3811 /* Allocate the output register now. */
3812 if (ots->val_type != TEMP_VAL_REG) {
3813 TCGRegSet allocated_regs = s->reserved_regs;
098859f1 3814 TCGReg oreg;
bab1671f
RH
3815
3816 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3817 /* Make sure to not spill the input register. */
3818 tcg_regset_set_reg(allocated_regs, its->reg);
3819 }
098859f1 3820 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 3821 output_pref(op, 0), ots->indirect_base);
098859f1 3822 set_temp_val_reg(s, ots, oreg);
bab1671f
RH
3823 }
3824
3825 switch (its->val_type) {
3826 case TEMP_VAL_REG:
3827 /*
3828          * The dup constraints must be broad, covering all possible VECE.
3829          * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3830 * to fail, indicating that extra moves are required for that case.
3831 */
3832 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
3833 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
3834 goto done;
3835 }
3836 /* Try again from memory or a vector input register. */
3837 }
3838 if (!its->mem_coherent) {
3839 /*
3840 * The input register is not synced, and so an extra store
3841 * would be required to use memory. Attempt an integer-vector
3842 * register move first. We do not have a TCGRegSet for this.
3843 */
3844 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
3845 break;
3846 }
3847 /* Sync the temp back to its slot and load from there. */
3848 temp_sync(s, its, s->reserved_regs, 0, 0);
3849 }
3850 /* fall through */
3851
3852 case TEMP_VAL_MEM:
31c96417
RH
3853 lowpart_ofs = 0;
3854 if (HOST_BIG_ENDIAN) {
3855 lowpart_ofs = tcg_type_size(itype) - (1 << vece);
3856 }
d6ecb4a9 3857 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
31c96417 3858 its->mem_offset + lowpart_ofs)) {
d6ecb4a9
RH
3859 goto done;
3860 }
098859f1 3861 /* Load the input into the destination vector register. */
bab1671f
RH
3862 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
3863 break;
3864
3865 default:
3866 g_assert_not_reached();
3867 }
3868
3869 /* We now have a vector input register, so dup must succeed. */
3870 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
3871 tcg_debug_assert(ok);
3872
3873 done:
36f5539c 3874 ots->mem_coherent = 0;
bab1671f
RH
3875 if (IS_DEAD_ARG(1)) {
3876 temp_dead(s, its);
3877 }
3878 if (NEED_SYNC_ARG(0)) {
3879 temp_sync(s, ots, s->reserved_regs, 0, 0);
3880 }
3881 if (IS_DEAD_ARG(0)) {
3882 temp_dead(s, ots);
3883 }
3884}
3885
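/*
 * Editorial sketch of the lowpart_ofs computation above: on a big-endian
 * host the least significant element of an in-memory value sits at the end
 * of its slot, so the dup-from-memory path biases the offset by the slot
 * size minus the element size; on little-endian hosts the offset is 0.
 * E.g. an 8-byte I64 slot with MO_16 elements gives 8 - 2 = 6.
 */
static inline int sketch_lowpart_ofs(int slot_size, unsigned vece,
                                     bool host_big_endian)
{
    return host_big_endian ? slot_size - (1 << vece) : 0;
}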
dd186292 3886static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 3887{
dd186292
RH
3888 const TCGLifeData arg_life = op->life;
3889 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
3890 TCGRegSet i_allocated_regs;
3891 TCGRegSet o_allocated_regs;
b6638662
RH
3892 int i, k, nb_iargs, nb_oargs;
3893 TCGReg reg;
c896fe29
FB
3894 TCGArg arg;
3895 const TCGArgConstraint *arg_ct;
3896 TCGTemp *ts;
3897 TCGArg new_args[TCG_MAX_OP_ARGS];
3898 int const_args[TCG_MAX_OP_ARGS];
3899
3900 nb_oargs = def->nb_oargs;
3901 nb_iargs = def->nb_iargs;
3902
3903 /* copy constants */
a813e36f 3904 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 3905 op->args + nb_oargs + nb_iargs,
c896fe29
FB
3906 sizeof(TCGArg) * def->nb_cargs);
3907
d21369f5
RH
3908 i_allocated_regs = s->reserved_regs;
3909 o_allocated_regs = s->reserved_regs;
82790a87 3910
a813e36f 3911 /* satisfy input constraints */
dd186292 3912 for (k = 0; k < nb_iargs; k++) {
29f5e925
RH
3913 TCGRegSet i_preferred_regs, i_required_regs;
3914 bool allocate_new_reg, copyto_new_reg;
3915 TCGTemp *ts2;
3916 int i1, i2;
d62816f2 3917
66792f90 3918 i = def->args_ct[nb_oargs + k].sort_index;
dd186292 3919 arg = op->args[i];
c896fe29 3920 arg_ct = &def->args_ct[i];
43439139 3921 ts = arg_temp(arg);
40ae5c62
RH
3922
3923 if (ts->val_type == TEMP_VAL_CONST
a4fbbd77 3924 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
40ae5c62
RH
3925 /* constant is OK for instruction */
3926 const_args[i] = 1;
3927 new_args[i] = ts->val;
d62816f2 3928 continue;
c896fe29 3929 }
40ae5c62 3930
1c1824dc
RH
3931 reg = ts->reg;
3932 i_preferred_regs = 0;
29f5e925 3933 i_required_regs = arg_ct->regs;
1c1824dc 3934 allocate_new_reg = false;
29f5e925
RH
3935 copyto_new_reg = false;
3936
3937 switch (arg_ct->pair) {
3938 case 0: /* not paired */
3939 if (arg_ct->ialias) {
31fd884b 3940 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
3941
3942 /*
3943 * If the input is readonly, then it cannot also be an
3944 * output and aliased to itself. If the input is not
3945 * dead after the instruction, we must allocate a new
3946 * register and move it.
3947 */
3948 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
3949 allocate_new_reg = true;
3950 } else if (ts->val_type == TEMP_VAL_REG) {
3951 /*
3952 * Check if the current register has already been
3953 * allocated for another input.
3954 */
3955 allocate_new_reg =
3956 tcg_regset_test_reg(i_allocated_regs, reg);
3957 }
3958 }
3959 if (!allocate_new_reg) {
3960 temp_load(s, ts, i_required_regs, i_allocated_regs,
3961 i_preferred_regs);
3962 reg = ts->reg;
3963 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
3964 }
3965 if (allocate_new_reg) {
3966 /*
3967 * Allocate a new register matching the constraint
3968 * and move the temporary register into it.
3969 */
3970 temp_load(s, ts, tcg_target_available_regs[ts->type],
3971 i_allocated_regs, 0);
3972 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
3973 i_preferred_regs, ts->indirect_base);
3974 copyto_new_reg = true;
3975 }
3976 break;
3977
3978 case 1:
3979 /* First of an input pair; if i1 == i2, the second is an output. */
3980 i1 = i;
3981 i2 = arg_ct->pair_index;
3982 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
3983
3984 /*
3985 * It is easier to default to allocating a new pair
3986 * and to identify a few cases where it's not required.
3987 */
3988 if (arg_ct->ialias) {
31fd884b 3989 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
3990 if (IS_DEAD_ARG(i1) &&
3991 IS_DEAD_ARG(i2) &&
3992 !temp_readonly(ts) &&
3993 ts->val_type == TEMP_VAL_REG &&
3994 ts->reg < TCG_TARGET_NB_REGS - 1 &&
3995 tcg_regset_test_reg(i_required_regs, reg) &&
3996 !tcg_regset_test_reg(i_allocated_regs, reg) &&
3997 !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
3998 (ts2
3999 ? ts2->val_type == TEMP_VAL_REG &&
4000 ts2->reg == reg + 1 &&
4001 !temp_readonly(ts2)
4002 : s->reg_to_temp[reg + 1] == NULL)) {
4003 break;
4004 }
4005 } else {
4006 /* Without aliasing, the pair must also be an input. */
4007 tcg_debug_assert(ts2);
4008 if (ts->val_type == TEMP_VAL_REG &&
4009 ts2->val_type == TEMP_VAL_REG &&
4010 ts2->reg == reg + 1 &&
4011 tcg_regset_test_reg(i_required_regs, reg)) {
4012 break;
4013 }
4014 }
4015 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4016 0, ts->indirect_base);
4017 goto do_pair;
4018
4019 case 2: /* pair second */
4020 reg = new_args[arg_ct->pair_index] + 1;
4021 goto do_pair;
1c1824dc 4022
29f5e925
RH
4023 case 3: /* ialias with second output, no first input */
4024 tcg_debug_assert(arg_ct->ialias);
31fd884b 4025 i_preferred_regs = output_pref(op, arg_ct->alias_index);
d62816f2 4026
29f5e925
RH
4027 if (IS_DEAD_ARG(i) &&
4028 !temp_readonly(ts) &&
4029 ts->val_type == TEMP_VAL_REG &&
4030 reg > 0 &&
4031 s->reg_to_temp[reg - 1] == NULL &&
4032 tcg_regset_test_reg(i_required_regs, reg) &&
4033 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4034 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4035 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4036 break;
4037 }
4038 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4039 i_allocated_regs, 0,
4040 ts->indirect_base);
4041 tcg_regset_set_reg(i_allocated_regs, reg);
4042 reg += 1;
4043 goto do_pair;
4044
4045 do_pair:
c0522136 4046 /*
29f5e925
RH
4047 * If an aliased input is not dead after the instruction,
4048 * we must allocate a new register and move it.
c0522136 4049 */
29f5e925
RH
4050 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4051 TCGRegSet t_allocated_regs = i_allocated_regs;
4052
1c1824dc 4053 /*
29f5e925
RH
4054 * Because of the alias, and the continued life, make sure
4055 * that the temp is somewhere *other* than the reg pair,
4056 * and we get a copy in reg.
1c1824dc 4057 */
29f5e925
RH
4058 tcg_regset_set_reg(t_allocated_regs, reg);
4059 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4060 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4061 /* If ts was already in reg, copy it somewhere else. */
4062 TCGReg nr;
4063 bool ok;
4064
4065 tcg_debug_assert(ts->kind != TEMP_FIXED);
4066 nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4067 t_allocated_regs, 0, ts->indirect_base);
4068 ok = tcg_out_mov(s, ts->type, nr, reg);
4069 tcg_debug_assert(ok);
4070
4071 set_temp_val_reg(s, ts, nr);
4072 } else {
4073 temp_load(s, ts, tcg_target_available_regs[ts->type],
4074 t_allocated_regs, 0);
4075 copyto_new_reg = true;
4076 }
4077 } else {
4078 /* Preferably allocate to reg, otherwise copy. */
4079 i_required_regs = (TCGRegSet)1 << reg;
4080 temp_load(s, ts, i_required_regs, i_allocated_regs,
4081 i_preferred_regs);
4082 copyto_new_reg = ts->reg != reg;
5ff9d6a4 4083 }
29f5e925 4084 break;
d62816f2 4085
29f5e925
RH
4086 default:
4087 g_assert_not_reached();
1c1824dc 4088 }
d62816f2 4089
29f5e925 4090 if (copyto_new_reg) {
78113e83 4091 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
240c08d0
RH
4092 /*
4093 * Cross register class move not supported. Sync the
4094 * temp back to its slot and load from there.
4095 */
4096 temp_sync(s, ts, i_allocated_regs, 0, 0);
4097 tcg_out_ld(s, ts->type, reg,
4098 ts->mem_base->reg, ts->mem_offset);
78113e83 4099 }
c896fe29 4100 }
c896fe29
FB
4101 new_args[i] = reg;
4102 const_args[i] = 0;
82790a87 4103 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29 4104 }
a813e36f 4105
a52ad07e
AJ
4106 /* mark dead temporaries and free the associated registers */
4107 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4108 if (IS_DEAD_ARG(i)) {
43439139 4109 temp_dead(s, arg_temp(op->args[i]));
a52ad07e
AJ
4110 }
4111 }
4112
b4cb76e6
RH
4113 if (def->flags & TCG_OPF_COND_BRANCH) {
4114 tcg_reg_alloc_cbranch(s, i_allocated_regs);
4115 } else if (def->flags & TCG_OPF_BB_END) {
82790a87 4116 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 4117 } else {
e8996ee0 4118 if (def->flags & TCG_OPF_CALL_CLOBBER) {
a813e36f 4119 /* XXX: permit generic clobber register list ? */
c8074023
RH
4120 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4121 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 4122 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 4123 }
c896fe29 4124 }
3d5c5f87
AJ
4125 }
4126 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
4127 /* sync globals if the op has side effects and might trigger
4128 an exception. */
82790a87 4129 sync_globals(s, i_allocated_regs);
c896fe29 4130 }
a813e36f 4131
e8996ee0 4132 /* satisfy the output constraints */
e8996ee0 4133 for(k = 0; k < nb_oargs; k++) {
66792f90 4134 i = def->args_ct[k].sort_index;
dd186292 4135 arg = op->args[i];
e8996ee0 4136 arg_ct = &def->args_ct[i];
43439139 4137 ts = arg_temp(arg);
d63e3b6e
RH
4138
4139 /* ENV should not be modified. */
e01fa97d 4140 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4141
29f5e925
RH
4142 switch (arg_ct->pair) {
4143 case 0: /* not paired */
4144 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
4145 reg = new_args[arg_ct->alias_index];
4146 } else if (arg_ct->newreg) {
4147 reg = tcg_reg_alloc(s, arg_ct->regs,
4148 i_allocated_regs | o_allocated_regs,
31fd884b 4149 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4150 } else {
4151 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
31fd884b 4152 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4153 }
4154 break;
4155
4156 case 1: /* first of pair */
4157 tcg_debug_assert(!arg_ct->newreg);
4158 if (arg_ct->oalias) {
4159 reg = new_args[arg_ct->alias_index];
4160 break;
4161 }
4162 reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
31fd884b 4163 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4164 break;
4165
4166 case 2: /* second of pair */
4167 tcg_debug_assert(!arg_ct->newreg);
4168 if (arg_ct->oalias) {
4169 reg = new_args[arg_ct->alias_index];
4170 } else {
4171 reg = new_args[arg_ct->pair_index] + 1;
4172 }
4173 break;
4174
4175 case 3: /* first of pair, aliasing with a second input */
4176 tcg_debug_assert(!arg_ct->newreg);
4177 reg = new_args[arg_ct->pair_index] - 1;
4178 break;
4179
4180 default:
4181 g_assert_not_reached();
c896fe29 4182 }
82790a87 4183 tcg_regset_set_reg(o_allocated_regs, reg);
098859f1 4184 set_temp_val_reg(s, ts, reg);
d63e3b6e 4185 ts->mem_coherent = 0;
e8996ee0 4186 new_args[i] = reg;
c896fe29 4187 }
c896fe29
FB
4188 }
4189
c896fe29 4190 /* emit instruction */
d2fd745f
RH
4191 if (def->flags & TCG_OPF_VECTOR) {
4192 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
4193 new_args, const_args);
4194 } else {
4195 tcg_out_op(s, op->opc, new_args, const_args);
4196 }
4197
c896fe29
FB
4198 /* move the outputs in the correct register if needed */
4199 for(i = 0; i < nb_oargs; i++) {
43439139 4200 ts = arg_temp(op->args[i]);
d63e3b6e
RH
4201
4202 /* ENV should not be modified. */
e01fa97d 4203 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4204
ec7a869d 4205 if (NEED_SYNC_ARG(i)) {
98b4e186 4206 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
59d7c14e 4207 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 4208 temp_dead(s, ts);
ec7a869d 4209 }
c896fe29
FB
4210 }
4211}
4212
efe86b21
RH
4213static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
4214{
4215 const TCGLifeData arg_life = op->life;
4216 TCGTemp *ots, *itsl, *itsh;
4217 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4218
4219 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
4220 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
4221 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
4222
4223 ots = arg_temp(op->args[0]);
4224 itsl = arg_temp(op->args[1]);
4225 itsh = arg_temp(op->args[2]);
4226
4227 /* ENV should not be modified. */
4228 tcg_debug_assert(!temp_readonly(ots));
4229
4230 /* Allocate the output register now. */
4231 if (ots->val_type != TEMP_VAL_REG) {
4232 TCGRegSet allocated_regs = s->reserved_regs;
4233 TCGRegSet dup_out_regs =
4234 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
098859f1 4235 TCGReg oreg;
efe86b21
RH
4236
4237 /* Make sure to not spill the input registers. */
4238 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
4239 tcg_regset_set_reg(allocated_regs, itsl->reg);
4240 }
4241 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
4242 tcg_regset_set_reg(allocated_regs, itsh->reg);
4243 }
4244
098859f1 4245 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 4246 output_pref(op, 0), ots->indirect_base);
098859f1 4247 set_temp_val_reg(s, ots, oreg);
efe86b21
RH
4248 }
4249
4250 /* Promote dup2 of immediates to dupi_vec. */
4251 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
4252 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
4253 MemOp vece = MO_64;
4254
4255 if (val == dup_const(MO_8, val)) {
4256 vece = MO_8;
4257 } else if (val == dup_const(MO_16, val)) {
4258 vece = MO_16;
4259 } else if (val == dup_const(MO_32, val)) {
4260 vece = MO_32;
4261 }
4262
4263 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
4264 goto done;
4265 }
4266
4267 /* If the two inputs form one 64-bit value, try dupm_vec. */
aef85402
RH
4268 if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
4269 itsh->temp_subindex == !HOST_BIG_ENDIAN &&
4270 itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
4271 TCGTemp *its = itsl - HOST_BIG_ENDIAN;
4272
4273 temp_sync(s, its + 0, s->reserved_regs, 0, 0);
4274 temp_sync(s, its + 1, s->reserved_regs, 0, 0);
4275
efe86b21
RH
4276 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
4277 its->mem_base->reg, its->mem_offset)) {
4278 goto done;
4279 }
4280 }
4281
4282 /* Fall back to generic expansion. */
4283 return false;
4284
4285 done:
36f5539c 4286 ots->mem_coherent = 0;
efe86b21
RH
4287 if (IS_DEAD_ARG(1)) {
4288 temp_dead(s, itsl);
4289 }
4290 if (IS_DEAD_ARG(2)) {
4291 temp_dead(s, itsh);
4292 }
4293 if (NEED_SYNC_ARG(0)) {
4294 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
4295 } else if (IS_DEAD_ARG(0)) {
4296 temp_dead(s, ots);
4297 }
4298 return true;
4299}
4300
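/*
 * Editorial sketch: deposit64(lo, 32, 32, hi) above inserts the low 32 bits
 * of hi into bits [63:32] of lo, producing the 64-bit element that the
 * constant case promotes from dup2 to a dupi.  A stand-alone equivalent:
 */
static inline uint64_t sketch_concat32(uint32_t lo, uint32_t hi)
{
    return (uint64_t)lo | ((uint64_t)hi << 32);
}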
39004a71
RH
4301static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
4302 TCGRegSet allocated_regs)
c896fe29 4303{
39004a71
RH
4304 if (ts->val_type == TEMP_VAL_REG) {
4305 if (ts->reg != reg) {
4306 tcg_reg_free(s, reg, allocated_regs);
4307 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4308 /*
4309 * Cross register class move not supported. Sync the
4310 * temp back to its slot and load from there.
4311 */
4312 temp_sync(s, ts, allocated_regs, 0, 0);
4313 tcg_out_ld(s, ts->type, reg,
4314 ts->mem_base->reg, ts->mem_offset);
4315 }
4316 }
4317 } else {
4318 TCGRegSet arg_set = 0;
c896fe29 4319
39004a71
RH
4320 tcg_reg_free(s, reg, allocated_regs);
4321 tcg_regset_set_reg(arg_set, reg);
4322 temp_load(s, ts, arg_set, allocated_regs, 0);
b03cce8e 4323 }
39004a71 4324}
39cf05d3 4325
39004a71
RH
4326static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
4327 TCGRegSet allocated_regs)
4328{
4329 /*
4330 * When the destination is on the stack, load up the temp and store.
4331 * If there are many call-saved registers, the temp might live to
4332 * see another use; otherwise it'll be discarded.
4333 */
4334 temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
4335 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
4336 TCG_TARGET_CALL_STACK_OFFSET +
4337 stk_slot * sizeof(tcg_target_long));
4338}
a813e36f 4339
39004a71
RH
4340static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
4341 TCGTemp *ts, TCGRegSet *allocated_regs)
4342{
4343 if (REG_P(l)) {
4344 TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
4345 load_arg_reg(s, reg, ts, *allocated_regs);
4346 tcg_regset_set_reg(*allocated_regs, reg);
4347 } else {
4348 load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
4349 ts, *allocated_regs);
4350 }
4351}
40ae5c62 4352
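/*
 * Editorial sketch with made-up numbers: argument slots beyond the
 * register-argument slots spill to the stack, at the target's fixed stack
 * offset plus the excess slot index times the slot size.  E.g. assuming
 * 8 register slots, a zero base offset and 8-byte slots, argument slot 10
 * lands at (10 - 8) * 8 = 16 bytes above TCG_REG_CALL_STACK.
 */
static inline long sketch_arg_stack_offset(int arg_slot, int n_reg_slots,
                                           long base_offset, int slot_size)
{
    return base_offset + (long)(arg_slot - n_reg_slots) * slot_size;
}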
39004a71
RH
4353static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
4354{
4355 const int nb_oargs = TCGOP_CALLO(op);
4356 const int nb_iargs = TCGOP_CALLI(op);
4357 const TCGLifeData arg_life = op->life;
4358 const TCGHelperInfo *info = tcg_call_info(op);
4359 TCGRegSet allocated_regs = s->reserved_regs;
4360 int i;
40ae5c62 4361
39004a71
RH
4362 /*
4363 * Move inputs into place in reverse order,
4364 * so that we place stacked arguments first.
4365 */
4366 for (i = nb_iargs - 1; i >= 0; --i) {
4367 const TCGCallArgumentLoc *loc = &info->in[i];
4368 TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
40ae5c62 4369
39004a71
RH
4370 switch (loc->kind) {
4371 case TCG_CALL_ARG_NORMAL:
4372 case TCG_CALL_ARG_EXTEND_U:
4373 case TCG_CALL_ARG_EXTEND_S:
4374 load_arg_normal(s, loc, ts, &allocated_regs);
4375 break;
4376 default:
4377 g_assert_not_reached();
c896fe29 4378 }
c896fe29 4379 }
a813e36f 4380
39004a71 4381 /* Mark dead temporaries and free the associated registers. */
dd186292 4382 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 4383 if (IS_DEAD_ARG(i)) {
43439139 4384 temp_dead(s, arg_temp(op->args[i]));
c896fe29
FB
4385 }
4386 }
a813e36f 4387
39004a71 4388 /* Clobber call registers. */
c8074023
RH
4389 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4390 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 4391 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
4392 }
4393 }
78505279 4394
39004a71
RH
4395 /*
4396 * Save globals if they might be written by the helper,
4397 * sync them if they might be read.
4398 */
4399 if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
78505279 4400 /* Nothing to do */
39004a71 4401 } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
78505279
AJ
4402 sync_globals(s, allocated_regs);
4403 } else {
b9c18f56
AJ
4404 save_globals(s, allocated_regs);
4405 }
c896fe29 4406
cee44b03 4407 tcg_out_call(s, tcg_call_func(op), info);
c896fe29 4408
39004a71
RH
4409 /* Assign output registers and emit moves if needed. */
4410 switch (info->out_kind) {
4411 case TCG_CALL_RET_NORMAL:
4412 for (i = 0; i < nb_oargs; i++) {
4413 TCGTemp *ts = arg_temp(op->args[i]);
4414 TCGReg reg = tcg_target_call_oarg_regs[i];
d63e3b6e 4415
39004a71
RH
4416 /* ENV should not be modified. */
4417 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4418
39004a71
RH
4419 set_temp_val_reg(s, ts, reg);
4420 ts->mem_coherent = 0;
4421 }
4422 break;
4423 default:
4424 g_assert_not_reached();
4425 }
4426
4427 /* Flush or discard output registers as needed. */
4428 for (i = 0; i < nb_oargs; i++) {
4429 TCGTemp *ts = arg_temp(op->args[i]);
d63e3b6e 4430 if (NEED_SYNC_ARG(i)) {
39004a71 4431 temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
d63e3b6e
RH
4432 } else if (IS_DEAD_ARG(i)) {
4433 temp_dead(s, ts);
c896fe29
FB
4434 }
4435 }
c896fe29
FB
4436}
4437
4438#ifdef CONFIG_PROFILER
4439
c3fac113
EC
4440/* avoid copy/paste errors */
4441#define PROF_ADD(to, from, field) \
4442 do { \
d73415a3 4443 (to)->field += qatomic_read(&((from)->field)); \
c3fac113
EC
4444 } while (0)
4445
4446#define PROF_MAX(to, from, field) \
4447 do { \
d73415a3 4448 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
c3fac113
EC
4449 if (val__ > (to)->field) { \
4450 (to)->field = val__; \
4451 } \
4452 } while (0)
4453
4454/* Pass in a zeroed @prof */
4455static inline
4456void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
4457{
0e2d61cf 4458 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
c3fac113
EC
4459 unsigned int i;
4460
3468b59e 4461 for (i = 0; i < n_ctxs; i++) {
d73415a3 4462 TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3468b59e 4463 const TCGProfile *orig = &s->prof;
c3fac113
EC
4464
4465 if (counters) {
72fd2efb 4466 PROF_ADD(prof, orig, cpu_exec_time);
c3fac113
EC
4467 PROF_ADD(prof, orig, tb_count1);
4468 PROF_ADD(prof, orig, tb_count);
4469 PROF_ADD(prof, orig, op_count);
4470 PROF_MAX(prof, orig, op_count_max);
4471 PROF_ADD(prof, orig, temp_count);
4472 PROF_MAX(prof, orig, temp_count_max);
4473 PROF_ADD(prof, orig, del_op_count);
4474 PROF_ADD(prof, orig, code_in_len);
4475 PROF_ADD(prof, orig, code_out_len);
4476 PROF_ADD(prof, orig, search_out_len);
4477 PROF_ADD(prof, orig, interm_time);
4478 PROF_ADD(prof, orig, code_time);
4479 PROF_ADD(prof, orig, la_time);
4480 PROF_ADD(prof, orig, opt_time);
4481 PROF_ADD(prof, orig, restore_count);
4482 PROF_ADD(prof, orig, restore_time);
4483 }
4484 if (table) {
4485 int i;
4486
4487 for (i = 0; i < NB_OPS; i++) {
4488 PROF_ADD(prof, orig, table_op_count[i]);
4489 }
4490 }
4491 }
4492}
4493
4494#undef PROF_ADD
4495#undef PROF_MAX
4496
4497static void tcg_profile_snapshot_counters(TCGProfile *prof)
4498{
4499 tcg_profile_snapshot(prof, true, false);
4500}
4501
4502static void tcg_profile_snapshot_table(TCGProfile *prof)
4503{
4504 tcg_profile_snapshot(prof, false, true);
4505}
c896fe29 4506
b6a7f3e0 4507void tcg_dump_op_count(GString *buf)
c896fe29 4508{
c3fac113 4509 TCGProfile prof = {};
c896fe29 4510 int i;
d70724ce 4511
c3fac113 4512 tcg_profile_snapshot_table(&prof);
15fc7daa 4513 for (i = 0; i < NB_OPS; i++) {
b6a7f3e0
DB
4514 g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
4515 prof.table_op_count[i]);
c896fe29 4516 }
c896fe29 4517}
72fd2efb
EC
4518
4519int64_t tcg_cpu_exec_time(void)
4520{
0e2d61cf 4521 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
72fd2efb
EC
4522 unsigned int i;
4523 int64_t ret = 0;
4524
4525 for (i = 0; i < n_ctxs; i++) {
d73415a3 4526 const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
72fd2efb
EC
4527 const TCGProfile *prof = &s->prof;
4528
d73415a3 4529 ret += qatomic_read(&prof->cpu_exec_time);
72fd2efb
EC
4530 }
4531 return ret;
4532}
246ae24d 4533#else
b6a7f3e0 4534void tcg_dump_op_count(GString *buf)
246ae24d 4535{
b6a7f3e0 4536 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
246ae24d 4537}
72fd2efb
EC
4538
4539int64_t tcg_cpu_exec_time(void)
4540{
4541 error_report("%s: TCG profiler not compiled", __func__);
4542 exit(EXIT_FAILURE);
4543}
c896fe29
FB
4544#endif
4545
4546
fbf59aad 4547int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
c896fe29 4548{
c3fac113
EC
4549#ifdef CONFIG_PROFILER
4550 TCGProfile *prof = &s->prof;
4551#endif
15fa08f8
RH
4552 int i, num_insns;
4553 TCGOp *op;
c896fe29 4554
04fe6400
RH
4555#ifdef CONFIG_PROFILER
4556 {
c1f543b7 4557 int n = 0;
04fe6400 4558
15fa08f8
RH
4559 QTAILQ_FOREACH(op, &s->ops, link) {
4560 n++;
4561 }
d73415a3 4562 qatomic_set(&prof->op_count, prof->op_count + n);
c3fac113 4563 if (n > prof->op_count_max) {
d73415a3 4564 qatomic_set(&prof->op_count_max, n);
04fe6400
RH
4565 }
4566
4567 n = s->nb_temps;
d73415a3 4568 qatomic_set(&prof->temp_count, prof->temp_count + n);
c3fac113 4569 if (n > prof->temp_count_max) {
d73415a3 4570 qatomic_set(&prof->temp_count_max, n);
04fe6400
RH
4571 }
4572 }
4573#endif
4574
c896fe29 4575#ifdef DEBUG_DISAS
d977e1c2 4576 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
fbf59aad 4577 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4578 FILE *logfile = qemu_log_trylock();
78b54858
RH
4579 if (logfile) {
4580 fprintf(logfile, "OP:\n");
b7a83ff8 4581 tcg_dump_ops(s, logfile, false);
78b54858
RH
4582 fprintf(logfile, "\n");
4583 qemu_log_unlock(logfile);
4584 }
c896fe29
FB
4585 }
4586#endif
4587
bef16ab4
RH
4588#ifdef CONFIG_DEBUG_TCG
4589 /* Ensure all labels referenced have been emitted. */
4590 {
4591 TCGLabel *l;
4592 bool error = false;
4593
4594 QSIMPLEQ_FOREACH(l, &s->labels, next) {
4595 if (unlikely(!l->present) && l->refs) {
4596 qemu_log_mask(CPU_LOG_TB_OP,
4597 "$L%d referenced but not present.\n", l->id);
4598 error = true;
4599 }
4600 }
4601 assert(!error);
4602 }
4603#endif
4604
c5cc28ff 4605#ifdef CONFIG_PROFILER
d73415a3 4606 qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
c5cc28ff
AJ
4607#endif
4608
8f2e8c07 4609#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 4610 tcg_optimize(s);
8f2e8c07
KB
4611#endif
4612
a23a9ec6 4613#ifdef CONFIG_PROFILER
d73415a3
SH
4614 qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
4615 qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
a23a9ec6 4616#endif
c5cc28ff 4617
b4fc67c7 4618 reachable_code_pass(s);
b83eabea 4619 liveness_pass_1(s);
5a18407f 4620
b83eabea 4621 if (s->nb_indirects > 0) {
5a18407f 4622#ifdef DEBUG_DISAS
b83eabea 4623 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
fbf59aad 4624 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4625 FILE *logfile = qemu_log_trylock();
78b54858
RH
4626 if (logfile) {
4627 fprintf(logfile, "OP before indirect lowering:\n");
b7a83ff8 4628 tcg_dump_ops(s, logfile, false);
78b54858
RH
4629 fprintf(logfile, "\n");
4630 qemu_log_unlock(logfile);
4631 }
b83eabea 4632 }
5a18407f 4633#endif
b83eabea
RH
4634 /* Replace indirect temps with direct temps. */
4635 if (liveness_pass_2(s)) {
4636 /* If changes were made, re-run liveness. */
4637 liveness_pass_1(s);
5a18407f
RH
4638 }
4639 }
c5cc28ff 4640
a23a9ec6 4641#ifdef CONFIG_PROFILER
d73415a3 4642 qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
a23a9ec6 4643#endif
c896fe29
FB
4644
4645#ifdef DEBUG_DISAS
d977e1c2 4646 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
fbf59aad 4647 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4648 FILE *logfile = qemu_log_trylock();
78b54858
RH
4649 if (logfile) {
4650 fprintf(logfile, "OP after optimization and liveness analysis:\n");
b7a83ff8 4651 tcg_dump_ops(s, logfile, true);
78b54858
RH
4652 fprintf(logfile, "\n");
4653 qemu_log_unlock(logfile);
4654 }
c896fe29
FB
4655 }
4656#endif
4657
35abb009
RH
4658 /* Initialize goto_tb jump offsets. */
4659 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
4660 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
4661 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
4662 if (TCG_TARGET_HAS_direct_jump) {
4663 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
4664 tcg_ctx->tb_jmp_target_addr = NULL;
4665 } else {
4666 tcg_ctx->tb_jmp_insn_offset = NULL;
4667 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
4668 }
4669
c896fe29
FB
4670 tcg_reg_alloc_start(s);
4671
db0c51a3
RH
4672 /*
4673 * Reset the buffer pointers when restarting after overflow.
4674 * TODO: Move this into translate-all.c with the rest of the
4675 * buffer management. Having only this done here is confusing.
4676 */
4677 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
4678 s->code_ptr = s->code_buf;
c896fe29 4679
659ef5cb 4680#ifdef TCG_TARGET_NEED_LDST_LABELS
6001f772 4681 QSIMPLEQ_INIT(&s->ldst_labels);
659ef5cb 4682#endif
57a26946
RH
4683#ifdef TCG_TARGET_NEED_POOL_LABELS
4684 s->pool_labels = NULL;
4685#endif
9ecefc84 4686
fca8a500 4687 num_insns = -1;
15fa08f8 4688 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb 4689 TCGOpcode opc = op->opc;
b3db8758 4690
c896fe29 4691#ifdef CONFIG_PROFILER
d73415a3 4692 qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
c896fe29 4693#endif
c45cb8bb
RH
4694
4695 switch (opc) {
c896fe29 4696 case INDEX_op_mov_i32:
c896fe29 4697 case INDEX_op_mov_i64:
d2fd745f 4698 case INDEX_op_mov_vec:
dd186292 4699 tcg_reg_alloc_mov(s, op);
c896fe29 4700 break;
bab1671f
RH
4701 case INDEX_op_dup_vec:
4702 tcg_reg_alloc_dup(s, op);
4703 break;
765b842a 4704 case INDEX_op_insn_start:
fca8a500 4705 if (num_insns >= 0) {
9f754620
RH
4706 size_t off = tcg_current_code_size(s);
4707 s->gen_insn_end_off[num_insns] = off;
4708 /* Assert that we do not overflow our stored offset. */
4709 assert(s->gen_insn_end_off[num_insns] == off);
fca8a500
RH
4710 }
4711 num_insns++;
bad729e2
RH
4712 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
4713 target_ulong a;
4714#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 4715 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
bad729e2 4716#else
efee3746 4717 a = op->args[i];
bad729e2 4718#endif
fca8a500 4719 s->gen_insn_data[num_insns][i] = a;
bad729e2 4720 }
c896fe29 4721 break;
5ff9d6a4 4722 case INDEX_op_discard:
43439139 4723 temp_dead(s, arg_temp(op->args[0]));
5ff9d6a4 4724 break;
c896fe29 4725 case INDEX_op_set_label:
e8996ee0 4726 tcg_reg_alloc_bb_end(s, s->reserved_regs);
92ab8e7d 4727 tcg_out_label(s, arg_label(op->args[0]));
c896fe29
FB
4728 break;
4729 case INDEX_op_call:
dd186292 4730 tcg_reg_alloc_call(s, op);
c45cb8bb 4731 break;
b55a8d9d
RH
4732 case INDEX_op_exit_tb:
4733 tcg_out_exit_tb(s, op->args[0]);
4734 break;
efe86b21
RH
4735 case INDEX_op_dup2_vec:
4736 if (tcg_reg_alloc_dup2(s, op)) {
4737 break;
4738 }
4739 /* fall through */
c896fe29 4740 default:
25c4d9cc 4741 /* Sanity check that we've not introduced any unhandled opcodes. */
be0f34b5 4742 tcg_debug_assert(tcg_op_supported(opc));
c896fe29
FB
4743     /* Note: in order to speed up the code, it would be much
4744        faster to have specialized register allocator functions for
4745        some common argument patterns. */
dd186292 4746 tcg_reg_alloc_op(s, op);
c896fe29
FB
4747 break;
4748 }
b125f9dc
RH
4749 /* Test for (pending) buffer overflow. The assumption is that any
4750 one operation beginning below the high water mark cannot overrun
4751 the buffer completely. Thus we can test for overflow after
4752 generating code without having to check during generation. */
644da9b3 4753 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
b125f9dc
RH
4754 return -1;
4755 }
6e6c4efe
RH
4756 /* Test for TB overflow, as seen by gen_insn_end_off. */
4757 if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
4758 return -2;
4759 }
c896fe29 4760 }
fca8a500
RH
4761 tcg_debug_assert(num_insns >= 0);
4762 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
c45cb8bb 4763
b76f0d8c 4764 /* Generate TB finalization at the end of block */
659ef5cb 4765#ifdef TCG_TARGET_NEED_LDST_LABELS
aeee05f5
RH
4766 i = tcg_out_ldst_finalize(s);
4767 if (i < 0) {
4768 return i;
23dceda6 4769 }
659ef5cb 4770#endif
57a26946 4771#ifdef TCG_TARGET_NEED_POOL_LABELS
1768987b
RH
4772 i = tcg_out_pool_finalize(s);
4773 if (i < 0) {
4774 return i;
57a26946
RH
4775 }
4776#endif
7ecd02a0
RH
4777 if (!tcg_resolve_relocs(s)) {
4778 return -2;
4779 }
c896fe29 4780
df5d2b16 4781#ifndef CONFIG_TCG_INTERPRETER
c896fe29 4782 /* flush instruction cache */
db0c51a3
RH
4783 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
4784 (uintptr_t)s->code_buf,
1da8de39 4785 tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
df5d2b16 4786#endif
2aeabc08 4787
1813e175 4788 return tcg_current_code_size(s);
c896fe29
FB
4789}
4790
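/*
 * Editorial sketch of how a caller is expected to treat the result (the
 * real handling lives in the translator loop outside this file, so the
 * variable names below are illustrative only): a negative return means
 * code generation must be restarted, not that the TB is invalid.
 */
#if 0
    gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
    if (unlikely(gen_code_size < 0)) {
        /*
         * -1: the code_gen_buffer high-water mark was hit; flush the
         *     buffer and translate again.
         * -2: the TB outgrew its 16-bit metadata (gen_insn_end_off) or
         *     relocations could not be resolved; retry with fewer guest
         *     instructions in the TB.
         */
    }
#endif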
a23a9ec6 4791#ifdef CONFIG_PROFILER
3a841ab5 4792void tcg_dump_info(GString *buf)
a23a9ec6 4793{
c3fac113
EC
4794 TCGProfile prof = {};
4795 const TCGProfile *s;
4796 int64_t tb_count;
4797 int64_t tb_div_count;
4798 int64_t tot;
4799
4800 tcg_profile_snapshot_counters(&prof);
4801 s = &prof;
4802 tb_count = s->tb_count;
4803 tb_div_count = tb_count ? tb_count : 1;
4804 tot = s->interm_time + s->code_time;
a23a9ec6 4805
3a841ab5
DB
4806 g_string_append_printf(buf, "JIT cycles %" PRId64
4807 " (%0.3f s at 2.4 GHz)\n",
4808 tot, tot / 2.4e9);
4809 g_string_append_printf(buf, "translated TBs %" PRId64
4810 " (aborted=%" PRId64 " %0.1f%%)\n",
4811 tb_count, s->tb_count1 - tb_count,
4812 (double)(s->tb_count1 - s->tb_count)
4813 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4814 g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
4815 (double)s->op_count / tb_div_count, s->op_count_max);
4816 g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
4817 (double)s->del_op_count / tb_div_count);
4818 g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
4819 (double)s->temp_count / tb_div_count,
4820 s->temp_count_max);
4821 g_string_append_printf(buf, "avg host code/TB %0.1f\n",
4822 (double)s->code_out_len / tb_div_count);
4823 g_string_append_printf(buf, "avg search data/TB %0.1f\n",
4824 (double)s->search_out_len / tb_div_count);
a813e36f 4825
3a841ab5
DB
4826 g_string_append_printf(buf, "cycles/op %0.1f\n",
4827 s->op_count ? (double)tot / s->op_count : 0);
4828 g_string_append_printf(buf, "cycles/in byte %0.1f\n",
4829 s->code_in_len ? (double)tot / s->code_in_len : 0);
4830 g_string_append_printf(buf, "cycles/out byte %0.1f\n",
4831 s->code_out_len ? (double)tot / s->code_out_len : 0);
4832 g_string_append_printf(buf, "cycles/search byte %0.1f\n",
4833 s->search_out_len ?
4834 (double)tot / s->search_out_len : 0);
fca8a500 4835 if (tot == 0) {
a23a9ec6 4836 tot = 1;
fca8a500 4837 }
3a841ab5
DB
4838 g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
4839 (double)s->interm_time / tot * 100.0);
4840 g_string_append_printf(buf, " gen_code time %0.1f%%\n",
4841 (double)s->code_time / tot * 100.0);
4842 g_string_append_printf(buf, "optim./code time %0.1f%%\n",
4843 (double)s->opt_time / (s->code_time ?
4844 s->code_time : 1)
4845 * 100.0);
4846 g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
4847 (double)s->la_time / (s->code_time ?
4848 s->code_time : 1) * 100.0);
4849 g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
4850 s->restore_count);
4851 g_string_append_printf(buf, " avg cycles %0.1f\n",
4852 s->restore_count ?
4853 (double)s->restore_time / s->restore_count : 0);
a23a9ec6
FB
4854}
4855#else
3a841ab5 4856void tcg_dump_info(GString *buf)
a23a9ec6 4857{
3a841ab5 4858 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
a23a9ec6
FB
4859}
4860#endif
813da627
RH
4861
4862#ifdef ELF_HOST_MACHINE
5872bbf2
RH
4863/* In order to use this feature, the backend needs to do three things:
4864
4865 (1) Define ELF_HOST_MACHINE to indicate both what value to
4866 put into the ELF image and to indicate support for the feature.
4867
4868 (2) Define tcg_register_jit. This should create a buffer containing
4869 the contents of a .debug_frame section that describes the post-
4870 prologue unwind info for the tcg machine.
4871
4872 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4873*/
813da627
RH
4874
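/*
 * Editorial sketch of steps (2) and (3) for a hypothetical backend; the
 * DebugFrame contents (the CIE and FDE header describing the prologue) are
 * target specific and elided, so the fragment is illustrative only.
 */
#if 0
void tcg_register_jit(const void *buf, size_t buf_size)
{
    static const DebugFrame debug_frame = {
        /* target-specific CIE fields and FDE register assignments ... */
    };

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif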
4875/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
4876typedef enum {
4877 JIT_NOACTION = 0,
4878 JIT_REGISTER_FN,
4879 JIT_UNREGISTER_FN
4880} jit_actions_t;
4881
4882struct jit_code_entry {
4883 struct jit_code_entry *next_entry;
4884 struct jit_code_entry *prev_entry;
4885 const void *symfile_addr;
4886 uint64_t symfile_size;
4887};
4888
4889struct jit_descriptor {
4890 uint32_t version;
4891 uint32_t action_flag;
4892 struct jit_code_entry *relevant_entry;
4893 struct jit_code_entry *first_entry;
4894};
4895
4896void __jit_debug_register_code(void) __attribute__((noinline));
4897void __jit_debug_register_code(void)
4898{
4899 asm("");
4900}
4901
4902/* Must statically initialize the version, because GDB may check
4903 the version before we can set it. */
4904struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4905
4906/* End GDB interface. */
4907
4908static int find_string(const char *strtab, const char *str)
4909{
4910 const char *p = strtab + 1;
4911
4912 while (1) {
4913 if (strcmp(p, str) == 0) {
4914 return p - strtab;
4915 }
4916 p += strlen(p) + 1;
4917 }
4918}
4919
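/*
 * Editorial sketch: ELF string tables begin with a NUL byte and pack
 * NUL-terminated names back to back, so find_string() recovers a name's
 * byte offset with a linear walk.  For the table below, ".text" is at
 * offset 1 and ".debug_info" at offset 1 + strlen(".text") + 1 == 7.
 */
static inline int sketch_find_string_demo(void)
{
    static const char strtab[] = "\0.text\0.debug_info";
    return find_string(strtab, ".debug_info");  /* returns 7 */
}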
755bf9e5 4920static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
2c90784a
RH
4921 const void *debug_frame,
4922 size_t debug_frame_size)
813da627 4923{
5872bbf2
RH
4924 struct __attribute__((packed)) DebugInfo {
4925 uint32_t len;
4926 uint16_t version;
4927 uint32_t abbrev;
4928 uint8_t ptr_size;
4929 uint8_t cu_die;
4930 uint16_t cu_lang;
4931 uintptr_t cu_low_pc;
4932 uintptr_t cu_high_pc;
4933 uint8_t fn_die;
4934 char fn_name[16];
4935 uintptr_t fn_low_pc;
4936 uintptr_t fn_high_pc;
4937 uint8_t cu_eoc;
4938 };
813da627
RH
4939
4940 struct ElfImage {
4941 ElfW(Ehdr) ehdr;
4942 ElfW(Phdr) phdr;
5872bbf2
RH
4943 ElfW(Shdr) shdr[7];
4944 ElfW(Sym) sym[2];
4945 struct DebugInfo di;
4946 uint8_t da[24];
4947 char str[80];
4948 };
4949
4950 struct ElfImage *img;
4951
4952 static const struct ElfImage img_template = {
4953 .ehdr = {
4954 .e_ident[EI_MAG0] = ELFMAG0,
4955 .e_ident[EI_MAG1] = ELFMAG1,
4956 .e_ident[EI_MAG2] = ELFMAG2,
4957 .e_ident[EI_MAG3] = ELFMAG3,
4958 .e_ident[EI_CLASS] = ELF_CLASS,
4959 .e_ident[EI_DATA] = ELF_DATA,
4960 .e_ident[EI_VERSION] = EV_CURRENT,
4961 .e_type = ET_EXEC,
4962 .e_machine = ELF_HOST_MACHINE,
4963 .e_version = EV_CURRENT,
4964 .e_phoff = offsetof(struct ElfImage, phdr),
4965 .e_shoff = offsetof(struct ElfImage, shdr),
4966 .e_ehsize = sizeof(ElfW(Shdr)),
4967 .e_phentsize = sizeof(ElfW(Phdr)),
4968 .e_phnum = 1,
4969 .e_shentsize = sizeof(ElfW(Shdr)),
4970 .e_shnum = ARRAY_SIZE(img->shdr),
4971 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
abbb3eae
RH
4972#ifdef ELF_HOST_FLAGS
4973 .e_flags = ELF_HOST_FLAGS,
4974#endif
4975#ifdef ELF_OSABI
4976 .e_ident[EI_OSABI] = ELF_OSABI,
4977#endif
5872bbf2
RH
4978 },
4979 .phdr = {
4980 .p_type = PT_LOAD,
4981 .p_flags = PF_X,
4982 },
4983 .shdr = {
4984 [0] = { .sh_type = SHT_NULL },
4985 /* Trick: The contents of code_gen_buffer are not present in
4986 this fake ELF file; that got allocated elsewhere. Therefore
4987 we mark .text as SHT_NOBITS (similar to .bss) so that readers
4988 will not look for contents. We can record any address. */
4989 [1] = { /* .text */
4990 .sh_type = SHT_NOBITS,
4991 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
4992 },
4993 [2] = { /* .debug_info */
4994 .sh_type = SHT_PROGBITS,
4995 .sh_offset = offsetof(struct ElfImage, di),
4996 .sh_size = sizeof(struct DebugInfo),
4997 },
4998 [3] = { /* .debug_abbrev */
4999 .sh_type = SHT_PROGBITS,
5000 .sh_offset = offsetof(struct ElfImage, da),
5001 .sh_size = sizeof(img->da),
5002 },
5003 [4] = { /* .debug_frame */
5004 .sh_type = SHT_PROGBITS,
5005 .sh_offset = sizeof(struct ElfImage),
5006 },
5007 [5] = { /* .symtab */
5008 .sh_type = SHT_SYMTAB,
5009 .sh_offset = offsetof(struct ElfImage, sym),
5010 .sh_size = sizeof(img->sym),
5011 .sh_info = 1,
5012 .sh_link = ARRAY_SIZE(img->shdr) - 1,
5013 .sh_entsize = sizeof(ElfW(Sym)),
5014 },
5015 [6] = { /* .strtab */
5016 .sh_type = SHT_STRTAB,
5017 .sh_offset = offsetof(struct ElfImage, str),
5018 .sh_size = sizeof(img->str),
5019 }
5020 },
5021 .sym = {
5022 [1] = { /* code_gen_buffer */
5023 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
5024 .st_shndx = 1,
5025 }
5026 },
5027 .di = {
5028 .len = sizeof(struct DebugInfo) - 4,
5029 .version = 2,
5030 .ptr_size = sizeof(void *),
5031 .cu_die = 1,
5032 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
5033 .fn_die = 2,
5034 .fn_name = "code_gen_buffer"
5035 },
5036 .da = {
5037 1, /* abbrev number (the cu) */
5038 0x11, 1, /* DW_TAG_compile_unit, has children */
5039 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
5040 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5041 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5042 0, 0, /* end of abbrev */
5043 2, /* abbrev number (the fn) */
5044 0x2e, 0, /* DW_TAG_subprogram, no children */
5045 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
5046 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5047 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5048 0, 0, /* end of abbrev */
5049 0 /* no more abbrev */
5050 },
5051 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
5052 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
813da627
RH
5053 };
5054
5055 /* We only need a single jit entry; statically allocate it. */
5056 static struct jit_code_entry one_entry;
5057
5872bbf2 5058 uintptr_t buf = (uintptr_t)buf_ptr;
813da627 5059 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2c90784a 5060 DebugFrameHeader *dfh;
813da627 5061
5872bbf2
RH
5062 img = g_malloc(img_size);
5063 *img = img_template;
813da627 5064
5872bbf2
RH
5065 img->phdr.p_vaddr = buf;
5066 img->phdr.p_paddr = buf;
5067 img->phdr.p_memsz = buf_size;
813da627 5068
813da627 5069 img->shdr[1].sh_name = find_string(img->str, ".text");
5872bbf2 5070 img->shdr[1].sh_addr = buf;
813da627
RH
5071 img->shdr[1].sh_size = buf_size;
5072
5872bbf2
RH
5073 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
5074 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
5075
5076 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
5077 img->shdr[4].sh_size = debug_frame_size;
5078
5079 img->shdr[5].sh_name = find_string(img->str, ".symtab");
5080 img->shdr[6].sh_name = find_string(img->str, ".strtab");
5081
5082 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
5083 img->sym[1].st_value = buf;
5084 img->sym[1].st_size = buf_size;
813da627 5085
5872bbf2 5086 img->di.cu_low_pc = buf;
45aba097 5087 img->di.cu_high_pc = buf + buf_size;
5872bbf2 5088 img->di.fn_low_pc = buf;
45aba097 5089 img->di.fn_high_pc = buf + buf_size;
813da627 5090
2c90784a
RH
5091 dfh = (DebugFrameHeader *)(img + 1);
5092 memcpy(dfh, debug_frame, debug_frame_size);
5093 dfh->fde.func_start = buf;
5094 dfh->fde.func_len = buf_size;
5095
813da627
RH
5096#ifdef DEBUG_JIT
5097 /* Enable this block to be able to debug the ELF image file creation.
5098 One can use readelf, objdump, or other inspection utilities. */
5099 {
eb6b2edf
BM
5100 g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
5101 FILE *f = fopen(jit, "w+b");
813da627 5102 if (f) {
5872bbf2 5103         if (fwrite(img, img_size, 1, f) != 1) {
813da627
RH
5104 /* Avoid stupid unused return value warning for fwrite. */
5105 }
5106 fclose(f);
5107 }
5108 }
5109#endif
5110
5111 one_entry.symfile_addr = img;
5112 one_entry.symfile_size = img_size;
5113
5114 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
5115 __jit_debug_descriptor.relevant_entry = &one_entry;
5116 __jit_debug_descriptor.first_entry = &one_entry;
5117 __jit_debug_register_code();
5118}
5119#else
5872bbf2
RH
5120/* No support for the feature. Provide the entry point expected by exec.c,
5121 and implement the internal function we declared earlier. */
813da627 5122
755bf9e5 5123static void tcg_register_jit_int(const void *buf, size_t size,
2c90784a
RH
5124 const void *debug_frame,
5125 size_t debug_frame_size)
813da627
RH
5126{
5127}
5128
755bf9e5 5129void tcg_register_jit(const void *buf, size_t buf_size)
813da627
RH
5130{
5131}
5132#endif /* ELF_HOST_MACHINE */
db432672
RH
5133
5134#if !TCG_TARGET_MAYBE_vec
5135void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
5136{
5137 g_assert_not_reached();
5138}
5139#endif