/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
#ifdef CONFIG_TCG_INTERPRETER
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         ffi_cif *cif);
#else
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
#endif
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

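/*
 * Code emission helpers.  Each tcg_out<N> below appends an N-bit value to
 * the code buffer: when N matches TCG_TARGET_INSN_UNIT_SIZE the value is
 * stored directly, otherwise it is copied with memcpy and code_ptr is
 * advanced by the equivalent number of instruction units.  The tcg_patch<N>
 * variants rewrite a value in place at a previously emitted location.
 */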
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

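/*
 * A branch to a label that has not been emitted yet cannot be patched
 * immediately; tcg_out_reloc() queues the relocation on the label and
 * tcg_resolve_relocs() applies all pending entries once every label has
 * received its final address via tcg_out_label().
 */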
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

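/*
 * As an illustration of the scheme below: a constraint set such as
 * C_O1_I2(r, r, ri) pastes into the enumerator c_o1_i2_r_r_ri.  The same
 * C_O*_I* names are then redefined twice more, first to build the matching
 * entry in constraint_sets[] and then to produce the value returned by
 * tcg_target_op_def().
 */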
/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
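/*
 * Allocations are carved from a chain of TCG_POOL_CHUNK_SIZE chunks;
 * anything larger gets a dedicated entry on the pool_first_large list.
 * Nothing is freed individually: tcg_pool_reset() releases the large
 * chunks and rewinds the chunk chain, and is invoked from
 * tcg_func_start() before each new translation.
 */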
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

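/*
 * Helper signatures are encoded in TCGHelperInfo.typemask as packed 3-bit
 * typecodes: bits 0-2 describe the return value and each following 3-bit
 * field one argument.  Both the libffi setup below (for the interpreter)
 * and init_call_layout() decode this packing.
 */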
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    }
    g_assert_not_reached();
}

static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t. */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */

typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}

static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

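/*
 * Compute once, per helper, where each argument and the return value will
 * live at call time: info->in[] receives one entry per argument piece with
 * its register/stack slot and extension kind, and info->nr_out records how
 * many registers the return value occupies.  tcg_gen_callN() consumes this
 * layout when it builds the call op.
 */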
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    default:
        g_assert_not_reached();
    }
    assert(info->nr_out <= ARRAY_SIZE(tcg_target_call_oarg_regs));

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
    assert(cum.ref_slot <= max_stk_slots);
}

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        init_call_layout(&all_helpers[i]);
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

#ifdef CONFIG_TCG_INTERPRETER
    init_ffi_layouts();
#endif

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}

static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

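/*
 * Freed temporaries are recycled through the s->free_temps[] bitmaps, one
 * per (base type, locality) pair: index k below is the type, offset by
 * TCG_TYPE_COUNT for local temps, so a temp can only be reused by a
 * request of the same kind.
 */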
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->kind == kind);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->kind = kind;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;
            ts2->kind = kind;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->kind = kind;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    switch (ts->kind) {
    case TEMP_CONST:
        /*
         * In order to simplify users of tcg_constant_*,
         * silently ignore free.
         */
        return;
    case TEMP_NORMAL:
    case TEMP_LOCAL:
        break;
    default:
        g_assert_not_reached();
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}

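/*
 * Constant temporaries are interned: one TEMP_CONST temp per (type, value),
 * kept in the per-type hash tables s->const_table[] and never freed.
 * Translation code normally reaches this through small wrappers; as an
 * illustrative example (the wrappers live in the headers, not in this
 * file), tcg_constant_i32(0x10) would return the shared temp for that value.
 */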
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}

static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);

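/*
 * Emit an INDEX_op_call.  Operands are laid out as: the return temp(s)
 * first (TCGOP_CALLO of them), then the input pieces described by
 * info->in[] (TCGOP_CALLI), and finally the function pointer and the
 * TCGHelperInfo pointer.  32-bit arguments that the ABI requires to be
 * widened are copied into scratch i64 temps and freed again at the end.
 */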
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    const TCGHelperInfo *info;
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + 1);
        tcg_debug_assert(ret->temp_subindex == 0);
        op->args[pi++] = temp_arg(ret);
        op->args[pi++] = temp_arg(ret + 1);
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}

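/*
 * Reset per-temp allocator state before register allocation: fixed-register
 * globals start out in their register, interned constants as
 * TEMP_VAL_CONST, memory-backed globals and locals in memory, and all
 * other temps dead.
 */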
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_NORMAL:
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_LOCAL:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

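/*
 * Format a temp for the op dump: globals and fixed temps print their name,
 * other temps get a synthetic loc%d/ebb%d/tmp%d index relative to
 * nb_globals, and constants print as $0x... with a vector width prefix
 * where relevant.
 */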
1760static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1761 TCGTemp *ts)
c896fe29 1762{
1807f4c4 1763 int idx = temp_idx(ts);
ac56dd48 1764
ee17db83
RH
1765 switch (ts->kind) {
1766 case TEMP_FIXED:
1767 case TEMP_GLOBAL:
ac56dd48 1768 pstrcpy(buf, buf_size, ts->name);
ee17db83
RH
1769 break;
1770 case TEMP_LOCAL:
f8b2f202 1771 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
ee17db83 1772 break;
c7482438
RH
1773 case TEMP_EBB:
1774 snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
1775 break;
ee17db83 1776 case TEMP_NORMAL:
f8b2f202 1777 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
ee17db83 1778 break;
c0522136
RH
1779 case TEMP_CONST:
1780 switch (ts->type) {
1781 case TCG_TYPE_I32:
1782 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1783 break;
1784#if TCG_TARGET_REG_BITS > 32
1785 case TCG_TYPE_I64:
1786 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1787 break;
1788#endif
1789 case TCG_TYPE_V64:
1790 case TCG_TYPE_V128:
1791 case TCG_TYPE_V256:
1792 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1793 64 << (ts->type - TCG_TYPE_V64), ts->val);
1794 break;
1795 default:
1796 g_assert_not_reached();
1797 }
1798 break;
c896fe29
FB
1799 }
1800 return buf;
1801}
1802
43439139
RH
1803static char *tcg_get_arg_str(TCGContext *s, char *buf,
1804 int buf_size, TCGArg arg)
f8b2f202 1805{
43439139 1806 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
f8b2f202
RH
1807}
1808
f48f3ede
BS
1809static const char * const cond_name[] =
1810{
0aed257f
RH
1811 [TCG_COND_NEVER] = "never",
1812 [TCG_COND_ALWAYS] = "always",
f48f3ede
BS
1813 [TCG_COND_EQ] = "eq",
1814 [TCG_COND_NE] = "ne",
1815 [TCG_COND_LT] = "lt",
1816 [TCG_COND_GE] = "ge",
1817 [TCG_COND_LE] = "le",
1818 [TCG_COND_GT] = "gt",
1819 [TCG_COND_LTU] = "ltu",
1820 [TCG_COND_GEU] = "geu",
1821 [TCG_COND_LEU] = "leu",
1822 [TCG_COND_GTU] = "gtu"
1823};
1824
f713d6ad
RH
1825static const char * const ldst_name[] =
1826{
1827 [MO_UB] = "ub",
1828 [MO_SB] = "sb",
1829 [MO_LEUW] = "leuw",
1830 [MO_LESW] = "lesw",
1831 [MO_LEUL] = "leul",
1832 [MO_LESL] = "lesl",
fc313c64 1833 [MO_LEUQ] = "leq",
f713d6ad
RH
1834 [MO_BEUW] = "beuw",
1835 [MO_BESW] = "besw",
1836 [MO_BEUL] = "beul",
1837 [MO_BESL] = "besl",
fc313c64 1838 [MO_BEUQ] = "beq",
f713d6ad
RH
1839};
1840
1f00b27f 1841static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
52bf9771 1842#ifdef TARGET_ALIGNED_ONLY
1f00b27f
SS
1843 [MO_UNALN >> MO_ASHIFT] = "un+",
1844 [MO_ALIGN >> MO_ASHIFT] = "",
1845#else
1846 [MO_UNALN >> MO_ASHIFT] = "",
1847 [MO_ALIGN >> MO_ASHIFT] = "al+",
1848#endif
1849 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1850 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1851 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1852 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1853 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1854 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1855};
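/*
 * The ldst_name[] and alignment_name[] tables above are combined by
 * tcg_dump_ops() below when decoding a MemOpIdx: alignment prefix, then
 * size/endianness, then the mmu index.  Illustrative example (with
 * TARGET_ALIGNED_ONLY unset): a 4-byte-aligned little-endian 32-bit load
 * at mmu index 1 is printed as ",al4+leul,1".
 */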
1856
587195bd
RH
1857static const char bswap_flag_name[][6] = {
1858 [TCG_BSWAP_IZ] = "iz",
1859 [TCG_BSWAP_OZ] = "oz",
1860 [TCG_BSWAP_OS] = "os",
1861 [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
1862 [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
1863};
1864
b016486e
RH
1865static inline bool tcg_regset_single(TCGRegSet d)
1866{
1867 return (d & (d - 1)) == 0;
1868}
1869
1870static inline TCGReg tcg_regset_first(TCGRegSet d)
1871{
1872 if (TCG_TARGET_NB_REGS <= 32) {
1873 return ctz32(d);
1874 } else {
1875 return ctz64(d);
1876 }
1877}
1878
b7a83ff8
RH
1879/* Return only the number of characters output -- no error return. */
1880#define ne_fprintf(...) \
1881 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
1882
1883static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
c896fe29 1884{
c896fe29 1885 char buf[128];
c45cb8bb 1886 TCGOp *op;
c45cb8bb 1887
15fa08f8 1888 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb
RH
1889 int i, k, nb_oargs, nb_iargs, nb_cargs;
1890 const TCGOpDef *def;
c45cb8bb 1891 TCGOpcode c;
bdfb460e 1892 int col = 0;
c896fe29 1893
c45cb8bb 1894 c = op->opc;
c896fe29 1895 def = &tcg_op_defs[c];
c45cb8bb 1896
765b842a 1897 if (c == INDEX_op_insn_start) {
b016486e 1898 nb_oargs = 0;
b7a83ff8 1899 col += ne_fprintf(f, "\n ----");
9aef40ed
RH
1900
1901 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1902 target_ulong a;
7e4597d7 1903#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 1904 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
7e4597d7 1905#else
efee3746 1906 a = op->args[i];
7e4597d7 1907#endif
b7a83ff8 1908 col += ne_fprintf(f, " " TARGET_FMT_lx, a);
eeacee4d 1909 }
7e4597d7 1910 } else if (c == INDEX_op_call) {
3e92aa34 1911 const TCGHelperInfo *info = tcg_call_info(op);
fa52e660 1912 void *func = tcg_call_func(op);
3e92aa34 1913
c896fe29 1914 /* variable number of arguments */
cd9090aa
RH
1915 nb_oargs = TCGOP_CALLO(op);
1916 nb_iargs = TCGOP_CALLI(op);
c896fe29 1917 nb_cargs = def->nb_cargs;
c896fe29 1918
b7a83ff8 1919 col += ne_fprintf(f, " %s ", def->name);
3e92aa34
RH
1920
1921 /*
1922 * Print the function name from TCGHelperInfo, if available.
1923 * Note that plugins have a template function for the info,
1924 * but the actual function pointer comes from the plugin.
1925 */
3e92aa34 1926 if (func == info->func) {
b7a83ff8 1927 col += ne_fprintf(f, "%s", info->name);
3e92aa34 1928 } else {
b7a83ff8 1929 col += ne_fprintf(f, "plugin(%p)", func);
3e92aa34
RH
1930 }
1931
b7a83ff8 1932 col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
cf066674 1933 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1934 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1935 op->args[i]));
b03cce8e 1936 }
cf066674 1937 for (i = 0; i < nb_iargs; i++) {
efee3746 1938 TCGArg arg = op->args[nb_oargs + i];
39004a71 1939 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
b7a83ff8 1940 col += ne_fprintf(f, ",%s", t);
e8996ee0 1941 }
b03cce8e 1942 } else {
b7a83ff8 1943 col += ne_fprintf(f, " %s ", def->name);
c45cb8bb
RH
1944
1945 nb_oargs = def->nb_oargs;
1946 nb_iargs = def->nb_iargs;
1947 nb_cargs = def->nb_cargs;
1948
d2fd745f 1949 if (def->flags & TCG_OPF_VECTOR) {
b7a83ff8
RH
1950 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
1951 8 << TCGOP_VECE(op));
d2fd745f
RH
1952 }
1953
b03cce8e 1954 k = 0;
c45cb8bb 1955 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1956 const char *sep = k ? "," : "";
1957 col += ne_fprintf(f, "%s%s", sep,
1958 tcg_get_arg_str(s, buf, sizeof(buf),
1959 op->args[k++]));
b03cce8e 1960 }
c45cb8bb 1961 for (i = 0; i < nb_iargs; i++) {
b7a83ff8
RH
1962 const char *sep = k ? "," : "";
1963 col += ne_fprintf(f, "%s%s", sep,
1964 tcg_get_arg_str(s, buf, sizeof(buf),
1965 op->args[k++]));
b03cce8e 1966 }
be210acb
RH
1967 switch (c) {
1968 case INDEX_op_brcond_i32:
be210acb 1969 case INDEX_op_setcond_i32:
ffc5ea09 1970 case INDEX_op_movcond_i32:
ffc5ea09 1971 case INDEX_op_brcond2_i32:
be210acb 1972 case INDEX_op_setcond2_i32:
ffc5ea09 1973 case INDEX_op_brcond_i64:
be210acb 1974 case INDEX_op_setcond_i64:
ffc5ea09 1975 case INDEX_op_movcond_i64:
212be173 1976 case INDEX_op_cmp_vec:
f75da298 1977 case INDEX_op_cmpsel_vec:
efee3746
RH
1978 if (op->args[k] < ARRAY_SIZE(cond_name)
1979 && cond_name[op->args[k]]) {
b7a83ff8 1980 col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
eeacee4d 1981 } else {
b7a83ff8 1982 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
eeacee4d 1983 }
f48f3ede 1984 i = 1;
be210acb 1985 break;
f713d6ad
RH
1986 case INDEX_op_qemu_ld_i32:
1987 case INDEX_op_qemu_st_i32:
07ce0b05 1988 case INDEX_op_qemu_st8_i32:
f713d6ad
RH
1989 case INDEX_op_qemu_ld_i64:
1990 case INDEX_op_qemu_st_i64:
59227d5d 1991 {
9002ffcb 1992 MemOpIdx oi = op->args[k++];
14776ab5 1993 MemOp op = get_memop(oi);
59227d5d
RH
1994 unsigned ix = get_mmuidx(oi);
1995
59c4b7e8 1996 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
b7a83ff8 1997 col += ne_fprintf(f, ",$0x%x,%u", op, ix);
59c4b7e8 1998 } else {
1f00b27f
SS
1999 const char *s_al, *s_op;
2000 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
59c4b7e8 2001 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
b7a83ff8 2002 col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
59227d5d
RH
2003 }
2004 i = 1;
f713d6ad 2005 }
f713d6ad 2006 break;
587195bd
RH
2007 case INDEX_op_bswap16_i32:
2008 case INDEX_op_bswap16_i64:
2009 case INDEX_op_bswap32_i32:
2010 case INDEX_op_bswap32_i64:
2011 case INDEX_op_bswap64_i64:
2012 {
2013 TCGArg flags = op->args[k];
2014 const char *name = NULL;
2015
2016 if (flags < ARRAY_SIZE(bswap_flag_name)) {
2017 name = bswap_flag_name[flags];
2018 }
2019 if (name) {
b7a83ff8 2020 col += ne_fprintf(f, ",%s", name);
587195bd 2021 } else {
b7a83ff8 2022 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
587195bd
RH
2023 }
2024 i = k = 1;
2025 }
2026 break;
be210acb 2027 default:
f48f3ede 2028 i = 0;
be210acb
RH
2029 break;
2030 }
51e3972c
RH
2031 switch (c) {
2032 case INDEX_op_set_label:
2033 case INDEX_op_br:
2034 case INDEX_op_brcond_i32:
2035 case INDEX_op_brcond_i64:
2036 case INDEX_op_brcond2_i32:
b7a83ff8
RH
2037 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2038 arg_label(op->args[k])->id);
51e3972c
RH
2039 i++, k++;
2040 break;
2041 default:
2042 break;
2043 }
2044 for (; i < nb_cargs; i++, k++) {
b7a83ff8
RH
2045 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2046 op->args[k]);
bdfb460e
RH
2047 }
2048 }
bdfb460e 2049
1894f69a 2050 if (have_prefs || op->life) {
b7a83ff8
RH
2051 for (; col < 40; ++col) {
2052 putc(' ', f);
bdfb460e 2053 }
1894f69a
RH
2054 }
2055
2056 if (op->life) {
2057 unsigned life = op->life;
bdfb460e
RH
2058
2059 if (life & (SYNC_ARG * 3)) {
b7a83ff8 2060 ne_fprintf(f, " sync:");
bdfb460e
RH
2061 for (i = 0; i < 2; ++i) {
2062 if (life & (SYNC_ARG << i)) {
b7a83ff8 2063 ne_fprintf(f, " %d", i);
bdfb460e
RH
2064 }
2065 }
2066 }
2067 life /= DEAD_ARG;
2068 if (life) {
b7a83ff8 2069 ne_fprintf(f, " dead:");
bdfb460e
RH
2070 for (i = 0; life; ++i, life >>= 1) {
2071 if (life & 1) {
b7a83ff8 2072 ne_fprintf(f, " %d", i);
bdfb460e
RH
2073 }
2074 }
b03cce8e 2075 }
c896fe29 2076 }
1894f69a
RH
2077
2078 if (have_prefs) {
2079 for (i = 0; i < nb_oargs; ++i) {
31fd884b 2080 TCGRegSet set = output_pref(op, i);
1894f69a
RH
2081
2082 if (i == 0) {
b7a83ff8 2083 ne_fprintf(f, " pref=");
1894f69a 2084 } else {
b7a83ff8 2085 ne_fprintf(f, ",");
1894f69a
RH
2086 }
2087 if (set == 0) {
b7a83ff8 2088 ne_fprintf(f, "none");
1894f69a 2089 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
b7a83ff8 2090 ne_fprintf(f, "all");
1894f69a
RH
2091#ifdef CONFIG_DEBUG_TCG
2092 } else if (tcg_regset_single(set)) {
2093 TCGReg reg = tcg_regset_first(set);
b7a83ff8 2094 ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
1894f69a
RH
2095#endif
2096 } else if (TCG_TARGET_NB_REGS <= 32) {
b7a83ff8 2097 ne_fprintf(f, "0x%x", (uint32_t)set);
1894f69a 2098 } else {
b7a83ff8 2099 ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
1894f69a
RH
2100 }
2101 }
2102 }
2103
b7a83ff8 2104 putc('\n', f);
c896fe29
FB
2105 }
2106}
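/*
 * A rough sketch of the resulting output (illustrative values only):
 *
 *  ---- 0000000000401000 0000000000000000
 *  add_i32 tmp2,tmp0,tmp1                   dead: 1 2  pref=all
 *
 * The "----" line comes from INDEX_op_insn_start; ordinary ops print their
 * outputs, inputs and constants separated by commas, padded to column 40
 * before the liveness (sync/dead) and register-preference annotations.
 */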
2107
2108/* we give more priority to constraints with fewer registers */
2109static int get_constraint_priority(const TCGOpDef *def, int k)
2110{
74a11790 2111 const TCGArgConstraint *arg_ct = &def->args_ct[k];
29f5e925 2112 int n = ctpop64(arg_ct->regs);
c896fe29 2113
29f5e925
RH
2114 /*
2115 * Sort constraints of a single register first, which includes output
2116 * aliases (which must exactly match the input already allocated).
2117 */
2118 if (n == 1 || arg_ct->oalias) {
2119 return INT_MAX;
2120 }
2121
2122 /*
2123 * Sort register pairs next, first then second immediately after.
2124 * Arbitrarily sort multiple pairs by the index of the first reg;
2125 * there shouldn't be many pairs.
2126 */
2127 switch (arg_ct->pair) {
2128 case 1:
2129 case 3:
2130 return (k + 1) * 2;
2131 case 2:
2132 return (arg_ct->pair_index + 1) * 2 - 1;
c896fe29 2133 }
29f5e925
RH
2134
2135 /* Finally, sort by decreasing register count. */
2136 assert(n > 1);
2137 return -n;
c896fe29
FB
2138}
2139
2140/* sort from highest priority to lowest */
2141static void sort_constraints(TCGOpDef *def, int start, int n)
2142{
66792f90
RH
2143 int i, j;
2144 TCGArgConstraint *a = def->args_ct;
c896fe29 2145
66792f90
RH
2146 for (i = 0; i < n; i++) {
2147 a[start + i].sort_index = start + i;
2148 }
2149 if (n <= 1) {
c896fe29 2150 return;
66792f90
RH
2151 }
2152 for (i = 0; i < n - 1; i++) {
2153 for (j = i + 1; j < n; j++) {
2154 int p1 = get_constraint_priority(def, a[start + i].sort_index);
2155 int p2 = get_constraint_priority(def, a[start + j].sort_index);
c896fe29 2156 if (p1 < p2) {
66792f90
RH
2157 int tmp = a[start + i].sort_index;
2158 a[start + i].sort_index = a[start + j].sort_index;
2159 a[start + j].sort_index = tmp;
c896fe29
FB
2160 }
2161 }
2162 }
2163}
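/*
 * Note that only the sort_index fields are permuted (a simple O(n^2)
 * selection of the highest priority first); args_ct[] itself stays in
 * operand order, so that later allocation can visit operands in priority
 * order via sort_index.
 */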
2164
f69d277e 2165static void process_op_defs(TCGContext *s)
c896fe29 2166{
a9751609 2167 TCGOpcode op;
c896fe29 2168
f69d277e
RH
2169 for (op = 0; op < NB_OPS; op++) {
2170 TCGOpDef *def = &tcg_op_defs[op];
2171 const TCGTargetOpDef *tdefs;
29f5e925
RH
2172 bool saw_alias_pair = false;
2173 int i, o, i2, o2, nb_args;
f69d277e
RH
2174
2175 if (def->flags & TCG_OPF_NOT_PRESENT) {
2176 continue;
2177 }
2178
c896fe29 2179 nb_args = def->nb_iargs + def->nb_oargs;
f69d277e
RH
2180 if (nb_args == 0) {
2181 continue;
2182 }
2183
4c22e840
RH
2184 /*
2185 * Macro magic should make it impossible, but double-check that
2186 * the array index is in range. Since the signedness of an enum
2187 * is implementation defined, force the result to unsigned.
2188 */
2189 unsigned con_set = tcg_target_op_def(op);
2190 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2191 tdefs = &constraint_sets[con_set];
f69d277e
RH
2192
2193 for (i = 0; i < nb_args; i++) {
2194 const char *ct_str = tdefs->args_ct_str[i];
8940ea0d
PMD
2195 bool input_p = i >= def->nb_oargs;
2196
f69d277e 2197 /* Incomplete TCGTargetOpDef entry. */
eabb7b91 2198 tcg_debug_assert(ct_str != NULL);
f69d277e 2199
8940ea0d
PMD
2200 switch (*ct_str) {
2201 case '0' ... '9':
2202 o = *ct_str - '0';
2203 tcg_debug_assert(input_p);
2204 tcg_debug_assert(o < def->nb_oargs);
2205 tcg_debug_assert(def->args_ct[o].regs != 0);
2206 tcg_debug_assert(!def->args_ct[o].oalias);
2207 def->args_ct[i] = def->args_ct[o];
2208 /* The output sets oalias. */
2209 def->args_ct[o].oalias = 1;
2210 def->args_ct[o].alias_index = i;
2211 /* The input sets ialias. */
2212 def->args_ct[i].ialias = 1;
2213 def->args_ct[i].alias_index = o;
29f5e925
RH
2214 if (def->args_ct[i].pair) {
2215 saw_alias_pair = true;
2216 }
8940ea0d
PMD
2217 tcg_debug_assert(ct_str[1] == '\0');
2218 continue;
2219
2220 case '&':
2221 tcg_debug_assert(!input_p);
2222 def->args_ct[i].newreg = true;
2223 ct_str++;
2224 break;
29f5e925
RH
2225
2226 case 'p': /* plus */
2227 /* Allocate to the register after the previous. */
2228 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2229 o = i - 1;
2230 tcg_debug_assert(!def->args_ct[o].pair);
2231 tcg_debug_assert(!def->args_ct[o].ct);
2232 def->args_ct[i] = (TCGArgConstraint){
2233 .pair = 2,
2234 .pair_index = o,
2235 .regs = def->args_ct[o].regs << 1,
2236 };
2237 def->args_ct[o].pair = 1;
2238 def->args_ct[o].pair_index = i;
2239 tcg_debug_assert(ct_str[1] == '\0');
2240 continue;
2241
2242 case 'm': /* minus */
2243 /* Allocate to the register before the previous. */
2244 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2245 o = i - 1;
2246 tcg_debug_assert(!def->args_ct[o].pair);
2247 tcg_debug_assert(!def->args_ct[o].ct);
2248 def->args_ct[i] = (TCGArgConstraint){
2249 .pair = 1,
2250 .pair_index = o,
2251 .regs = def->args_ct[o].regs >> 1,
2252 };
2253 def->args_ct[o].pair = 2;
2254 def->args_ct[o].pair_index = i;
2255 tcg_debug_assert(ct_str[1] == '\0');
2256 continue;
8940ea0d
PMD
2257 }
2258
2259 do {
2260 switch (*ct_str) {
17280ff4
RH
2261 case 'i':
2262 def->args_ct[i].ct |= TCG_CT_CONST;
17280ff4 2263 break;
358b4923 2264
358b4923
RH
2265 /* Include all of the target-specific constraints. */
2266
2267#undef CONST
2268#define CONST(CASE, MASK) \
8940ea0d 2269 case CASE: def->args_ct[i].ct |= MASK; break;
358b4923 2270#define REGS(CASE, MASK) \
8940ea0d 2271 case CASE: def->args_ct[i].regs |= MASK; break;
358b4923
RH
2272
2273#include "tcg-target-con-str.h"
2274
2275#undef REGS
2276#undef CONST
17280ff4 2277 default:
8940ea0d
PMD
2278 case '0' ... '9':
2279 case '&':
29f5e925
RH
2280 case 'p':
2281 case 'm':
17280ff4 2282 /* Typo in TCGTargetOpDef constraint. */
358b4923 2283 g_assert_not_reached();
c896fe29 2284 }
8940ea0d 2285 } while (*++ct_str != '\0');
c896fe29
FB
2286 }
2287
c68aaa18 2288 /* TCGTargetOpDef entry with too much information? */
eabb7b91 2289 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
c68aaa18 2290
29f5e925
RH
2291 /*
2292 * Fix up output pairs that are aliased with inputs.
2293 * When we created the alias, we copied pair from the output.
2294 * There are three cases:
2295 * (1a) Pairs of inputs alias pairs of outputs.
2296 * (1b) One input aliases the first of a pair of outputs.
2297 * (2) One input aliases the second of a pair of outputs.
2298 *
2299 * Case 1a is handled by making sure that the pair_index'es are
2300 * properly updated so that they appear the same as a pair of inputs.
2301 *
2302 * Case 1b is handled by setting the pair_index of the input to
2303 * itself, simply so it doesn't point to an unrelated argument.
2304 * Since we don't encounter the "second" during the input allocation
2305 * phase, nothing happens with the second half of the input pair.
2306 *
2307 * Case 2 is handled by setting the second input to pair=3, the
2308 * first output to pair=3, and the pair_index'es to match.
2309 */
2310 if (saw_alias_pair) {
2311 for (i = def->nb_oargs; i < nb_args; i++) {
2312 /*
2313 * Since [0-9pm] must be alone in the constraint string,
2314 * the only way they can both be set is if the pair comes
2315 * from the output alias.
2316 */
2317 if (!def->args_ct[i].ialias) {
2318 continue;
2319 }
2320 switch (def->args_ct[i].pair) {
2321 case 0:
2322 break;
2323 case 1:
2324 o = def->args_ct[i].alias_index;
2325 o2 = def->args_ct[o].pair_index;
2326 tcg_debug_assert(def->args_ct[o].pair == 1);
2327 tcg_debug_assert(def->args_ct[o2].pair == 2);
2328 if (def->args_ct[o2].oalias) {
2329 /* Case 1a */
2330 i2 = def->args_ct[o2].alias_index;
2331 tcg_debug_assert(def->args_ct[i2].pair == 2);
2332 def->args_ct[i2].pair_index = i;
2333 def->args_ct[i].pair_index = i2;
2334 } else {
2335 /* Case 1b */
2336 def->args_ct[i].pair_index = i;
2337 }
2338 break;
2339 case 2:
2340 o = def->args_ct[i].alias_index;
2341 o2 = def->args_ct[o].pair_index;
2342 tcg_debug_assert(def->args_ct[o].pair == 2);
2343 tcg_debug_assert(def->args_ct[o2].pair == 1);
2344 if (def->args_ct[o2].oalias) {
2345 /* Case 1a */
2346 i2 = def->args_ct[o2].alias_index;
2347 tcg_debug_assert(def->args_ct[i2].pair == 1);
2348 def->args_ct[i2].pair_index = i;
2349 def->args_ct[i].pair_index = i2;
2350 } else {
2351 /* Case 2 */
2352 def->args_ct[i].pair = 3;
2353 def->args_ct[o2].pair = 3;
2354 def->args_ct[i].pair_index = o2;
2355 def->args_ct[o2].pair_index = i;
2356 }
2357 break;
2358 default:
2359 g_assert_not_reached();
2360 }
2361 }
2362 }
2363
c896fe29
FB
2364 /* sort the constraints (XXX: this is just a heuristic) */
2365 sort_constraints(def, 0, def->nb_oargs);
2366 sort_constraints(def, def->nb_oargs, def->nb_iargs);
a9751609 2367 }
c896fe29
FB
2368}
2369
0c627cdc
RH
2370void tcg_op_remove(TCGContext *s, TCGOp *op)
2371{
d88a117e
RH
2372 TCGLabel *label;
2373
2374 switch (op->opc) {
2375 case INDEX_op_br:
2376 label = arg_label(op->args[0]);
2377 label->refs--;
2378 break;
2379 case INDEX_op_brcond_i32:
2380 case INDEX_op_brcond_i64:
2381 label = arg_label(op->args[3]);
2382 label->refs--;
2383 break;
2384 case INDEX_op_brcond2_i32:
2385 label = arg_label(op->args[5]);
2386 label->refs--;
2387 break;
2388 default:
2389 break;
2390 }
2391
15fa08f8
RH
2392 QTAILQ_REMOVE(&s->ops, op, link);
2393 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
abebf925 2394 s->nb_ops--;
0c627cdc
RH
2395
2396#ifdef CONFIG_PROFILER
d73415a3 2397 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
0c627cdc
RH
2398#endif
2399}
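/*
 * Note: removing a branch also drops a reference on its target label, which
 * is what later allows reachable_code_pass() to discard labels whose
 * reference count reaches zero.  The TCGOp itself is not freed; it is moved
 * to s->free_ops for reuse by tcg_op_alloc().
 */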
2400
a80cdd31
RH
2401void tcg_remove_ops_after(TCGOp *op)
2402{
2403 TCGContext *s = tcg_ctx;
2404
2405 while (true) {
2406 TCGOp *last = tcg_last_op();
2407 if (last == op) {
2408 return;
2409 }
2410 tcg_op_remove(s, last);
2411 }
2412}
2413
d4478943 2414static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
5a18407f 2415{
15fa08f8 2416 TCGContext *s = tcg_ctx;
cb10bc63
RH
2417 TCGOp *op = NULL;
2418
2419 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
2420 QTAILQ_FOREACH(op, &s->free_ops, link) {
2421 if (nargs <= op->nargs) {
2422 QTAILQ_REMOVE(&s->free_ops, op, link);
2423 nargs = op->nargs;
2424 goto found;
2425 }
2426 }
15fa08f8 2427 }
cb10bc63
RH
2428
2429 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
2430 nargs = MAX(4, nargs);
2431 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
2432
2433 found:
15fa08f8
RH
2434 memset(op, 0, offsetof(TCGOp, link));
2435 op->opc = opc;
cb10bc63
RH
2436 op->nargs = nargs;
2437
2438 /* Check for bitfield overflow. */
2439 tcg_debug_assert(op->nargs == nargs);
5a18407f 2440
cb10bc63 2441 s->nb_ops++;
15fa08f8
RH
2442 return op;
2443}
2444
d4478943 2445TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
15fa08f8 2446{
d4478943 2447 TCGOp *op = tcg_op_alloc(opc, nargs);
15fa08f8
RH
2448 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2449 return op;
2450}
5a18407f 2451
d4478943
PMD
2452TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
2453 TCGOpcode opc, unsigned nargs)
15fa08f8 2454{
d4478943 2455 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2456 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
5a18407f
RH
2457 return new_op;
2458}
2459
d4478943
PMD
2460TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
2461 TCGOpcode opc, unsigned nargs)
5a18407f 2462{
d4478943 2463 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2464 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
5a18407f
RH
2465 return new_op;
2466}
2467
b4fc67c7
RH
2468/* Reachable analysis: remove unreachable code. */
2469static void reachable_code_pass(TCGContext *s)
2470{
2471 TCGOp *op, *op_next;
2472 bool dead = false;
2473
2474 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2475 bool remove = dead;
2476 TCGLabel *label;
b4fc67c7
RH
2477
2478 switch (op->opc) {
2479 case INDEX_op_set_label:
2480 label = arg_label(op->args[0]);
2481 if (label->refs == 0) {
2482 /*
2483 * While there is an occasional backward branch, virtually
2484 * all branches generated by the translators are forward.
2485 * Which means that generally we will have already removed
2486 * all references to the label that will be, and there is
2487 * little to be gained by iterating.
2488 */
2489 remove = true;
2490 } else {
2491 /* Once we see a label, insns become live again. */
2492 dead = false;
2493 remove = false;
2494
2495 /*
2496 * Optimization can fold conditional branches to unconditional.
2497 * If we find a label with one reference which is preceded by
2498 * an unconditional branch to it, remove both. This needed to
2499 * wait until the dead code in between them was removed.
2500 */
2501 if (label->refs == 1) {
eae3eb3e 2502 TCGOp *op_prev = QTAILQ_PREV(op, link);
b4fc67c7
RH
2503 if (op_prev->opc == INDEX_op_br &&
2504 label == arg_label(op_prev->args[0])) {
2505 tcg_op_remove(s, op_prev);
2506 remove = true;
2507 }
2508 }
2509 }
2510 break;
2511
2512 case INDEX_op_br:
2513 case INDEX_op_exit_tb:
2514 case INDEX_op_goto_ptr:
2515 /* Unconditional branches; everything following is dead. */
2516 dead = true;
2517 break;
2518
2519 case INDEX_op_call:
2520 /* Notice noreturn helper calls, raising exceptions. */
90163900 2521 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
b4fc67c7
RH
2522 dead = true;
2523 }
2524 break;
2525
2526 case INDEX_op_insn_start:
2527 /* Never remove -- we need to keep these for unwind. */
2528 remove = false;
2529 break;
2530
2531 default:
2532 break;
2533 }
2534
2535 if (remove) {
2536 tcg_op_remove(s, op);
2537 }
2538 }
2539}
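/*
 * In short: a single forward walk in which everything after an
 * unconditional control transfer (br, exit_tb, goto_ptr, or a helper call
 * flagged TCG_CALL_NO_RETURN) is dead until the next label that is still
 * referenced; insn_start ops are always kept because the unwind info
 * depends on them.
 */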
2540
c70fbf0a
RH
2541#define TS_DEAD 1
2542#define TS_MEM 2
2543
5a18407f
RH
2544#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2545#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2546
25f49c5f
RH
2547/* For liveness_pass_1, the register preferences for a given temp. */
2548static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2549{
2550 return ts->state_ptr;
2551}
2552
2553/* For liveness_pass_1, reset the preferences for a given temp to the
2554 * maximal regset for its type.
2555 */
2556static inline void la_reset_pref(TCGTemp *ts)
2557{
2558 *la_temp_pref(ts)
2559 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2560}
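/*
 * During liveness_pass_1, ts->state holds the TS_DEAD/TS_MEM bits and
 * ts->state_ptr points at the temp's current register preference set,
 * allocated from a scratch array at the start of the pass.  liveness_pass_2
 * reuses state_ptr for a different purpose: the shadow temp of an indirect
 * global, or NULL.
 */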
2561
9c43b68d
AJ
2562/* liveness analysis: end of function: all temps are dead, and globals
2563 should be in memory. */
2616c808 2564static void la_func_end(TCGContext *s, int ng, int nt)
c896fe29 2565{
b83eabea
RH
2566 int i;
2567
2568 for (i = 0; i < ng; ++i) {
2569 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f 2570 la_reset_pref(&s->temps[i]);
b83eabea
RH
2571 }
2572 for (i = ng; i < nt; ++i) {
2573 s->temps[i].state = TS_DEAD;
25f49c5f 2574 la_reset_pref(&s->temps[i]);
b83eabea 2575 }
c896fe29
FB
2576}
2577
9c43b68d
AJ
2578/* liveness analysis: end of basic block: all temps are dead, globals
2579 and local temps should be in memory. */
2616c808 2580static void la_bb_end(TCGContext *s, int ng, int nt)
641d5fbe 2581{
b83eabea 2582 int i;
641d5fbe 2583
ee17db83
RH
2584 for (i = 0; i < nt; ++i) {
2585 TCGTemp *ts = &s->temps[i];
2586 int state;
2587
2588 switch (ts->kind) {
2589 case TEMP_FIXED:
2590 case TEMP_GLOBAL:
2591 case TEMP_LOCAL:
2592 state = TS_DEAD | TS_MEM;
2593 break;
2594 case TEMP_NORMAL:
c7482438 2595 case TEMP_EBB:
c0522136 2596 case TEMP_CONST:
ee17db83
RH
2597 state = TS_DEAD;
2598 break;
2599 default:
2600 g_assert_not_reached();
2601 }
2602 ts->state = state;
2603 la_reset_pref(ts);
641d5fbe
FB
2604 }
2605}
2606
f65a061c
RH
2607/* liveness analysis: sync globals back to memory. */
2608static void la_global_sync(TCGContext *s, int ng)
2609{
2610 int i;
2611
2612 for (i = 0; i < ng; ++i) {
25f49c5f
RH
2613 int state = s->temps[i].state;
2614 s->temps[i].state = state | TS_MEM;
2615 if (state == TS_DEAD) {
2616 /* If the global was previously dead, reset prefs. */
2617 la_reset_pref(&s->temps[i]);
2618 }
f65a061c
RH
2619 }
2620}
2621
b4cb76e6 2622/*
c7482438
RH
2623 * liveness analysis: conditional branch: all temps are dead unless
2624 * explicitly live-across-conditional-branch, globals and local temps
2625 * should be synced.
b4cb76e6
RH
2626 */
2627static void la_bb_sync(TCGContext *s, int ng, int nt)
2628{
2629 la_global_sync(s, ng);
2630
2631 for (int i = ng; i < nt; ++i) {
c0522136
RH
2632 TCGTemp *ts = &s->temps[i];
2633 int state;
2634
2635 switch (ts->kind) {
2636 case TEMP_LOCAL:
2637 state = ts->state;
2638 ts->state = state | TS_MEM;
b4cb76e6
RH
2639 if (state != TS_DEAD) {
2640 continue;
2641 }
c0522136
RH
2642 break;
2643 case TEMP_NORMAL:
b4cb76e6 2644 s->temps[i].state = TS_DEAD;
c0522136 2645 break;
c7482438 2646 case TEMP_EBB:
c0522136
RH
2647 case TEMP_CONST:
2648 continue;
2649 default:
2650 g_assert_not_reached();
b4cb76e6
RH
2651 }
2652 la_reset_pref(&s->temps[i]);
2653 }
2654}
2655
f65a061c
RH
2656/* liveness analysis: sync globals back to memory and kill. */
2657static void la_global_kill(TCGContext *s, int ng)
2658{
2659 int i;
2660
2661 for (i = 0; i < ng; i++) {
2662 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f
RH
2663 la_reset_pref(&s->temps[i]);
2664 }
2665}
2666
2667/* liveness analysis: note live globals crossing calls. */
2668static void la_cross_call(TCGContext *s, int nt)
2669{
2670 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2671 int i;
2672
2673 for (i = 0; i < nt; i++) {
2674 TCGTemp *ts = &s->temps[i];
2675 if (!(ts->state & TS_DEAD)) {
2676 TCGRegSet *pset = la_temp_pref(ts);
2677 TCGRegSet set = *pset;
2678
2679 set &= mask;
2680 /* If the combination is not possible, restart. */
2681 if (set == 0) {
2682 set = tcg_target_available_regs[ts->type] & mask;
2683 }
2684 *pset = set;
2685 }
f65a061c
RH
2686 }
2687}
2688
a1b3c48d 2689/* Liveness analysis: update the opc_arg_life array to tell if a
c896fe29
FB
2690 given input argument is dead. Instructions updating dead
2691 temporaries are removed. */
b83eabea 2692static void liveness_pass_1(TCGContext *s)
c896fe29 2693{
c70fbf0a 2694 int nb_globals = s->nb_globals;
2616c808 2695 int nb_temps = s->nb_temps;
15fa08f8 2696 TCGOp *op, *op_prev;
25f49c5f
RH
2697 TCGRegSet *prefs;
2698 int i;
2699
2700 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2701 for (i = 0; i < nb_temps; ++i) {
2702 s->temps[i].state_ptr = prefs + i;
2703 }
a1b3c48d 2704
ae36a246 2705 /* ??? Should be redundant with the exit_tb that ends the TB. */
2616c808 2706 la_func_end(s, nb_globals, nb_temps);
c896fe29 2707
eae3eb3e 2708 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
25f49c5f 2709 int nb_iargs, nb_oargs;
c45cb8bb
RH
2710 TCGOpcode opc_new, opc_new2;
2711 bool have_opc_new2;
a1b3c48d 2712 TCGLifeData arg_life = 0;
25f49c5f 2713 TCGTemp *ts;
c45cb8bb
RH
2714 TCGOpcode opc = op->opc;
2715 const TCGOpDef *def = &tcg_op_defs[opc];
2716
c45cb8bb 2717 switch (opc) {
c896fe29 2718 case INDEX_op_call:
c6e113f5 2719 {
39004a71
RH
2720 const TCGHelperInfo *info = tcg_call_info(op);
2721 int call_flags = tcg_call_flags(op);
c896fe29 2722
cd9090aa
RH
2723 nb_oargs = TCGOP_CALLO(op);
2724 nb_iargs = TCGOP_CALLI(op);
c6e113f5 2725
c45cb8bb 2726 /* pure functions can be removed if their result is unused */
78505279 2727 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
cf066674 2728 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2729 ts = arg_temp(op->args[i]);
2730 if (ts->state != TS_DEAD) {
c6e113f5 2731 goto do_not_remove_call;
9c43b68d 2732 }
c6e113f5 2733 }
c45cb8bb 2734 goto do_remove;
152c35aa
RH
2735 }
2736 do_not_remove_call:
c896fe29 2737
25f49c5f 2738 /* Output args are dead. */
152c35aa 2739 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2740 ts = arg_temp(op->args[i]);
2741 if (ts->state & TS_DEAD) {
152c35aa
RH
2742 arg_life |= DEAD_ARG << i;
2743 }
25f49c5f 2744 if (ts->state & TS_MEM) {
152c35aa 2745 arg_life |= SYNC_ARG << i;
c6e113f5 2746 }
25f49c5f
RH
2747 ts->state = TS_DEAD;
2748 la_reset_pref(ts);
152c35aa 2749 }
78505279 2750
31fd884b
RH
2751 /* Not used -- it will be tcg_target_call_oarg_reg(). */
2752 memset(op->output_pref, 0, sizeof(op->output_pref));
2753
152c35aa
RH
2754 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2755 TCG_CALL_NO_READ_GLOBALS))) {
f65a061c 2756 la_global_kill(s, nb_globals);
152c35aa 2757 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
f65a061c 2758 la_global_sync(s, nb_globals);
152c35aa 2759 }
b9c18f56 2760
25f49c5f 2761 /* Record arguments that die in this helper. */
152c35aa 2762 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
25f49c5f 2763 ts = arg_temp(op->args[i]);
39004a71 2764 if (ts->state & TS_DEAD) {
152c35aa 2765 arg_life |= DEAD_ARG << i;
c6e113f5 2766 }
152c35aa 2767 }
25f49c5f
RH
2768
2769 /* For all live registers, remove call-clobbered prefs. */
2770 la_cross_call(s, nb_temps);
2771
39004a71
RH
2772 /*
2773 * Input arguments are live for preceding opcodes.
2774 *
2775 * For those arguments that die, and will be allocated in
2776 * registers, clear the register set for that arg, to be
2777 * filled in below. For args that will be on the stack,
2778 * reset to any available reg. Process arguments in reverse
2779 * order so that if a temp is used more than once, the stack
2780 * reset to max happens before the register reset to 0.
2781 */
2782 for (i = nb_iargs - 1; i >= 0; i--) {
2783 const TCGCallArgumentLoc *loc = &info->in[i];
2784 ts = arg_temp(op->args[nb_oargs + i]);
25f49c5f 2785
39004a71
RH
2786 if (ts->state & TS_DEAD) {
2787 switch (loc->kind) {
2788 case TCG_CALL_ARG_NORMAL:
2789 case TCG_CALL_ARG_EXTEND_U:
2790 case TCG_CALL_ARG_EXTEND_S:
2791 if (REG_P(loc)) {
2792 *la_temp_pref(ts) = 0;
2793 break;
2794 }
2795 /* fall through */
2796 default:
2797 *la_temp_pref(ts) =
2798 tcg_target_available_regs[ts->type];
2799 break;
2800 }
25f49c5f
RH
2801 ts->state &= ~TS_DEAD;
2802 }
2803 }
2804
39004a71
RH
2805 /*
2806 * For each input argument, add its input register to prefs.
2807 * If a temp is used once, this produces a single set bit;
2808 * if a temp is used multiple times, this produces a set.
2809 */
2810 for (i = 0; i < nb_iargs; i++) {
2811 const TCGCallArgumentLoc *loc = &info->in[i];
2812 ts = arg_temp(op->args[nb_oargs + i]);
2813
2814 switch (loc->kind) {
2815 case TCG_CALL_ARG_NORMAL:
2816 case TCG_CALL_ARG_EXTEND_U:
2817 case TCG_CALL_ARG_EXTEND_S:
2818 if (REG_P(loc)) {
2819 tcg_regset_set_reg(*la_temp_pref(ts),
2820 tcg_target_call_iarg_regs[loc->arg_slot]);
2821 }
2822 break;
2823 default:
2824 break;
c19f47bf 2825 }
c896fe29 2826 }
c896fe29 2827 }
c896fe29 2828 break;
765b842a 2829 case INDEX_op_insn_start:
c896fe29 2830 break;
5ff9d6a4 2831 case INDEX_op_discard:
5ff9d6a4 2832 /* mark the temporary as dead */
25f49c5f
RH
2833 ts = arg_temp(op->args[0]);
2834 ts->state = TS_DEAD;
2835 la_reset_pref(ts);
5ff9d6a4 2836 break;
1305c451
RH
2837
2838 case INDEX_op_add2_i32:
c45cb8bb 2839 opc_new = INDEX_op_add_i32;
f1fae40c 2840 goto do_addsub2;
1305c451 2841 case INDEX_op_sub2_i32:
c45cb8bb 2842 opc_new = INDEX_op_sub_i32;
f1fae40c
RH
2843 goto do_addsub2;
2844 case INDEX_op_add2_i64:
c45cb8bb 2845 opc_new = INDEX_op_add_i64;
f1fae40c
RH
2846 goto do_addsub2;
2847 case INDEX_op_sub2_i64:
c45cb8bb 2848 opc_new = INDEX_op_sub_i64;
f1fae40c 2849 do_addsub2:
1305c451
RH
2850 nb_iargs = 4;
2851 nb_oargs = 2;
2852 /* Test if the high part of the operation is dead, but not
2853 the low part. The result can be optimized to a simple
2854 add or sub. This happens often for an x86_64 guest when the
2855 cpu mode is set to 32 bit. */
b83eabea
RH
2856 if (arg_temp(op->args[1])->state == TS_DEAD) {
2857 if (arg_temp(op->args[0])->state == TS_DEAD) {
1305c451
RH
2858 goto do_remove;
2859 }
c45cb8bb
RH
2860 /* Replace the opcode and adjust the args in place,
2861 leaving 3 unused args at the end. */
2862 op->opc = opc = opc_new;
efee3746
RH
2863 op->args[1] = op->args[2];
2864 op->args[2] = op->args[4];
1305c451
RH
2865 /* Fall through and mark the single-word operation live. */
2866 nb_iargs = 2;
2867 nb_oargs = 1;
2868 }
2869 goto do_not_remove;
2870
1414968a 2871 case INDEX_op_mulu2_i32:
c45cb8bb
RH
2872 opc_new = INDEX_op_mul_i32;
2873 opc_new2 = INDEX_op_muluh_i32;
2874 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
03271524 2875 goto do_mul2;
f1fae40c 2876 case INDEX_op_muls2_i32:
c45cb8bb
RH
2877 opc_new = INDEX_op_mul_i32;
2878 opc_new2 = INDEX_op_mulsh_i32;
2879 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
f1fae40c
RH
2880 goto do_mul2;
2881 case INDEX_op_mulu2_i64:
c45cb8bb
RH
2882 opc_new = INDEX_op_mul_i64;
2883 opc_new2 = INDEX_op_muluh_i64;
2884 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
03271524 2885 goto do_mul2;
f1fae40c 2886 case INDEX_op_muls2_i64:
c45cb8bb
RH
2887 opc_new = INDEX_op_mul_i64;
2888 opc_new2 = INDEX_op_mulsh_i64;
2889 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
03271524 2890 goto do_mul2;
f1fae40c 2891 do_mul2:
1414968a
RH
2892 nb_iargs = 2;
2893 nb_oargs = 2;
b83eabea
RH
2894 if (arg_temp(op->args[1])->state == TS_DEAD) {
2895 if (arg_temp(op->args[0])->state == TS_DEAD) {
03271524 2896 /* Both parts of the operation are dead. */
1414968a
RH
2897 goto do_remove;
2898 }
03271524 2899 /* The high part of the operation is dead; generate the low. */
c45cb8bb 2900 op->opc = opc = opc_new;
efee3746
RH
2901 op->args[1] = op->args[2];
2902 op->args[2] = op->args[3];
b83eabea 2903 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
c45cb8bb
RH
2904 /* The low part of the operation is dead; generate the high. */
2905 op->opc = opc = opc_new2;
efee3746
RH
2906 op->args[0] = op->args[1];
2907 op->args[1] = op->args[2];
2908 op->args[2] = op->args[3];
03271524
RH
2909 } else {
2910 goto do_not_remove;
1414968a 2911 }
03271524
RH
2912 /* Mark the single-word operation live. */
2913 nb_oargs = 1;
1414968a
RH
2914 goto do_not_remove;
2915
c896fe29 2916 default:
1305c451 2917 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
49516bc0
AJ
2918 nb_iargs = def->nb_iargs;
2919 nb_oargs = def->nb_oargs;
c896fe29 2920
49516bc0
AJ
2921 /* Test if the operation can be removed because all
2922 its outputs are dead. We assume that nb_oargs == 0
2923 implies side effects */
2924 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
c45cb8bb 2925 for (i = 0; i < nb_oargs; i++) {
b83eabea 2926 if (arg_temp(op->args[i])->state != TS_DEAD) {
49516bc0 2927 goto do_not_remove;
9c43b68d 2928 }
49516bc0 2929 }
152c35aa
RH
2930 goto do_remove;
2931 }
2932 goto do_not_remove;
49516bc0 2933
152c35aa
RH
2934 do_remove:
2935 tcg_op_remove(s, op);
2936 break;
2937
2938 do_not_remove:
152c35aa 2939 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2940 ts = arg_temp(op->args[i]);
2941
2942 /* Remember the preference of the uses that followed. */
31fd884b
RH
2943 if (i < ARRAY_SIZE(op->output_pref)) {
2944 op->output_pref[i] = *la_temp_pref(ts);
2945 }
25f49c5f
RH
2946
2947 /* Output args are dead. */
2948 if (ts->state & TS_DEAD) {
152c35aa 2949 arg_life |= DEAD_ARG << i;
49516bc0 2950 }
25f49c5f 2951 if (ts->state & TS_MEM) {
152c35aa
RH
2952 arg_life |= SYNC_ARG << i;
2953 }
25f49c5f
RH
2954 ts->state = TS_DEAD;
2955 la_reset_pref(ts);
152c35aa 2956 }
49516bc0 2957
25f49c5f 2958 /* If end of basic block, update. */
ae36a246
RH
2959 if (def->flags & TCG_OPF_BB_EXIT) {
2960 la_func_end(s, nb_globals, nb_temps);
b4cb76e6
RH
2961 } else if (def->flags & TCG_OPF_COND_BRANCH) {
2962 la_bb_sync(s, nb_globals, nb_temps);
ae36a246 2963 } else if (def->flags & TCG_OPF_BB_END) {
2616c808 2964 la_bb_end(s, nb_globals, nb_temps);
152c35aa 2965 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
f65a061c 2966 la_global_sync(s, nb_globals);
25f49c5f
RH
2967 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2968 la_cross_call(s, nb_temps);
2969 }
152c35aa
RH
2970 }
2971
25f49c5f 2972 /* Record arguments that die in this opcode. */
152c35aa 2973 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
2974 ts = arg_temp(op->args[i]);
2975 if (ts->state & TS_DEAD) {
152c35aa 2976 arg_life |= DEAD_ARG << i;
c896fe29 2977 }
c896fe29 2978 }
25f49c5f
RH
2979
2980 /* Input arguments are live for preceding opcodes. */
152c35aa 2981 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
2982 ts = arg_temp(op->args[i]);
2983 if (ts->state & TS_DEAD) {
2984 /* For operands that were dead, initially allow
2985 all regs for the type. */
2986 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
2987 ts->state &= ~TS_DEAD;
2988 }
2989 }
2990
2991 /* Incorporate constraints for this operand. */
2992 switch (opc) {
2993 case INDEX_op_mov_i32:
2994 case INDEX_op_mov_i64:
2995 /* Note that these are TCG_OPF_NOT_PRESENT and do not
2996 have proper constraints. That said, special case
2997 moves to propagate preferences backward. */
2998 if (IS_DEAD_ARG(1)) {
2999 *la_temp_pref(arg_temp(op->args[0]))
3000 = *la_temp_pref(arg_temp(op->args[1]));
3001 }
3002 break;
3003
3004 default:
3005 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3006 const TCGArgConstraint *ct = &def->args_ct[i];
3007 TCGRegSet set, *pset;
3008
3009 ts = arg_temp(op->args[i]);
3010 pset = la_temp_pref(ts);
3011 set = *pset;
3012
9be0d080 3013 set &= ct->regs;
bc2b17e6 3014 if (ct->ialias) {
31fd884b 3015 set &= output_pref(op, ct->alias_index);
25f49c5f
RH
3016 }
3017 /* If the combination is not possible, restart. */
3018 if (set == 0) {
9be0d080 3019 set = ct->regs;
25f49c5f
RH
3020 }
3021 *pset = set;
3022 }
3023 break;
152c35aa 3024 }
c896fe29
FB
3025 break;
3026 }
bee158cb 3027 op->life = arg_life;
1ff0a2c5 3028 }
c896fe29 3029}
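/*
 * Summary of pass 1: ops are visited in reverse order; op->life accumulates
 * the DEAD_ARG/SYNC_ARG bits later consumed by the register allocator; pure
 * ops whose outputs are all dead are deleted; add2/sub2 and mulu2/muls2 are
 * narrowed to single-word ops when half of the result is dead; and
 * output_pref[] records the register preferences seen from later uses.
 */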
c896fe29 3030
5a18407f 3031/* Liveness analysis: Convert indirect regs to direct temporaries. */
b83eabea 3032static bool liveness_pass_2(TCGContext *s)
5a18407f
RH
3033{
3034 int nb_globals = s->nb_globals;
15fa08f8 3035 int nb_temps, i;
5a18407f 3036 bool changes = false;
15fa08f8 3037 TCGOp *op, *op_next;
5a18407f 3038
5a18407f
RH
3039 /* Create a temporary for each indirect global. */
3040 for (i = 0; i < nb_globals; ++i) {
3041 TCGTemp *its = &s->temps[i];
3042 if (its->indirect_reg) {
3043 TCGTemp *dts = tcg_temp_alloc(s);
3044 dts->type = its->type;
3045 dts->base_type = its->base_type;
c7482438 3046 dts->kind = TEMP_EBB;
b83eabea
RH
3047 its->state_ptr = dts;
3048 } else {
3049 its->state_ptr = NULL;
5a18407f 3050 }
b83eabea
RH
3051 /* All globals begin dead. */
3052 its->state = TS_DEAD;
3053 }
3054 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3055 TCGTemp *its = &s->temps[i];
3056 its->state_ptr = NULL;
3057 its->state = TS_DEAD;
5a18407f 3058 }
5a18407f 3059
15fa08f8 3060 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
5a18407f
RH
3061 TCGOpcode opc = op->opc;
3062 const TCGOpDef *def = &tcg_op_defs[opc];
3063 TCGLifeData arg_life = op->life;
3064 int nb_iargs, nb_oargs, call_flags;
b83eabea 3065 TCGTemp *arg_ts, *dir_ts;
5a18407f 3066
5a18407f 3067 if (opc == INDEX_op_call) {
cd9090aa
RH
3068 nb_oargs = TCGOP_CALLO(op);
3069 nb_iargs = TCGOP_CALLI(op);
90163900 3070 call_flags = tcg_call_flags(op);
5a18407f
RH
3071 } else {
3072 nb_iargs = def->nb_iargs;
3073 nb_oargs = def->nb_oargs;
3074
3075 /* Set flags similar to how calls require. */
b4cb76e6
RH
3076 if (def->flags & TCG_OPF_COND_BRANCH) {
3077 /* Like reading globals: sync_globals */
3078 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3079 } else if (def->flags & TCG_OPF_BB_END) {
5a18407f
RH
3080 /* Like writing globals: save_globals */
3081 call_flags = 0;
3082 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3083 /* Like reading globals: sync_globals */
3084 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3085 } else {
3086 /* No effect on globals. */
3087 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3088 TCG_CALL_NO_WRITE_GLOBALS);
3089 }
3090 }
3091
3092 /* Make sure that input arguments are available. */
3093 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3094 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3095 dir_ts = arg_ts->state_ptr;
3096 if (dir_ts && arg_ts->state == TS_DEAD) {
3097 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3098 ? INDEX_op_ld_i32
3099 : INDEX_op_ld_i64);
3100 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3101
3102 lop->args[0] = temp_arg(dir_ts);
3103 lop->args[1] = temp_arg(arg_ts->mem_base);
3104 lop->args[2] = arg_ts->mem_offset;
3105
3106 /* Loaded, but synced with memory. */
3107 arg_ts->state = TS_MEM;
5a18407f
RH
3108 }
3109 }
3110
3111 /* Perform input replacement, and mark inputs that became dead.
3112 No action is required except keeping temp_state up to date
3113 so that we reload when needed. */
3114 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3115 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3116 dir_ts = arg_ts->state_ptr;
3117 if (dir_ts) {
3118 op->args[i] = temp_arg(dir_ts);
3119 changes = true;
3120 if (IS_DEAD_ARG(i)) {
3121 arg_ts->state = TS_DEAD;
5a18407f
RH
3122 }
3123 }
3124 }
3125
3126 /* Liveness analysis should ensure that the following are
3127 all correct, for call sites and basic block end points. */
3128 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
3129 /* Nothing to do */
3130 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
3131 for (i = 0; i < nb_globals; ++i) {
3132 /* Liveness should see that globals are synced back,
3133 that is, either TS_DEAD or TS_MEM. */
b83eabea
RH
3134 arg_ts = &s->temps[i];
3135 tcg_debug_assert(arg_ts->state_ptr == 0
3136 || arg_ts->state != 0);
5a18407f
RH
3137 }
3138 } else {
3139 for (i = 0; i < nb_globals; ++i) {
3140 /* Liveness should see that globals are saved back,
3141 that is, TS_DEAD, waiting to be reloaded. */
b83eabea
RH
3142 arg_ts = &s->temps[i];
3143 tcg_debug_assert(arg_ts->state_ptr == 0
3144 || arg_ts->state == TS_DEAD);
5a18407f
RH
3145 }
3146 }
3147
3148 /* Outputs become available. */
61f15c48
RH
3149 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
3150 arg_ts = arg_temp(op->args[0]);
b83eabea 3151 dir_ts = arg_ts->state_ptr;
61f15c48
RH
3152 if (dir_ts) {
3153 op->args[0] = temp_arg(dir_ts);
3154 changes = true;
3155
3156 /* The output is now live and modified. */
3157 arg_ts->state = 0;
3158
3159 if (NEED_SYNC_ARG(0)) {
3160 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3161 ? INDEX_op_st_i32
3162 : INDEX_op_st_i64);
d4478943 3163 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
61f15c48
RH
3164 TCGTemp *out_ts = dir_ts;
3165
3166 if (IS_DEAD_ARG(0)) {
3167 out_ts = arg_temp(op->args[1]);
3168 arg_ts->state = TS_DEAD;
3169 tcg_op_remove(s, op);
3170 } else {
3171 arg_ts->state = TS_MEM;
3172 }
3173
3174 sop->args[0] = temp_arg(out_ts);
3175 sop->args[1] = temp_arg(arg_ts->mem_base);
3176 sop->args[2] = arg_ts->mem_offset;
3177 } else {
3178 tcg_debug_assert(!IS_DEAD_ARG(0));
3179 }
5a18407f 3180 }
61f15c48
RH
3181 } else {
3182 for (i = 0; i < nb_oargs; i++) {
3183 arg_ts = arg_temp(op->args[i]);
3184 dir_ts = arg_ts->state_ptr;
3185 if (!dir_ts) {
3186 continue;
3187 }
3188 op->args[i] = temp_arg(dir_ts);
3189 changes = true;
5a18407f 3190
61f15c48
RH
3191 /* The output is now live and modified. */
3192 arg_ts->state = 0;
5a18407f 3193
61f15c48
RH
3194 /* Sync outputs upon their last write. */
3195 if (NEED_SYNC_ARG(i)) {
3196 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3197 ? INDEX_op_st_i32
3198 : INDEX_op_st_i64);
d4478943 3199 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
5a18407f 3200
61f15c48
RH
3201 sop->args[0] = temp_arg(dir_ts);
3202 sop->args[1] = temp_arg(arg_ts->mem_base);
3203 sop->args[2] = arg_ts->mem_offset;
5a18407f 3204
61f15c48
RH
3205 arg_ts->state = TS_MEM;
3206 }
3207 /* Drop outputs that are dead. */
3208 if (IS_DEAD_ARG(i)) {
3209 arg_ts->state = TS_DEAD;
3210 }
5a18407f
RH
3211 }
3212 }
3213 }
3214
3215 return changes;
3216}
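/*
 * Summary of pass 2: each indirect global gets a shadow TEMP_EBB temp.
 * Reads insert a load from the global's memory slot when the shadow is not
 * already valid, writes are redirected to the shadow, and a store back to
 * memory is appended at the point of the last write (NEED_SYNC_ARG).  The
 * return value reports whether the op stream was modified at all.
 */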
3217
2272e4a7 3218static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
c896fe29 3219{
31c96417
RH
3220 int size = tcg_type_size(ts->type);
3221 int align;
3222 intptr_t off;
c1c09194
RH
3223
3224 switch (ts->type) {
3225 case TCG_TYPE_I32:
31c96417 3226 align = 4;
c1c09194
RH
3227 break;
3228 case TCG_TYPE_I64:
3229 case TCG_TYPE_V64:
31c96417 3230 align = 8;
c1c09194
RH
3231 break;
3232 case TCG_TYPE_V128:
c1c09194
RH
3233 case TCG_TYPE_V256:
3234 /* Note that we do not require aligned storage for V256. */
31c96417 3235 align = 16;
c1c09194
RH
3236 break;
3237 default:
3238 g_assert_not_reached();
b591dc59 3239 }
c1c09194 3240
b9537d59
RH
3241 /*
3242 * Assume the stack is sufficiently aligned.
3243 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
3244 * and do not require 16 byte vector alignment. This seems slightly
3245 * easier than fully parameterizing the above switch statement.
3246 */
3247 align = MIN(TCG_TARGET_STACK_ALIGN, align);
c1c09194 3248 off = ROUND_UP(s->current_frame_offset, align);
732d5897
RH
3249
3250 /* If we've exhausted the stack frame, restart with a smaller TB. */
3251 if (off + size > s->frame_end) {
3252 tcg_raise_tb_overflow(s);
3253 }
c1c09194
RH
3254 s->current_frame_offset = off + size;
3255
3256 ts->mem_offset = off;
9defd1bd
RH
3257#if defined(__sparc__)
3258 ts->mem_offset += TCG_TARGET_STACK_BIAS;
3259#endif
b3a62939 3260 ts->mem_base = s->frame_temp;
c896fe29 3261 ts->mem_allocated = 1;
c896fe29
FB
3262}
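/*
 * Note that stack slots are packed with at most TCG_TARGET_STACK_ALIGN
 * alignment even for vector types; when the frame is exhausted the
 * translation is abandoned via tcg_raise_tb_overflow() and retried with a
 * smaller TB.
 */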
3263
098859f1
RH
3264/* Assign @reg to @ts, and update reg_to_temp[]. */
3265static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
3266{
3267 if (ts->val_type == TEMP_VAL_REG) {
3268 TCGReg old = ts->reg;
3269 tcg_debug_assert(s->reg_to_temp[old] == ts);
3270 if (old == reg) {
3271 return;
3272 }
3273 s->reg_to_temp[old] = NULL;
3274 }
3275 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3276 s->reg_to_temp[reg] = ts;
3277 ts->val_type = TEMP_VAL_REG;
3278 ts->reg = reg;
3279}
3280
3281/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
3282static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
3283{
3284 tcg_debug_assert(type != TEMP_VAL_REG);
3285 if (ts->val_type == TEMP_VAL_REG) {
3286 TCGReg reg = ts->reg;
3287 tcg_debug_assert(s->reg_to_temp[reg] == ts);
3288 s->reg_to_temp[reg] = NULL;
3289 }
3290 ts->val_type = type;
3291}
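/*
 * These two helpers centralize the transitions of val_type to and from
 * TEMP_VAL_REG, keeping the reg_to_temp[] reverse map consistent with the
 * per-temp state.
 */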
3292
b722452a 3293static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
b3915dbb 3294
59d7c14e
RH
3295/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
3296 mark it free; otherwise mark it dead. */
3297static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 3298{
c0522136
RH
3299 TCGTempVal new_type;
3300
3301 switch (ts->kind) {
3302 case TEMP_FIXED:
59d7c14e 3303 return;
c0522136
RH
3304 case TEMP_GLOBAL:
3305 case TEMP_LOCAL:
3306 new_type = TEMP_VAL_MEM;
3307 break;
3308 case TEMP_NORMAL:
c7482438 3309 case TEMP_EBB:
c0522136
RH
3310 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
3311 break;
3312 case TEMP_CONST:
3313 new_type = TEMP_VAL_CONST;
3314 break;
3315 default:
3316 g_assert_not_reached();
59d7c14e 3317 }
098859f1 3318 set_temp_val_nonreg(s, ts, new_type);
59d7c14e 3319}
7f6ceedf 3320
59d7c14e
RH
3321/* Mark a temporary as dead. */
3322static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3323{
3324 temp_free_or_dead(s, ts, 1);
3325}
3326
3327/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3328 register needs to be allocated to store a constant. If 'free_or_dead'
3329 is non-zero, subsequently release the temporary; if it is positive, the
3330 temp is dead; if it is negative, the temp is free. */
98b4e186
RH
3331static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3332 TCGRegSet preferred_regs, int free_or_dead)
59d7c14e 3333{
c0522136 3334 if (!temp_readonly(ts) && !ts->mem_coherent) {
7f6ceedf 3335 if (!ts->mem_allocated) {
2272e4a7 3336 temp_allocate_frame(s, ts);
59d7c14e 3337 }
59d7c14e
RH
3338 switch (ts->val_type) {
3339 case TEMP_VAL_CONST:
3340 /* If we're going to free the temp immediately, then we won't
3341 require it later in a register, so attempt to store the
3342 constant to memory directly. */
3343 if (free_or_dead
3344 && tcg_out_sti(s, ts->type, ts->val,
3345 ts->mem_base->reg, ts->mem_offset)) {
3346 break;
3347 }
3348 temp_load(s, ts, tcg_target_available_regs[ts->type],
98b4e186 3349 allocated_regs, preferred_regs);
59d7c14e
RH
3350 /* fallthrough */
3351
3352 case TEMP_VAL_REG:
3353 tcg_out_st(s, ts->type, ts->reg,
3354 ts->mem_base->reg, ts->mem_offset);
3355 break;
3356
3357 case TEMP_VAL_MEM:
3358 break;
3359
3360 case TEMP_VAL_DEAD:
3361 default:
3362 tcg_abort();
3363 }
3364 ts->mem_coherent = 1;
3365 }
3366 if (free_or_dead) {
3367 temp_free_or_dead(s, ts, free_or_dead);
7f6ceedf 3368 }
7f6ceedf
AJ
3369}
3370
c896fe29 3371/* free register 'reg' by spilling the corresponding temporary if necessary */
b3915dbb 3372static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
c896fe29 3373{
f8b2f202 3374 TCGTemp *ts = s->reg_to_temp[reg];
f8b2f202 3375 if (ts != NULL) {
98b4e186 3376 temp_sync(s, ts, allocated_regs, 0, -1);
c896fe29
FB
3377 }
3378}
3379
b016486e
RH
3380/**
3381 * tcg_reg_alloc:
3382 * @required_regs: Set of registers in which we must allocate.
3383 * @allocated_regs: Set of registers which must be avoided.
3384 * @preferred_regs: Set of registers we should prefer.
3385 * @rev: True if we search the registers in "indirect" order.
3386 *
3387 * The allocated register must be in @required_regs & ~@allocated_regs,
3388 * but if we can put it in @preferred_regs we may save a move later.
3389 */
3390static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3391 TCGRegSet allocated_regs,
3392 TCGRegSet preferred_regs, bool rev)
c896fe29 3393{
b016486e
RH
3394 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3395 TCGRegSet reg_ct[2];
91478cef 3396 const int *order;
c896fe29 3397
b016486e
RH
3398 reg_ct[1] = required_regs & ~allocated_regs;
3399 tcg_debug_assert(reg_ct[1] != 0);
3400 reg_ct[0] = reg_ct[1] & preferred_regs;
3401
3402 /* Skip the preferred_regs option if it cannot be satisfied,
3403 or if the preference made no difference. */
3404 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3405
91478cef 3406 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
c896fe29 3407
b016486e
RH
3408 /* Try free registers, preferences first. */
3409 for (j = f; j < 2; j++) {
3410 TCGRegSet set = reg_ct[j];
3411
3412 if (tcg_regset_single(set)) {
3413 /* One register in the set. */
3414 TCGReg reg = tcg_regset_first(set);
3415 if (s->reg_to_temp[reg] == NULL) {
3416 return reg;
3417 }
3418 } else {
3419 for (i = 0; i < n; i++) {
3420 TCGReg reg = order[i];
3421 if (s->reg_to_temp[reg] == NULL &&
3422 tcg_regset_test_reg(set, reg)) {
3423 return reg;
3424 }
3425 }
3426 }
c896fe29
FB
3427 }
3428
b016486e
RH
3429 /* We must spill something. */
3430 for (j = f; j < 2; j++) {
3431 TCGRegSet set = reg_ct[j];
3432
3433 if (tcg_regset_single(set)) {
3434 /* One register in the set. */
3435 TCGReg reg = tcg_regset_first(set);
b3915dbb 3436 tcg_reg_free(s, reg, allocated_regs);
c896fe29 3437 return reg;
b016486e
RH
3438 } else {
3439 for (i = 0; i < n; i++) {
3440 TCGReg reg = order[i];
3441 if (tcg_regset_test_reg(set, reg)) {
3442 tcg_reg_free(s, reg, allocated_regs);
3443 return reg;
3444 }
3445 }
c896fe29
FB
3446 }
3447 }
3448
3449 tcg_abort();
3450}
3451
29f5e925
RH
3452static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
3453 TCGRegSet allocated_regs,
3454 TCGRegSet preferred_regs, bool rev)
3455{
3456 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3457 TCGRegSet reg_ct[2];
3458 const int *order;
3459
3460 /* Ensure that if I is not in allocated_regs, I+1 is not either. */
3461 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
3462 tcg_debug_assert(reg_ct[1] != 0);
3463 reg_ct[0] = reg_ct[1] & preferred_regs;
3464
3465 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3466
3467 /*
3468 * Skip the preferred_regs option if it cannot be satisfied,
3469 * or if the preference made no difference.
3470 */
3471 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3472
3473 /*
3474 * Minimize the number of flushes by looking for 2 free registers first,
3475 * then a single flush, then two flushes.
3476 */
3477 for (fmin = 2; fmin >= 0; fmin--) {
3478 for (j = k; j < 2; j++) {
3479 TCGRegSet set = reg_ct[j];
3480
3481 for (i = 0; i < n; i++) {
3482 TCGReg reg = order[i];
3483
3484 if (tcg_regset_test_reg(set, reg)) {
3485 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
3486 if (f >= fmin) {
3487 tcg_reg_free(s, reg, allocated_regs);
3488 tcg_reg_free(s, reg + 1, allocated_regs);
3489 return reg;
3490 }
3491 }
3492 }
3493 }
3494 }
3495 tcg_abort();
3496}
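/*
 * The pair allocator looks for a base register R in the required set such
 * that neither R nor R+1 is in allocated_regs (hence the
 * allocated_regs | allocated_regs >> 1 masking above), and prefers the
 * candidate needing the fewest flushes: a fully free pair first, then one
 * spill, then two.
 */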
3497
40ae5c62
RH
3498/* Make sure the temporary is in a register. If needed, allocate the register
3499 from DESIRED while avoiding ALLOCATED. */
3500static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
b722452a 3501 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
40ae5c62
RH
3502{
3503 TCGReg reg;
3504
3505 switch (ts->val_type) {
3506 case TEMP_VAL_REG:
3507 return;
3508 case TEMP_VAL_CONST:
b016486e 3509 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3510 preferred_regs, ts->indirect_base);
0a6a8bc8
RH
3511 if (ts->type <= TCG_TYPE_I64) {
3512 tcg_out_movi(s, ts->type, reg, ts->val);
3513 } else {
4e186175
RH
3514 uint64_t val = ts->val;
3515 MemOp vece = MO_64;
3516
3517 /*
3518 * Find the minimal vector element that matches the constant.
3519 * The targets will, in general, have to do this search anyway;
3520 * do this generically.
3521 */
4e186175
RH
3522 if (val == dup_const(MO_8, val)) {
3523 vece = MO_8;
3524 } else if (val == dup_const(MO_16, val)) {
3525 vece = MO_16;
0b4286dd 3526 } else if (val == dup_const(MO_32, val)) {
4e186175
RH
3527 vece = MO_32;
3528 }
3529
3530 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
0a6a8bc8 3531 }
40ae5c62
RH
3532 ts->mem_coherent = 0;
3533 break;
3534 case TEMP_VAL_MEM:
b016486e 3535 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3536 preferred_regs, ts->indirect_base);
40ae5c62
RH
3537 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3538 ts->mem_coherent = 1;
3539 break;
3540 case TEMP_VAL_DEAD:
3541 default:
3542 tcg_abort();
3543 }
098859f1 3544 set_temp_val_reg(s, ts, reg);
40ae5c62
RH
3545}
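/*
 * Constants are materialized with tcg_out_movi() for integer types, or
 * tcg_out_dupi_vec() for vector types using the smallest element size whose
 * replication produces the value; memory-resident temps are reloaded with
 * tcg_out_ld().  mem_coherent then records whether the backing slot still
 * matches the register contents.
 */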
3546
59d7c14e
RH
3547/* Save a temporary to memory. 'allocated_regs' is used in case a
3548 temporary register needs to be allocated to store a constant. */
3549static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1ad80729 3550{
5a18407f
RH
3551 /* The liveness analysis already ensures that globals are back
3552 in memory. Keep a tcg_debug_assert for safety. */
e01fa97d 3553 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
1ad80729
AJ
3554}
3555
9814dd27 3556/* save globals to their canonical location and assume they can be
e8996ee0
FB
3557 modified by the following code. 'allocated_regs' is used in case a
3558 temporary register needs to be allocated to store a constant. */
3559static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 3560{
ac3b8891 3561 int i, n;
c896fe29 3562
ac3b8891 3563 for (i = 0, n = s->nb_globals; i < n; i++) {
b13eb728 3564 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 3565 }
e5097dc8
FB
3566}
3567
3d5c5f87
AJ
3568/* sync globals to their canonical location and assume they can be
3569 read by the following code. 'allocated_regs' is used in case a
3570 temporary register needs to be allocated to store a constant. */
3571static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3572{
ac3b8891 3573 int i, n;
3d5c5f87 3574
ac3b8891 3575 for (i = 0, n = s->nb_globals; i < n; i++) {
12b9b11a 3576 TCGTemp *ts = &s->temps[i];
5a18407f 3577 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
ee17db83 3578 || ts->kind == TEMP_FIXED
5a18407f 3579 || ts->mem_coherent);
3d5c5f87
AJ
3580 }
3581}
3582
e5097dc8 3583/* at the end of a basic block, we assume all temporaries are dead and
e8996ee0
FB
3584 all globals are stored at their canonical location. */
3585static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
e5097dc8 3586{
e5097dc8
FB
3587 int i;
3588
b13eb728
RH
3589 for (i = s->nb_globals; i < s->nb_temps; i++) {
3590 TCGTemp *ts = &s->temps[i];
c0522136
RH
3591
3592 switch (ts->kind) {
3593 case TEMP_LOCAL:
b13eb728 3594 temp_save(s, ts, allocated_regs);
c0522136
RH
3595 break;
3596 case TEMP_NORMAL:
c7482438 3597 case TEMP_EBB:
5a18407f
RH
3598 /* The liveness analysis already ensures that temps are dead.
3599 Keep a tcg_debug_assert for safety. */
3600 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136
RH
3601 break;
3602 case TEMP_CONST:
3603 /* Similarly, we should have freed any allocated register. */
3604 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3605 break;
3606 default:
3607 g_assert_not_reached();
c896fe29
FB
3608 }
3609 }
e8996ee0
FB
3610
3611 save_globals(s, allocated_regs);
c896fe29
FB
3612}
3613
b4cb76e6 3614/*
c7482438
RH
3615 * At a conditional branch, we assume all temporaries are dead unless
3616 * explicitly live-across-conditional-branch; all globals and local
3617 * temps are synced to their location.
b4cb76e6
RH
3618 */
3619static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3620{
3621 sync_globals(s, allocated_regs);
3622
3623 for (int i = s->nb_globals; i < s->nb_temps; i++) {
3624 TCGTemp *ts = &s->temps[i];
3625 /*
3626 * The liveness analysis already ensures that temps are dead.
3627 * Keep tcg_debug_asserts for safety.
3628 */
c0522136
RH
3629 switch (ts->kind) {
3630 case TEMP_LOCAL:
b4cb76e6 3631 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
c0522136
RH
3632 break;
3633 case TEMP_NORMAL:
b4cb76e6 3634 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136 3635 break;
c7482438 3636 case TEMP_EBB:
c0522136
RH
3637 case TEMP_CONST:
3638 break;
3639 default:
3640 g_assert_not_reached();
b4cb76e6
RH
3641 }
3642 }
3643}
3644
bab1671f 3645/*
c58f4c97 3646 * Specialized code generation for INDEX_op_mov_* with a constant.
bab1671f 3647 */
0fe4fca4 3648static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
ba87719c
RH
3649 tcg_target_ulong val, TCGLifeData arg_life,
3650 TCGRegSet preferred_regs)
e8996ee0 3651{
d63e3b6e 3652 /* ENV should not be modified. */
e01fa97d 3653 tcg_debug_assert(!temp_readonly(ots));
59d7c14e
RH
3654
3655 /* The movi is not explicitly generated here. */
098859f1 3656 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
59d7c14e
RH
3657 ots->val = val;
3658 ots->mem_coherent = 0;
3659 if (NEED_SYNC_ARG(0)) {
ba87719c 3660 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
59d7c14e 3661 } else if (IS_DEAD_ARG(0)) {
f8bf00f1 3662 temp_dead(s, ots);
4c4e1ab2 3663 }
e8996ee0
FB
3664}
3665
bab1671f
RH
3666/*
3667 * Specialized code generation for INDEX_op_mov_*.
3668 */
dd186292 3669static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
c896fe29 3670{
dd186292 3671 const TCGLifeData arg_life = op->life;
69e3706d 3672 TCGRegSet allocated_regs, preferred_regs;
c896fe29 3673 TCGTemp *ts, *ots;
450445d5 3674 TCGType otype, itype;
098859f1 3675 TCGReg oreg, ireg;
c896fe29 3676
d21369f5 3677 allocated_regs = s->reserved_regs;
31fd884b 3678 preferred_regs = output_pref(op, 0);
43439139
RH
3679 ots = arg_temp(op->args[0]);
3680 ts = arg_temp(op->args[1]);
450445d5 3681
d63e3b6e 3682 /* ENV should not be modified. */
e01fa97d 3683 tcg_debug_assert(!temp_readonly(ots));
d63e3b6e 3684
450445d5
RH
3685 /* Note that otype != itype for no-op truncation. */
3686 otype = ots->type;
3687 itype = ts->type;
c29c1d7e 3688
0fe4fca4
PB
3689 if (ts->val_type == TEMP_VAL_CONST) {
3690 /* propagate constant or generate sti */
3691 tcg_target_ulong val = ts->val;
3692 if (IS_DEAD_ARG(1)) {
3693 temp_dead(s, ts);
3694 }
69e3706d 3695 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
0fe4fca4
PB
3696 return;
3697 }
3698
3699 /* If the source value is in memory, we're going to be forced
3700 to have it in a register in order to perform the copy. Copy
3701 the SOURCE value into its own register first, so that we
3702 don't have to reload SOURCE the next time it is used. */
3703 if (ts->val_type == TEMP_VAL_MEM) {
69e3706d
RH
3704 temp_load(s, ts, tcg_target_available_regs[itype],
3705 allocated_regs, preferred_regs);
c29c1d7e 3706 }
0fe4fca4 3707 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
098859f1
RH
3708 ireg = ts->reg;
3709
d63e3b6e 3710 if (IS_DEAD_ARG(0)) {
c29c1d7e
AJ
3711 /* mov to a non-saved dead register makes no sense (even with
3712 liveness analysis disabled). */
eabb7b91 3713 tcg_debug_assert(NEED_SYNC_ARG(0));
c29c1d7e 3714 if (!ots->mem_allocated) {
2272e4a7 3715 temp_allocate_frame(s, ots);
c29c1d7e 3716 }
098859f1 3717 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
c29c1d7e 3718 if (IS_DEAD_ARG(1)) {
f8bf00f1 3719 temp_dead(s, ts);
c29c1d7e 3720 }
f8bf00f1 3721 temp_dead(s, ots);
098859f1
RH
3722 return;
3723 }
3724
3725 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3726 /*
3727 * The mov can be suppressed. Kill input first, so that it
3728 * is unlinked from reg_to_temp, then set the output to the
3729 * reg that we saved from the input.
3730 */
3731 temp_dead(s, ts);
3732 oreg = ireg;
c29c1d7e 3733 } else {
098859f1
RH
3734 if (ots->val_type == TEMP_VAL_REG) {
3735 oreg = ots->reg;
c896fe29 3736 } else {
098859f1
RH
3737 /* Make sure to not spill the input register during allocation. */
3738 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3739 allocated_regs | ((TCGRegSet)1 << ireg),
3740 preferred_regs, ots->indirect_base);
c896fe29 3741 }
098859f1
RH
3742 if (!tcg_out_mov(s, otype, oreg, ireg)) {
3743 /*
3744 * Cross register class move not supported.
3745 * Store the source register into the destination slot
3746 * and leave the destination temp as TEMP_VAL_MEM.
3747 */
3748 assert(!temp_readonly(ots));
3749 if (!ts->mem_allocated) {
3750 temp_allocate_frame(s, ots);
3751 }
3752 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
3753 set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
3754 ots->mem_coherent = 1;
3755 return;
c896fe29 3756 }
ec7a869d 3757 }
098859f1
RH
3758 set_temp_val_reg(s, ots, oreg);
3759 ots->mem_coherent = 0;
3760
3761 if (NEED_SYNC_ARG(0)) {
3762 temp_sync(s, ots, allocated_regs, 0, 0);
3763 }
c896fe29
FB
3764}
3765
bab1671f
RH
3766/*
3767 * Specialized code generation for INDEX_op_dup_vec.
3768 */
3769static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3770{
3771 const TCGLifeData arg_life = op->life;
3772 TCGRegSet dup_out_regs, dup_in_regs;
3773 TCGTemp *its, *ots;
3774 TCGType itype, vtype;
3775 unsigned vece;
31c96417 3776 int lowpart_ofs;
bab1671f
RH
3777 bool ok;
3778
3779 ots = arg_temp(op->args[0]);
3780 its = arg_temp(op->args[1]);
3781
3782 /* ENV should not be modified. */
e01fa97d 3783 tcg_debug_assert(!temp_readonly(ots));
bab1671f
RH
3784
3785 itype = its->type;
3786 vece = TCGOP_VECE(op);
3787 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3788
3789 if (its->val_type == TEMP_VAL_CONST) {
3790 /* Propagate constant via movi -> dupi. */
3791 tcg_target_ulong val = its->val;
3792 if (IS_DEAD_ARG(1)) {
3793 temp_dead(s, its);
3794 }
31fd884b 3795 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
bab1671f
RH
3796 return;
3797 }
3798
9be0d080
RH
3799 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3800 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
bab1671f
RH
3801
3802 /* Allocate the output register now. */
3803 if (ots->val_type != TEMP_VAL_REG) {
3804 TCGRegSet allocated_regs = s->reserved_regs;
098859f1 3805 TCGReg oreg;
bab1671f
RH
3806
3807 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3808 /* Make sure to not spill the input register. */
3809 tcg_regset_set_reg(allocated_regs, its->reg);
3810 }
098859f1 3811 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 3812 output_pref(op, 0), ots->indirect_base);
098859f1 3813 set_temp_val_reg(s, ots, oreg);
bab1671f
RH
3814 }
3815
3816 switch (its->val_type) {
3817 case TEMP_VAL_REG:
3818 /*
3819 * The dup constraints must be broad, covering all possible VECE.
3820 * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3821 * to fail, indicating that extra moves are required for that case.
3822 */
3823 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
3824 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
3825 goto done;
3826 }
3827 /* Try again from memory or a vector input register. */
3828 }
3829 if (!its->mem_coherent) {
3830 /*
3831 * The input register is not synced, and so an extra store
3832 * would be required to use memory. Attempt an integer-vector
3833 * register move first. We do not have a TCGRegSet for this.
3834 */
3835 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
3836 break;
3837 }
3838 /* Sync the temp back to its slot and load from there. */
3839 temp_sync(s, its, s->reserved_regs, 0, 0);
3840 }
3841 /* fall through */
3842
3843 case TEMP_VAL_MEM:
31c96417
RH
3844 lowpart_ofs = 0;
3845 if (HOST_BIG_ENDIAN) {
3846 lowpart_ofs = tcg_type_size(itype) - (1 << vece);
3847 }
d6ecb4a9 3848 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
31c96417 3849 its->mem_offset + lowpart_ofs)) {
d6ecb4a9
RH
3850 goto done;
3851 }
098859f1 3852 /* Load the input into the destination vector register. */
bab1671f
RH
3853 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
3854 break;
3855
3856 default:
3857 g_assert_not_reached();
3858 }
3859
3860 /* We now have a vector input register, so dup must succeed. */
3861 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
3862 tcg_debug_assert(ok);
3863
3864 done:
36f5539c 3865 ots->mem_coherent = 0;
bab1671f
RH
3866 if (IS_DEAD_ARG(1)) {
3867 temp_dead(s, its);
3868 }
3869 if (NEED_SYNC_ARG(0)) {
3870 temp_sync(s, ots, s->reserved_regs, 0, 0);
3871 }
3872 if (IS_DEAD_ARG(0)) {
3873 temp_dead(s, ots);
3874 }
3875}
3876
dd186292 3877static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 3878{
dd186292
RH
3879 const TCGLifeData arg_life = op->life;
3880 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
3881 TCGRegSet i_allocated_regs;
3882 TCGRegSet o_allocated_regs;
b6638662
RH
3883 int i, k, nb_iargs, nb_oargs;
3884 TCGReg reg;
c896fe29
FB
3885 TCGArg arg;
3886 const TCGArgConstraint *arg_ct;
3887 TCGTemp *ts;
3888 TCGArg new_args[TCG_MAX_OP_ARGS];
3889 int const_args[TCG_MAX_OP_ARGS];
3890
3891 nb_oargs = def->nb_oargs;
3892 nb_iargs = def->nb_iargs;
3893
3894 /* copy constants */
a813e36f 3895 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 3896 op->args + nb_oargs + nb_iargs,
c896fe29
FB
3897 sizeof(TCGArg) * def->nb_cargs);
3898
d21369f5
RH
3899 i_allocated_regs = s->reserved_regs;
3900 o_allocated_regs = s->reserved_regs;
82790a87 3901
a813e36f 3902 /* satisfy input constraints */
dd186292 3903 for (k = 0; k < nb_iargs; k++) {
29f5e925
RH
3904 TCGRegSet i_preferred_regs, i_required_regs;
3905 bool allocate_new_reg, copyto_new_reg;
3906 TCGTemp *ts2;
3907 int i1, i2;
d62816f2 3908
66792f90 3909 i = def->args_ct[nb_oargs + k].sort_index;
dd186292 3910 arg = op->args[i];
c896fe29 3911 arg_ct = &def->args_ct[i];
43439139 3912 ts = arg_temp(arg);
40ae5c62
RH
3913
3914 if (ts->val_type == TEMP_VAL_CONST
a4fbbd77 3915 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
40ae5c62
RH
3916 /* constant is OK for instruction */
3917 const_args[i] = 1;
3918 new_args[i] = ts->val;
d62816f2 3919 continue;
c896fe29 3920 }
40ae5c62 3921
1c1824dc
RH
3922 reg = ts->reg;
3923 i_preferred_regs = 0;
29f5e925 3924 i_required_regs = arg_ct->regs;
1c1824dc 3925 allocate_new_reg = false;
29f5e925
RH
3926 copyto_new_reg = false;
3927
3928 switch (arg_ct->pair) {
3929 case 0: /* not paired */
3930 if (arg_ct->ialias) {
31fd884b 3931 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
3932
3933 /*
3934 * If the input is readonly, then it cannot also be an
3935 * output and aliased to itself. If the input is not
3936 * dead after the instruction, we must allocate a new
3937 * register and move it.
3938 */
3939 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
3940 allocate_new_reg = true;
3941 } else if (ts->val_type == TEMP_VAL_REG) {
3942 /*
3943 * Check if the current register has already been
3944 * allocated for another input.
3945 */
3946 allocate_new_reg =
3947 tcg_regset_test_reg(i_allocated_regs, reg);
3948 }
3949 }
3950 if (!allocate_new_reg) {
3951 temp_load(s, ts, i_required_regs, i_allocated_regs,
3952 i_preferred_regs);
3953 reg = ts->reg;
3954 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
3955 }
3956 if (allocate_new_reg) {
3957 /*
3958 * Allocate a new register matching the constraint
3959 * and move the temporary register into it.
3960 */
3961 temp_load(s, ts, tcg_target_available_regs[ts->type],
3962 i_allocated_regs, 0);
3963 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
3964 i_preferred_regs, ts->indirect_base);
3965 copyto_new_reg = true;
3966 }
3967 break;
3968
3969 case 1:
3970 /* First of an input pair; if i1 == i2, the second is an output. */
3971 i1 = i;
3972 i2 = arg_ct->pair_index;
3973 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
3974
3975 /*
3976 * It is easier to default to allocating a new pair
3977 * and to identify a few cases where it's not required.
3978 */
3979 if (arg_ct->ialias) {
31fd884b 3980 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
3981 if (IS_DEAD_ARG(i1) &&
3982 IS_DEAD_ARG(i2) &&
3983 !temp_readonly(ts) &&
3984 ts->val_type == TEMP_VAL_REG &&
3985 ts->reg < TCG_TARGET_NB_REGS - 1 &&
3986 tcg_regset_test_reg(i_required_regs, reg) &&
3987 !tcg_regset_test_reg(i_allocated_regs, reg) &&
3988 !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
3989 (ts2
3990 ? ts2->val_type == TEMP_VAL_REG &&
3991 ts2->reg == reg + 1 &&
3992 !temp_readonly(ts2)
3993 : s->reg_to_temp[reg + 1] == NULL)) {
3994 break;
3995 }
3996 } else {
3997 /* Without aliasing, the pair must also be an input. */
3998 tcg_debug_assert(ts2);
3999 if (ts->val_type == TEMP_VAL_REG &&
4000 ts2->val_type == TEMP_VAL_REG &&
4001 ts2->reg == reg + 1 &&
4002 tcg_regset_test_reg(i_required_regs, reg)) {
4003 break;
4004 }
4005 }
4006 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4007 0, ts->indirect_base);
4008 goto do_pair;
4009
4010 case 2: /* pair second */
4011 reg = new_args[arg_ct->pair_index] + 1;
4012 goto do_pair;
1c1824dc 4013
29f5e925
RH
4014 case 3: /* ialias with second output, no first input */
4015 tcg_debug_assert(arg_ct->ialias);
31fd884b 4016 i_preferred_regs = output_pref(op, arg_ct->alias_index);
d62816f2 4017
29f5e925
RH
4018 if (IS_DEAD_ARG(i) &&
4019 !temp_readonly(ts) &&
4020 ts->val_type == TEMP_VAL_REG &&
4021 reg > 0 &&
4022 s->reg_to_temp[reg - 1] == NULL &&
4023 tcg_regset_test_reg(i_required_regs, reg) &&
4024 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4025 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4026 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4027 break;
4028 }
4029 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4030 i_allocated_regs, 0,
4031 ts->indirect_base);
4032 tcg_regset_set_reg(i_allocated_regs, reg);
4033 reg += 1;
4034 goto do_pair;
4035
4036 do_pair:
c0522136 4037 /*
29f5e925
RH
4038 * If an aliased input is not dead after the instruction,
4039 * we must allocate a new register and move it.
c0522136 4040 */
29f5e925
RH
4041 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4042 TCGRegSet t_allocated_regs = i_allocated_regs;
4043
1c1824dc 4044 /*
29f5e925
RH
4045 * Because of the alias, and the continued life, make sure
4046 * that the temp is somewhere *other* than the reg pair,
4047 * and we get a copy in reg.
1c1824dc 4048 */
29f5e925
RH
4049 tcg_regset_set_reg(t_allocated_regs, reg);
4050 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4051 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4052 /* If ts was already in reg, copy it somewhere else. */
4053 TCGReg nr;
4054 bool ok;
4055
4056 tcg_debug_assert(ts->kind != TEMP_FIXED);
4057 nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4058 t_allocated_regs, 0, ts->indirect_base);
4059 ok = tcg_out_mov(s, ts->type, nr, reg);
4060 tcg_debug_assert(ok);
4061
4062 set_temp_val_reg(s, ts, nr);
4063 } else {
4064 temp_load(s, ts, tcg_target_available_regs[ts->type],
4065 t_allocated_regs, 0);
4066 copyto_new_reg = true;
4067 }
4068 } else {
4069 /* Preferably allocate to reg, otherwise copy. */
4070 i_required_regs = (TCGRegSet)1 << reg;
4071 temp_load(s, ts, i_required_regs, i_allocated_regs,
4072 i_preferred_regs);
4073 copyto_new_reg = ts->reg != reg;
5ff9d6a4 4074 }
29f5e925 4075 break;
d62816f2 4076
29f5e925
RH
4077 default:
4078 g_assert_not_reached();
1c1824dc 4079 }
d62816f2 4080
29f5e925 4081 if (copyto_new_reg) {
78113e83 4082 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
240c08d0
RH
4083 /*
4084 * Cross register class move not supported. Sync the
4085 * temp back to its slot and load from there.
4086 */
4087 temp_sync(s, ts, i_allocated_regs, 0, 0);
4088 tcg_out_ld(s, ts->type, reg,
4089 ts->mem_base->reg, ts->mem_offset);
78113e83 4090 }
c896fe29 4091 }
c896fe29
FB
4092 new_args[i] = reg;
4093 const_args[i] = 0;
82790a87 4094 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29 4095 }
a813e36f 4096
a52ad07e
AJ
4097 /* mark dead temporaries and free the associated registers */
4098 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4099 if (IS_DEAD_ARG(i)) {
43439139 4100 temp_dead(s, arg_temp(op->args[i]));
a52ad07e
AJ
4101 }
4102 }
4103
b4cb76e6
RH
4104 if (def->flags & TCG_OPF_COND_BRANCH) {
4105 tcg_reg_alloc_cbranch(s, i_allocated_regs);
4106 } else if (def->flags & TCG_OPF_BB_END) {
82790a87 4107 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 4108 } else {
e8996ee0 4109 if (def->flags & TCG_OPF_CALL_CLOBBER) {
a813e36f 4110 /* XXX: permit a generic clobber register list? */
c8074023
RH
4111 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4112 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 4113 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 4114 }
c896fe29 4115 }
3d5c5f87
AJ
4116 }
4117 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
4118 /* sync globals if the op has side effects and might trigger
4119 an exception. */
82790a87 4120 sync_globals(s, i_allocated_regs);
c896fe29 4121 }
a813e36f 4122
e8996ee0 4123 /* satisfy the output constraints */
e8996ee0 4124 for(k = 0; k < nb_oargs; k++) {
66792f90 4125 i = def->args_ct[k].sort_index;
dd186292 4126 arg = op->args[i];
e8996ee0 4127 arg_ct = &def->args_ct[i];
43439139 4128 ts = arg_temp(arg);
d63e3b6e
RH
4129
4130 /* ENV should not be modified. */
e01fa97d 4131 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4132
29f5e925
RH
4133 switch (arg_ct->pair) {
4134 case 0: /* not paired */
4135 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
4136 reg = new_args[arg_ct->alias_index];
4137 } else if (arg_ct->newreg) {
4138 reg = tcg_reg_alloc(s, arg_ct->regs,
4139 i_allocated_regs | o_allocated_regs,
31fd884b 4140 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4141 } else {
4142 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
31fd884b 4143 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4144 }
4145 break;
4146
4147 case 1: /* first of pair */
4148 tcg_debug_assert(!arg_ct->newreg);
4149 if (arg_ct->oalias) {
4150 reg = new_args[arg_ct->alias_index];
4151 break;
4152 }
4153 reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
31fd884b 4154 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4155 break;
4156
4157 case 2: /* second of pair */
4158 tcg_debug_assert(!arg_ct->newreg);
4159 if (arg_ct->oalias) {
4160 reg = new_args[arg_ct->alias_index];
4161 } else {
4162 reg = new_args[arg_ct->pair_index] + 1;
4163 }
4164 break;
4165
4166 case 3: /* first of pair, aliasing with a second input */
4167 tcg_debug_assert(!arg_ct->newreg);
4168 reg = new_args[arg_ct->pair_index] - 1;
4169 break;
4170
4171 default:
4172 g_assert_not_reached();
c896fe29 4173 }
82790a87 4174 tcg_regset_set_reg(o_allocated_regs, reg);
098859f1 4175 set_temp_val_reg(s, ts, reg);
d63e3b6e 4176 ts->mem_coherent = 0;
e8996ee0 4177 new_args[i] = reg;
c896fe29 4178 }
c896fe29
FB
4179 }
4180
c896fe29 4181 /* emit instruction */
d2fd745f
RH
4182 if (def->flags & TCG_OPF_VECTOR) {
4183 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
4184 new_args, const_args);
4185 } else {
4186 tcg_out_op(s, op->opc, new_args, const_args);
4187 }
4188
c896fe29
FB
4189 /* move the outputs in the correct register if needed */
4190 for(i = 0; i < nb_oargs; i++) {
43439139 4191 ts = arg_temp(op->args[i]);
d63e3b6e
RH
4192
4193 /* ENV should not be modified. */
e01fa97d 4194 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4195
ec7a869d 4196 if (NEED_SYNC_ARG(i)) {
98b4e186 4197 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
59d7c14e 4198 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 4199 temp_dead(s, ts);
ec7a869d 4200 }
c896fe29
FB
4201 }
4202}
4203
efe86b21
RH
4204static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
4205{
4206 const TCGLifeData arg_life = op->life;
4207 TCGTemp *ots, *itsl, *itsh;
4208 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4209
4210 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
4211 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
4212 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
4213
4214 ots = arg_temp(op->args[0]);
4215 itsl = arg_temp(op->args[1]);
4216 itsh = arg_temp(op->args[2]);
4217
4218 /* ENV should not be modified. */
4219 tcg_debug_assert(!temp_readonly(ots));
4220
4221 /* Allocate the output register now. */
4222 if (ots->val_type != TEMP_VAL_REG) {
4223 TCGRegSet allocated_regs = s->reserved_regs;
4224 TCGRegSet dup_out_regs =
4225 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
098859f1 4226 TCGReg oreg;
efe86b21
RH
4227
4228 /* Make sure to not spill the input registers. */
4229 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
4230 tcg_regset_set_reg(allocated_regs, itsl->reg);
4231 }
4232 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
4233 tcg_regset_set_reg(allocated_regs, itsh->reg);
4234 }
4235
098859f1 4236 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 4237 output_pref(op, 0), ots->indirect_base);
098859f1 4238 set_temp_val_reg(s, ots, oreg);
efe86b21
RH
4239 }
4240
4241 /* Promote dup2 of immediates to dupi_vec. */
4242 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
4243 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
4244 MemOp vece = MO_64;
4245
4246 if (val == dup_const(MO_8, val)) {
4247 vece = MO_8;
4248 } else if (val == dup_const(MO_16, val)) {
4249 vece = MO_16;
4250 } else if (val == dup_const(MO_32, val)) {
4251 vece = MO_32;
4252 }
4253
4254 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
4255 goto done;
4256 }
4257
4258 /* If the two inputs form one 64-bit value, try dupm_vec. */
aef85402
RH
4259 if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
4260 itsh->temp_subindex == !HOST_BIG_ENDIAN &&
4261 itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
4262 TCGTemp *its = itsl - HOST_BIG_ENDIAN;
4263
4264 temp_sync(s, its + 0, s->reserved_regs, 0, 0);
4265 temp_sync(s, its + 1, s->reserved_regs, 0, 0);
4266
efe86b21
RH
4267 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
4268 its->mem_base->reg, its->mem_offset)) {
4269 goto done;
4270 }
4271 }
4272
4273 /* Fall back to generic expansion. */
4274 return false;
4275
4276 done:
36f5539c 4277 ots->mem_coherent = 0;
efe86b21
RH
4278 if (IS_DEAD_ARG(1)) {
4279 temp_dead(s, itsl);
4280 }
4281 if (IS_DEAD_ARG(2)) {
4282 temp_dead(s, itsh);
4283 }
4284 if (NEED_SYNC_ARG(0)) {
4285 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
4286 } else if (IS_DEAD_ARG(0)) {
4287 temp_dead(s, ots);
4288 }
4289 return true;
4290}
4291
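/*
 * Editor's note: a small, self-contained illustration (not QEMU code) of the
 * deposit64() promotion in tcg_reg_alloc_dup2() above: the low input supplies
 * bits [31:0] and the high input bits [63:32] of the candidate dupi constant.
 * The helper name is hypothetical.
 */
static uint64_t pair_to_dupi_const_sketch(uint32_t lo, uint32_t hi)
{
    /* Equivalent to deposit64(lo, 32, 32, hi). */
    return (uint64_t)lo | ((uint64_t)hi << 32);
}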
39004a71
RH
4292static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
4293 TCGRegSet allocated_regs)
c896fe29 4294{
39004a71
RH
4295 if (ts->val_type == TEMP_VAL_REG) {
4296 if (ts->reg != reg) {
4297 tcg_reg_free(s, reg, allocated_regs);
4298 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4299 /*
4300 * Cross register class move not supported. Sync the
4301 * temp back to its slot and load from there.
4302 */
4303 temp_sync(s, ts, allocated_regs, 0, 0);
4304 tcg_out_ld(s, ts->type, reg,
4305 ts->mem_base->reg, ts->mem_offset);
4306 }
4307 }
4308 } else {
4309 TCGRegSet arg_set = 0;
c896fe29 4310
39004a71
RH
4311 tcg_reg_free(s, reg, allocated_regs);
4312 tcg_regset_set_reg(arg_set, reg);
4313 temp_load(s, ts, arg_set, allocated_regs, 0);
b03cce8e 4314 }
39004a71 4315}
39cf05d3 4316
39004a71
RH
4317static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
4318 TCGRegSet allocated_regs)
4319{
4320 /*
4321 * When the destination is on the stack, load up the temp and store.
4322 * If there are many call-saved registers, the temp might live to
4323 * see another use; otherwise it'll be discarded.
4324 */
4325 temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
4326 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
4327 TCG_TARGET_CALL_STACK_OFFSET +
4328 stk_slot * sizeof(tcg_target_long));
4329}
a813e36f 4330
39004a71
RH
4331static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
4332 TCGTemp *ts, TCGRegSet *allocated_regs)
4333{
4334 if (REG_P(l)) {
4335 TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
4336 load_arg_reg(s, reg, ts, *allocated_regs);
4337 tcg_regset_set_reg(*allocated_regs, reg);
4338 } else {
4339 load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
4340 ts, *allocated_regs);
4341 }
4342}
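/*
 * Editor's note: a hedged, self-contained sketch (not QEMU code) of the
 * register/stack split that load_arg_normal() makes above: the first few
 * argument slots map to the target's integer argument registers, the rest
 * become stack slots addressed from TCG_REG_CALL_STACK.  The constant and
 * helper names below are hypothetical.
 */
#define NB_IARG_REGS_SKETCH 6   /* e.g. six integer argument registers */

typedef struct ArgLocSketch {
    bool in_reg;    /* true: pass in a register, false: pass on the stack */
    int index;      /* register number or stack slot index */
} ArgLocSketch;

static ArgLocSketch classify_arg_slot_sketch(int arg_slot)
{
    ArgLocSketch loc;

    loc.in_reg = arg_slot < NB_IARG_REGS_SKETCH;
    loc.index = loc.in_reg ? arg_slot : arg_slot - NB_IARG_REGS_SKETCH;
    return loc;
}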
40ae5c62 4343
39004a71
RH
4344static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
4345{
4346 const int nb_oargs = TCGOP_CALLO(op);
4347 const int nb_iargs = TCGOP_CALLI(op);
4348 const TCGLifeData arg_life = op->life;
4349 const TCGHelperInfo *info = tcg_call_info(op);
4350 TCGRegSet allocated_regs = s->reserved_regs;
4351 int i;
40ae5c62 4352
39004a71
RH
4353 /*
4354 * Move inputs into place in reverse order,
4355 * so that we place stacked arguments first.
4356 */
4357 for (i = nb_iargs - 1; i >= 0; --i) {
4358 const TCGCallArgumentLoc *loc = &info->in[i];
4359 TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
40ae5c62 4360
39004a71
RH
4361 switch (loc->kind) {
4362 case TCG_CALL_ARG_NORMAL:
4363 case TCG_CALL_ARG_EXTEND_U:
4364 case TCG_CALL_ARG_EXTEND_S:
4365 load_arg_normal(s, loc, ts, &allocated_regs);
4366 break;
4367 default:
4368 g_assert_not_reached();
c896fe29 4369 }
c896fe29 4370 }
a813e36f 4371
39004a71 4372 /* Mark dead temporaries and free the associated registers. */
dd186292 4373 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 4374 if (IS_DEAD_ARG(i)) {
43439139 4375 temp_dead(s, arg_temp(op->args[i]));
c896fe29
FB
4376 }
4377 }
a813e36f 4378
39004a71 4379 /* Clobber call registers. */
c8074023
RH
4380 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4381 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 4382 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
4383 }
4384 }
78505279 4385
39004a71
RH
4386 /*
4387 * Save globals if they might be written by the helper,
4388 * sync them if they might be read.
4389 */
4390 if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
78505279 4391 /* Nothing to do */
39004a71 4392 } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
78505279
AJ
4393 sync_globals(s, allocated_regs);
4394 } else {
b9c18f56
AJ
4395 save_globals(s, allocated_regs);
4396 }
c896fe29 4397
7b7d8b2d 4398#ifdef CONFIG_TCG_INTERPRETER
f9c4bb80 4399 tcg_out_call(s, tcg_call_func(op), info->cif);
7b7d8b2d 4400#else
39004a71 4401 tcg_out_call(s, tcg_call_func(op));
7b7d8b2d 4402#endif
c896fe29 4403
39004a71
RH
4404 /* Assign output registers and emit moves if needed. */
4405 switch (info->out_kind) {
4406 case TCG_CALL_RET_NORMAL:
4407 for (i = 0; i < nb_oargs; i++) {
4408 TCGTemp *ts = arg_temp(op->args[i]);
4409 TCGReg reg = tcg_target_call_oarg_regs[i];
d63e3b6e 4410
39004a71
RH
4411 /* ENV should not be modified. */
4412 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4413
39004a71
RH
4414 set_temp_val_reg(s, ts, reg);
4415 ts->mem_coherent = 0;
4416 }
4417 break;
4418 default:
4419 g_assert_not_reached();
4420 }
4421
4422 /* Flush or discard output registers as needed. */
4423 for (i = 0; i < nb_oargs; i++) {
4424 TCGTemp *ts = arg_temp(op->args[i]);
d63e3b6e 4425 if (NEED_SYNC_ARG(i)) {
39004a71 4426 temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
d63e3b6e
RH
4427 } else if (IS_DEAD_ARG(i)) {
4428 temp_dead(s, ts);
c896fe29
FB
4429 }
4430 }
c896fe29
FB
4431}
4432
4433#ifdef CONFIG_PROFILER
4434
c3fac113
EC
4435/* avoid copy/paste errors */
4436#define PROF_ADD(to, from, field) \
4437 do { \
d73415a3 4438 (to)->field += qatomic_read(&((from)->field)); \
c3fac113
EC
4439 } while (0)
4440
4441#define PROF_MAX(to, from, field) \
4442 do { \
d73415a3 4443 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
c3fac113
EC
4444 if (val__ > (to)->field) { \
4445 (to)->field = val__; \
4446 } \
4447 } while (0)
4448
4449/* Pass in a zero'ed @prof */
4450static inline
4451void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
4452{
0e2d61cf 4453 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
c3fac113
EC
4454 unsigned int i;
4455
3468b59e 4456 for (i = 0; i < n_ctxs; i++) {
d73415a3 4457 TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3468b59e 4458 const TCGProfile *orig = &s->prof;
c3fac113
EC
4459
4460 if (counters) {
72fd2efb 4461 PROF_ADD(prof, orig, cpu_exec_time);
c3fac113
EC
4462 PROF_ADD(prof, orig, tb_count1);
4463 PROF_ADD(prof, orig, tb_count);
4464 PROF_ADD(prof, orig, op_count);
4465 PROF_MAX(prof, orig, op_count_max);
4466 PROF_ADD(prof, orig, temp_count);
4467 PROF_MAX(prof, orig, temp_count_max);
4468 PROF_ADD(prof, orig, del_op_count);
4469 PROF_ADD(prof, orig, code_in_len);
4470 PROF_ADD(prof, orig, code_out_len);
4471 PROF_ADD(prof, orig, search_out_len);
4472 PROF_ADD(prof, orig, interm_time);
4473 PROF_ADD(prof, orig, code_time);
4474 PROF_ADD(prof, orig, la_time);
4475 PROF_ADD(prof, orig, opt_time);
4476 PROF_ADD(prof, orig, restore_count);
4477 PROF_ADD(prof, orig, restore_time);
4478 }
4479 if (table) {
4480 int i;
4481
4482 for (i = 0; i < NB_OPS; i++) {
4483 PROF_ADD(prof, orig, table_op_count[i]);
4484 }
4485 }
4486 }
4487}
4488
4489#undef PROF_ADD
4490#undef PROF_MAX
4491
4492static void tcg_profile_snapshot_counters(TCGProfile *prof)
4493{
4494 tcg_profile_snapshot(prof, true, false);
4495}
4496
4497static void tcg_profile_snapshot_table(TCGProfile *prof)
4498{
4499 tcg_profile_snapshot(prof, false, true);
4500}
c896fe29 4501
b6a7f3e0 4502void tcg_dump_op_count(GString *buf)
c896fe29 4503{
c3fac113 4504 TCGProfile prof = {};
c896fe29 4505 int i;
d70724ce 4506
c3fac113 4507 tcg_profile_snapshot_table(&prof);
15fc7daa 4508 for (i = 0; i < NB_OPS; i++) {
b6a7f3e0
DB
4509 g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
4510 prof.table_op_count[i]);
c896fe29 4511 }
c896fe29 4512}
72fd2efb
EC
4513
4514int64_t tcg_cpu_exec_time(void)
4515{
0e2d61cf 4516 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
72fd2efb
EC
4517 unsigned int i;
4518 int64_t ret = 0;
4519
4520 for (i = 0; i < n_ctxs; i++) {
d73415a3 4521 const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
72fd2efb
EC
4522 const TCGProfile *prof = &s->prof;
4523
d73415a3 4524 ret += qatomic_read(&prof->cpu_exec_time);
72fd2efb
EC
4525 }
4526 return ret;
4527}
246ae24d 4528#else
b6a7f3e0 4529void tcg_dump_op_count(GString *buf)
246ae24d 4530{
b6a7f3e0 4531 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
246ae24d 4532}
72fd2efb
EC
4533
4534int64_t tcg_cpu_exec_time(void)
4535{
4536 error_report("%s: TCG profiler not compiled", __func__);
4537 exit(EXIT_FAILURE);
4538}
c896fe29
FB
4539#endif
4540
4541
fbf59aad 4542int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
c896fe29 4543{
c3fac113
EC
4544#ifdef CONFIG_PROFILER
4545 TCGProfile *prof = &s->prof;
4546#endif
15fa08f8
RH
4547 int i, num_insns;
4548 TCGOp *op;
c896fe29 4549
04fe6400
RH
4550#ifdef CONFIG_PROFILER
4551 {
c1f543b7 4552 int n = 0;
04fe6400 4553
15fa08f8
RH
4554 QTAILQ_FOREACH(op, &s->ops, link) {
4555 n++;
4556 }
d73415a3 4557 qatomic_set(&prof->op_count, prof->op_count + n);
c3fac113 4558 if (n > prof->op_count_max) {
d73415a3 4559 qatomic_set(&prof->op_count_max, n);
04fe6400
RH
4560 }
4561
4562 n = s->nb_temps;
d73415a3 4563 qatomic_set(&prof->temp_count, prof->temp_count + n);
c3fac113 4564 if (n > prof->temp_count_max) {
d73415a3 4565 qatomic_set(&prof->temp_count_max, n);
04fe6400
RH
4566 }
4567 }
4568#endif
4569
c896fe29 4570#ifdef DEBUG_DISAS
d977e1c2 4571 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
fbf59aad 4572 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4573 FILE *logfile = qemu_log_trylock();
78b54858
RH
4574 if (logfile) {
4575 fprintf(logfile, "OP:\n");
b7a83ff8 4576 tcg_dump_ops(s, logfile, false);
78b54858
RH
4577 fprintf(logfile, "\n");
4578 qemu_log_unlock(logfile);
4579 }
c896fe29
FB
4580 }
4581#endif
4582
bef16ab4
RH
4583#ifdef CONFIG_DEBUG_TCG
4584 /* Ensure all labels referenced have been emitted. */
4585 {
4586 TCGLabel *l;
4587 bool error = false;
4588
4589 QSIMPLEQ_FOREACH(l, &s->labels, next) {
4590 if (unlikely(!l->present) && l->refs) {
4591 qemu_log_mask(CPU_LOG_TB_OP,
4592 "$L%d referenced but not present.\n", l->id);
4593 error = true;
4594 }
4595 }
4596 assert(!error);
4597 }
4598#endif
4599
c5cc28ff 4600#ifdef CONFIG_PROFILER
d73415a3 4601 qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
c5cc28ff
AJ
4602#endif
4603
8f2e8c07 4604#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 4605 tcg_optimize(s);
8f2e8c07
KB
4606#endif
4607
a23a9ec6 4608#ifdef CONFIG_PROFILER
d73415a3
SH
4609 qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
4610 qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
a23a9ec6 4611#endif
c5cc28ff 4612
b4fc67c7 4613 reachable_code_pass(s);
b83eabea 4614 liveness_pass_1(s);
5a18407f 4615
b83eabea 4616 if (s->nb_indirects > 0) {
5a18407f 4617#ifdef DEBUG_DISAS
b83eabea 4618 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
fbf59aad 4619 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4620 FILE *logfile = qemu_log_trylock();
78b54858
RH
4621 if (logfile) {
4622 fprintf(logfile, "OP before indirect lowering:\n");
b7a83ff8 4623 tcg_dump_ops(s, logfile, false);
78b54858
RH
4624 fprintf(logfile, "\n");
4625 qemu_log_unlock(logfile);
4626 }
b83eabea 4627 }
5a18407f 4628#endif
b83eabea
RH
4629 /* Replace indirect temps with direct temps. */
4630 if (liveness_pass_2(s)) {
4631 /* If changes were made, re-run liveness. */
4632 liveness_pass_1(s);
5a18407f
RH
4633 }
4634 }
c5cc28ff 4635
a23a9ec6 4636#ifdef CONFIG_PROFILER
d73415a3 4637 qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
a23a9ec6 4638#endif
c896fe29
FB
4639
4640#ifdef DEBUG_DISAS
d977e1c2 4641 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
fbf59aad 4642 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4643 FILE *logfile = qemu_log_trylock();
78b54858
RH
4644 if (logfile) {
4645 fprintf(logfile, "OP after optimization and liveness analysis:\n");
b7a83ff8 4646 tcg_dump_ops(s, logfile, true);
78b54858
RH
4647 fprintf(logfile, "\n");
4648 qemu_log_unlock(logfile);
4649 }
c896fe29
FB
4650 }
4651#endif
4652
35abb009
RH
4653 /* Initialize goto_tb jump offsets. */
4654 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
4655 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
4656 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
4657 if (TCG_TARGET_HAS_direct_jump) {
4658 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
4659 tcg_ctx->tb_jmp_target_addr = NULL;
4660 } else {
4661 tcg_ctx->tb_jmp_insn_offset = NULL;
4662 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
4663 }
4664
c896fe29
FB
4665 tcg_reg_alloc_start(s);
4666
db0c51a3
RH
4667 /*
4668 * Reset the buffer pointers when restarting after overflow.
4669 * TODO: Move this into translate-all.c with the rest of the
4670 * buffer management. Having only this done here is confusing.
4671 */
4672 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
4673 s->code_ptr = s->code_buf;
c896fe29 4674
659ef5cb 4675#ifdef TCG_TARGET_NEED_LDST_LABELS
6001f772 4676 QSIMPLEQ_INIT(&s->ldst_labels);
659ef5cb 4677#endif
57a26946
RH
4678#ifdef TCG_TARGET_NEED_POOL_LABELS
4679 s->pool_labels = NULL;
4680#endif
9ecefc84 4681
fca8a500 4682 num_insns = -1;
15fa08f8 4683 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb 4684 TCGOpcode opc = op->opc;
b3db8758 4685
c896fe29 4686#ifdef CONFIG_PROFILER
d73415a3 4687 qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
c896fe29 4688#endif
c45cb8bb
RH
4689
4690 switch (opc) {
c896fe29 4691 case INDEX_op_mov_i32:
c896fe29 4692 case INDEX_op_mov_i64:
d2fd745f 4693 case INDEX_op_mov_vec:
dd186292 4694 tcg_reg_alloc_mov(s, op);
c896fe29 4695 break;
bab1671f
RH
4696 case INDEX_op_dup_vec:
4697 tcg_reg_alloc_dup(s, op);
4698 break;
765b842a 4699 case INDEX_op_insn_start:
fca8a500 4700 if (num_insns >= 0) {
9f754620
RH
4701 size_t off = tcg_current_code_size(s);
4702 s->gen_insn_end_off[num_insns] = off;
4703 /* Assert that we do not overflow our stored offset. */
4704 assert(s->gen_insn_end_off[num_insns] == off);
fca8a500
RH
4705 }
4706 num_insns++;
bad729e2
RH
4707 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
4708 target_ulong a;
4709#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 4710 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
bad729e2 4711#else
efee3746 4712 a = op->args[i];
bad729e2 4713#endif
fca8a500 4714 s->gen_insn_data[num_insns][i] = a;
bad729e2 4715 }
c896fe29 4716 break;
5ff9d6a4 4717 case INDEX_op_discard:
43439139 4718 temp_dead(s, arg_temp(op->args[0]));
5ff9d6a4 4719 break;
c896fe29 4720 case INDEX_op_set_label:
e8996ee0 4721 tcg_reg_alloc_bb_end(s, s->reserved_regs);
92ab8e7d 4722 tcg_out_label(s, arg_label(op->args[0]));
c896fe29
FB
4723 break;
4724 case INDEX_op_call:
dd186292 4725 tcg_reg_alloc_call(s, op);
c45cb8bb 4726 break;
efe86b21
RH
4727 case INDEX_op_dup2_vec:
4728 if (tcg_reg_alloc_dup2(s, op)) {
4729 break;
4730 }
4731 /* fall through */
c896fe29 4732 default:
25c4d9cc 4733 /* Sanity check that we've not introduced any unhandled opcodes. */
be0f34b5 4734 tcg_debug_assert(tcg_op_supported(opc));
c896fe29
FB
4735 /* Note: in order to speed up the code, it would be much
4736 faster to have specialized register allocator functions for
4737 some common argument patterns */
dd186292 4738 tcg_reg_alloc_op(s, op);
c896fe29
FB
4739 break;
4740 }
b125f9dc
RH
4741 /* Test for (pending) buffer overflow. The assumption is that any
4742 one operation beginning below the high water mark cannot overrun
4743 the buffer completely. Thus we can test for overflow after
4744 generating code without having to check during generation. */
644da9b3 4745 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
b125f9dc
RH
4746 return -1;
4747 }
6e6c4efe
RH
4748 /* Test for TB overflow, as seen by gen_insn_end_off. */
4749 if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
4750 return -2;
4751 }
c896fe29 4752 }
fca8a500
RH
4753 tcg_debug_assert(num_insns >= 0);
4754 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
c45cb8bb 4755
b76f0d8c 4756 /* Generate TB finalization at the end of block */
659ef5cb 4757#ifdef TCG_TARGET_NEED_LDST_LABELS
aeee05f5
RH
4758 i = tcg_out_ldst_finalize(s);
4759 if (i < 0) {
4760 return i;
23dceda6 4761 }
659ef5cb 4762#endif
57a26946 4763#ifdef TCG_TARGET_NEED_POOL_LABELS
1768987b
RH
4764 i = tcg_out_pool_finalize(s);
4765 if (i < 0) {
4766 return i;
57a26946
RH
4767 }
4768#endif
7ecd02a0
RH
4769 if (!tcg_resolve_relocs(s)) {
4770 return -2;
4771 }
c896fe29 4772
df5d2b16 4773#ifndef CONFIG_TCG_INTERPRETER
c896fe29 4774 /* flush instruction cache */
db0c51a3
RH
4775 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
4776 (uintptr_t)s->code_buf,
1da8de39 4777 tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
df5d2b16 4778#endif
2aeabc08 4779
1813e175 4780 return tcg_current_code_size(s);
c896fe29
FB
4781}
4782
a23a9ec6 4783#ifdef CONFIG_PROFILER
3a841ab5 4784void tcg_dump_info(GString *buf)
a23a9ec6 4785{
c3fac113
EC
4786 TCGProfile prof = {};
4787 const TCGProfile *s;
4788 int64_t tb_count;
4789 int64_t tb_div_count;
4790 int64_t tot;
4791
4792 tcg_profile_snapshot_counters(&prof);
4793 s = &prof;
4794 tb_count = s->tb_count;
4795 tb_div_count = tb_count ? tb_count : 1;
4796 tot = s->interm_time + s->code_time;
a23a9ec6 4797
3a841ab5
DB
4798 g_string_append_printf(buf, "JIT cycles %" PRId64
4799 " (%0.3f s at 2.4 GHz)\n",
4800 tot, tot / 2.4e9);
4801 g_string_append_printf(buf, "translated TBs %" PRId64
4802 " (aborted=%" PRId64 " %0.1f%%)\n",
4803 tb_count, s->tb_count1 - tb_count,
4804 (double)(s->tb_count1 - s->tb_count)
4805 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4806 g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
4807 (double)s->op_count / tb_div_count, s->op_count_max);
4808 g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
4809 (double)s->del_op_count / tb_div_count);
4810 g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
4811 (double)s->temp_count / tb_div_count,
4812 s->temp_count_max);
4813 g_string_append_printf(buf, "avg host code/TB %0.1f\n",
4814 (double)s->code_out_len / tb_div_count);
4815 g_string_append_printf(buf, "avg search data/TB %0.1f\n",
4816 (double)s->search_out_len / tb_div_count);
a813e36f 4817
3a841ab5
DB
4818 g_string_append_printf(buf, "cycles/op %0.1f\n",
4819 s->op_count ? (double)tot / s->op_count : 0);
4820 g_string_append_printf(buf, "cycles/in byte %0.1f\n",
4821 s->code_in_len ? (double)tot / s->code_in_len : 0);
4822 g_string_append_printf(buf, "cycles/out byte %0.1f\n",
4823 s->code_out_len ? (double)tot / s->code_out_len : 0);
4824 g_string_append_printf(buf, "cycles/search byte %0.1f\n",
4825 s->search_out_len ?
4826 (double)tot / s->search_out_len : 0);
fca8a500 4827 if (tot == 0) {
a23a9ec6 4828 tot = 1;
fca8a500 4829 }
3a841ab5
DB
4830 g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
4831 (double)s->interm_time / tot * 100.0);
4832 g_string_append_printf(buf, " gen_code time %0.1f%%\n",
4833 (double)s->code_time / tot * 100.0);
4834 g_string_append_printf(buf, "optim./code time %0.1f%%\n",
4835 (double)s->opt_time / (s->code_time ?
4836 s->code_time : 1)
4837 * 100.0);
4838 g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
4839 (double)s->la_time / (s->code_time ?
4840 s->code_time : 1) * 100.0);
4841 g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
4842 s->restore_count);
4843 g_string_append_printf(buf, " avg cycles %0.1f\n",
4844 s->restore_count ?
4845 (double)s->restore_time / s->restore_count : 0);
a23a9ec6
FB
4846}
4847#else
3a841ab5 4848void tcg_dump_info(GString *buf)
a23a9ec6 4849{
3a841ab5 4850 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
a23a9ec6
FB
4851}
4852#endif
813da627
RH
4853
4854#ifdef ELF_HOST_MACHINE
5872bbf2
RH
4855/* In order to use this feature, the backend needs to do three things:
4856
4857 (1) Define ELF_HOST_MACHINE to indicate both the value to
4858 put into the ELF image and support for the feature.
4859
4860 (2) Define tcg_register_jit. This should create a buffer containing
4861 the contents of a .debug_frame section that describes the post-
4862 prologue unwind info for the tcg machine.
4863
4864 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4865*/
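/*
 * Editor's note: a schematic sketch (not a real backend, guarded out with
 * #if 0) of steps (2) and (3) above.  A real backend fills in a complete
 * CIE/FDE describing its prologue's unwind info; the empty initializer here
 * is only a placeholder to show the call shape.
 */
#if 0
static const DebugFrameHeader debug_frame_sketch = {
    /* ... CIE and FDE fields for this host's frame layout ... */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size,
                         &debug_frame_sketch, sizeof(debug_frame_sketch));
}
#endif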
813da627
RH
4866
4867/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
4868typedef enum {
4869 JIT_NOACTION = 0,
4870 JIT_REGISTER_FN,
4871 JIT_UNREGISTER_FN
4872} jit_actions_t;
4873
4874struct jit_code_entry {
4875 struct jit_code_entry *next_entry;
4876 struct jit_code_entry *prev_entry;
4877 const void *symfile_addr;
4878 uint64_t symfile_size;
4879};
4880
4881struct jit_descriptor {
4882 uint32_t version;
4883 uint32_t action_flag;
4884 struct jit_code_entry *relevant_entry;
4885 struct jit_code_entry *first_entry;
4886};
4887
4888void __jit_debug_register_code(void) __attribute__((noinline));
4889void __jit_debug_register_code(void)
4890{
4891 asm("");
4892}
4893
4894/* Must statically initialize the version, because GDB may check
4895 the version before we can set it. */
4896struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4897
4898/* End GDB interface. */
4899
4900static int find_string(const char *strtab, const char *str)
4901{
4902 const char *p = strtab + 1;
4903
4904 while (1) {
4905 if (strcmp(p, str) == 0) {
4906 return p - strtab;
4907 }
4908 p += strlen(p) + 1;
4909 }
4910}
4911
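/*
 * Editor's note: a worked example of find_string() on the packed string
 * table used below.  With str = "\0" ".text\0" ".debug_info\0" ...,
 * find_string(str, ".text") returns 1 and find_string(str, ".debug_info")
 * returns 7 (".text" plus its NUL occupy offsets 1..6), which are exactly
 * the sh_name/st_name offsets the ELF section and symbol headers expect.
 */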
755bf9e5 4912static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
2c90784a
RH
4913 const void *debug_frame,
4914 size_t debug_frame_size)
813da627 4915{
5872bbf2
RH
4916 struct __attribute__((packed)) DebugInfo {
4917 uint32_t len;
4918 uint16_t version;
4919 uint32_t abbrev;
4920 uint8_t ptr_size;
4921 uint8_t cu_die;
4922 uint16_t cu_lang;
4923 uintptr_t cu_low_pc;
4924 uintptr_t cu_high_pc;
4925 uint8_t fn_die;
4926 char fn_name[16];
4927 uintptr_t fn_low_pc;
4928 uintptr_t fn_high_pc;
4929 uint8_t cu_eoc;
4930 };
813da627
RH
4931
4932 struct ElfImage {
4933 ElfW(Ehdr) ehdr;
4934 ElfW(Phdr) phdr;
5872bbf2
RH
4935 ElfW(Shdr) shdr[7];
4936 ElfW(Sym) sym[2];
4937 struct DebugInfo di;
4938 uint8_t da[24];
4939 char str[80];
4940 };
4941
4942 struct ElfImage *img;
4943
4944 static const struct ElfImage img_template = {
4945 .ehdr = {
4946 .e_ident[EI_MAG0] = ELFMAG0,
4947 .e_ident[EI_MAG1] = ELFMAG1,
4948 .e_ident[EI_MAG2] = ELFMAG2,
4949 .e_ident[EI_MAG3] = ELFMAG3,
4950 .e_ident[EI_CLASS] = ELF_CLASS,
4951 .e_ident[EI_DATA] = ELF_DATA,
4952 .e_ident[EI_VERSION] = EV_CURRENT,
4953 .e_type = ET_EXEC,
4954 .e_machine = ELF_HOST_MACHINE,
4955 .e_version = EV_CURRENT,
4956 .e_phoff = offsetof(struct ElfImage, phdr),
4957 .e_shoff = offsetof(struct ElfImage, shdr),
4958 .e_ehsize = sizeof(ElfW(Shdr)),
4959 .e_phentsize = sizeof(ElfW(Phdr)),
4960 .e_phnum = 1,
4961 .e_shentsize = sizeof(ElfW(Shdr)),
4962 .e_shnum = ARRAY_SIZE(img->shdr),
4963 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
abbb3eae
RH
4964#ifdef ELF_HOST_FLAGS
4965 .e_flags = ELF_HOST_FLAGS,
4966#endif
4967#ifdef ELF_OSABI
4968 .e_ident[EI_OSABI] = ELF_OSABI,
4969#endif
5872bbf2
RH
4970 },
4971 .phdr = {
4972 .p_type = PT_LOAD,
4973 .p_flags = PF_X,
4974 },
4975 .shdr = {
4976 [0] = { .sh_type = SHT_NULL },
4977 /* Trick: The contents of code_gen_buffer are not present in
4978 this fake ELF file; that got allocated elsewhere. Therefore
4979 we mark .text as SHT_NOBITS (similar to .bss) so that readers
4980 will not look for contents. We can record any address. */
4981 [1] = { /* .text */
4982 .sh_type = SHT_NOBITS,
4983 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
4984 },
4985 [2] = { /* .debug_info */
4986 .sh_type = SHT_PROGBITS,
4987 .sh_offset = offsetof(struct ElfImage, di),
4988 .sh_size = sizeof(struct DebugInfo),
4989 },
4990 [3] = { /* .debug_abbrev */
4991 .sh_type = SHT_PROGBITS,
4992 .sh_offset = offsetof(struct ElfImage, da),
4993 .sh_size = sizeof(img->da),
4994 },
4995 [4] = { /* .debug_frame */
4996 .sh_type = SHT_PROGBITS,
4997 .sh_offset = sizeof(struct ElfImage),
4998 },
4999 [5] = { /* .symtab */
5000 .sh_type = SHT_SYMTAB,
5001 .sh_offset = offsetof(struct ElfImage, sym),
5002 .sh_size = sizeof(img->sym),
5003 .sh_info = 1,
5004 .sh_link = ARRAY_SIZE(img->shdr) - 1,
5005 .sh_entsize = sizeof(ElfW(Sym)),
5006 },
5007 [6] = { /* .strtab */
5008 .sh_type = SHT_STRTAB,
5009 .sh_offset = offsetof(struct ElfImage, str),
5010 .sh_size = sizeof(img->str),
5011 }
5012 },
5013 .sym = {
5014 [1] = { /* code_gen_buffer */
5015 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
5016 .st_shndx = 1,
5017 }
5018 },
5019 .di = {
5020 .len = sizeof(struct DebugInfo) - 4,
5021 .version = 2,
5022 .ptr_size = sizeof(void *),
5023 .cu_die = 1,
5024 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
5025 .fn_die = 2,
5026 .fn_name = "code_gen_buffer"
5027 },
5028 .da = {
5029 1, /* abbrev number (the cu) */
5030 0x11, 1, /* DW_TAG_compile_unit, has children */
5031 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
5032 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5033 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5034 0, 0, /* end of abbrev */
5035 2, /* abbrev number (the fn) */
5036 0x2e, 0, /* DW_TAG_subprogram, no children */
5037 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
5038 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5039 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5040 0, 0, /* end of abbrev */
5041 0 /* no more abbrev */
5042 },
5043 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
5044 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
813da627
RH
5045 };
5046
5047 /* We only need a single jit entry; statically allocate it. */
5048 static struct jit_code_entry one_entry;
5049
5872bbf2 5050 uintptr_t buf = (uintptr_t)buf_ptr;
813da627 5051 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2c90784a 5052 DebugFrameHeader *dfh;
813da627 5053
5872bbf2
RH
5054 img = g_malloc(img_size);
5055 *img = img_template;
813da627 5056
5872bbf2
RH
5057 img->phdr.p_vaddr = buf;
5058 img->phdr.p_paddr = buf;
5059 img->phdr.p_memsz = buf_size;
813da627 5060
813da627 5061 img->shdr[1].sh_name = find_string(img->str, ".text");
5872bbf2 5062 img->shdr[1].sh_addr = buf;
813da627
RH
5063 img->shdr[1].sh_size = buf_size;
5064
5872bbf2
RH
5065 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
5066 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
5067
5068 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
5069 img->shdr[4].sh_size = debug_frame_size;
5070
5071 img->shdr[5].sh_name = find_string(img->str, ".symtab");
5072 img->shdr[6].sh_name = find_string(img->str, ".strtab");
5073
5074 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
5075 img->sym[1].st_value = buf;
5076 img->sym[1].st_size = buf_size;
813da627 5077
5872bbf2 5078 img->di.cu_low_pc = buf;
45aba097 5079 img->di.cu_high_pc = buf + buf_size;
5872bbf2 5080 img->di.fn_low_pc = buf;
45aba097 5081 img->di.fn_high_pc = buf + buf_size;
813da627 5082
2c90784a
RH
5083 dfh = (DebugFrameHeader *)(img + 1);
5084 memcpy(dfh, debug_frame, debug_frame_size);
5085 dfh->fde.func_start = buf;
5086 dfh->fde.func_len = buf_size;
5087
813da627
RH
5088#ifdef DEBUG_JIT
5089 /* Enable this block to be able to debug the ELF image file creation.
5090 One can use readelf, objdump, or other inspection utilities. */
5091 {
eb6b2edf
BM
5092 g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
5093 FILE *f = fopen(jit, "w+b");
813da627 5094 if (f) {
5872bbf2 5095 if (fwrite(img, img_size, 1, f) != img_size) {
813da627
RH
5096 /* Avoid stupid unused return value warning for fwrite. */
5097 }
5098 fclose(f);
5099 }
5100 }
5101#endif
5102
5103 one_entry.symfile_addr = img;
5104 one_entry.symfile_size = img_size;
5105
5106 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
5107 __jit_debug_descriptor.relevant_entry = &one_entry;
5108 __jit_debug_descriptor.first_entry = &one_entry;
5109 __jit_debug_register_code();
5110}
5111#else
5872bbf2
RH
5112/* No support for the feature. Provide the entry point expected by exec.c,
5113 and implement the internal function we declared earlier. */
813da627 5114
755bf9e5 5115static void tcg_register_jit_int(const void *buf, size_t size,
2c90784a
RH
5116 const void *debug_frame,
5117 size_t debug_frame_size)
813da627
RH
5118{
5119}
5120
755bf9e5 5121void tcg_register_jit(const void *buf, size_t buf_size)
813da627
RH
5122{
5123}
5124#endif /* ELF_HOST_MACHINE */
db432672
RH
5125
5126#if !TCG_TARGET_MAYBE_vec
5127void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
5128{
5129 g_assert_not_reached();
5130}
5131#endif