]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/tcg.c
tcg: Define TCG_TYPE_I128 and related helper macros
[mirror_qemu.git] / tcg / tcg.c
CommitLineData
c896fe29
FB
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
c896fe29 25/* define it to use liveness analysis (better code) */
8f2e8c07 26#define USE_TCG_OPTIMIZATIONS
c896fe29 27
757e725b 28#include "qemu/osdep.h"
cca82982 29
813da627
RH
30/* Define to jump the ELF file used to communicate with GDB. */
31#undef DEBUG_JIT
32
72fd2efb 33#include "qemu/error-report.h"
f348b6d1 34#include "qemu/cutils.h"
1de7afc9 35#include "qemu/host-utils.h"
d4c51a0a 36#include "qemu/qemu-print.h"
1de7afc9 37#include "qemu/timer.h"
084cfca1 38#include "qemu/cacheflush.h"
ad768e6f 39#include "qemu/cacheinfo.h"
c896fe29 40
c5d3c498 41/* Note: the long term plan is to reduce the dependencies on the QEMU
c896fe29
FB
42 CPU definitions. Currently they are used for qemu_ld/st
43 instructions */
44#define NO_CPU_IO_DEFS
c896fe29 45
63c91552 46#include "exec/exec-all.h"
dcb32f1d 47#include "tcg/tcg-op.h"
813da627 48
edee2579 49#if UINTPTR_MAX == UINT32_MAX
813da627 50# define ELF_CLASS ELFCLASS32
edee2579
RH
51#else
52# define ELF_CLASS ELFCLASS64
813da627 53#endif
e03b5686 54#if HOST_BIG_ENDIAN
813da627
RH
55# define ELF_DATA ELFDATA2MSB
56#else
57# define ELF_DATA ELFDATA2LSB
58#endif
59
c896fe29 60#include "elf.h"
508127e2 61#include "exec/log.h"
d2ba8026 62#include "tcg/tcg-ldst.h"
5ff7258c 63#include "tcg-internal.h"
5584e2db 64#include "accel/tcg/perf.h"
c896fe29 65
139c1837 66/* Forward declarations for functions declared in tcg-target.c.inc and
ce151109 67 used here. */
e4d58b41
RH
68static void tcg_target_init(TCGContext *s);
69static void tcg_target_qemu_prologue(TCGContext *s);
6ac17786 70static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 71 intptr_t value, intptr_t addend);
c896fe29 72
497a22eb
RH
73/* The CIE and FDE header definitions will be common to all hosts. */
74typedef struct {
75 uint32_t len __attribute__((aligned((sizeof(void *)))));
76 uint32_t id;
77 uint8_t version;
78 char augmentation[1];
79 uint8_t code_align;
80 uint8_t data_align;
81 uint8_t return_column;
82} DebugFrameCIE;
83
84typedef struct QEMU_PACKED {
85 uint32_t len __attribute__((aligned((sizeof(void *)))));
86 uint32_t cie_offset;
edee2579
RH
87 uintptr_t func_start;
88 uintptr_t func_len;
497a22eb
RH
89} DebugFrameFDEHeader;
90
2c90784a
RH
91typedef struct QEMU_PACKED {
92 DebugFrameCIE cie;
93 DebugFrameFDEHeader fde;
94} DebugFrameHeader;
95
755bf9e5 96static void tcg_register_jit_int(const void *buf, size_t size,
2c90784a
RH
97 const void *debug_frame,
98 size_t debug_frame_size)
813da627
RH
99 __attribute__((unused));
100
139c1837 101/* Forward declarations for functions declared and used in tcg-target.c.inc. */
2a534aff 102static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
a05b5b9b 103 intptr_t arg2);
78113e83 104static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
c0ad3001 105static void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 106 TCGReg ret, tcg_target_long arg);
b55a8d9d 107static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
cf7d6b8e 108static void tcg_out_goto_tb(TCGContext *s, int which);
5e8892db
MR
109static void tcg_out_op(TCGContext *s, TCGOpcode opc,
110 const TCGArg args[TCG_MAX_OP_ARGS],
111 const int const_args[TCG_MAX_OP_ARGS]);
d2fd745f 112#if TCG_TARGET_MAYBE_vec
e7632cfa
RH
113static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
114 TCGReg dst, TCGReg src);
d6ecb4a9
RH
115static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
116 TCGReg dst, TCGReg base, intptr_t offset);
4e186175
RH
117static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
118 TCGReg dst, int64_t arg);
5e8892db
MR
119static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
120 unsigned vecl, unsigned vece,
121 const TCGArg args[TCG_MAX_OP_ARGS],
122 const int const_args[TCG_MAX_OP_ARGS]);
d2fd745f 123#else
e7632cfa
RH
124static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
125 TCGReg dst, TCGReg src)
126{
127 g_assert_not_reached();
128}
d6ecb4a9
RH
129static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
130 TCGReg dst, TCGReg base, intptr_t offset)
131{
132 g_assert_not_reached();
133}
4e186175
RH
134static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
135 TCGReg dst, int64_t arg)
e7632cfa
RH
136{
137 g_assert_not_reached();
138}
5e8892db
MR
139static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
140 unsigned vecl, unsigned vece,
141 const TCGArg args[TCG_MAX_OP_ARGS],
142 const int const_args[TCG_MAX_OP_ARGS])
d2fd745f
RH
143{
144 g_assert_not_reached();
145}
146#endif
2a534aff 147static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
a05b5b9b 148 intptr_t arg2);
59d7c14e
RH
149static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
150 TCGReg base, intptr_t ofs);
7b7d8b2d 151static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
cee44b03 152 const TCGHelperInfo *info);
a4fbbd77 153static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
659ef5cb 154#ifdef TCG_TARGET_NEED_LDST_LABELS
aeee05f5 155static int tcg_out_ldst_finalize(TCGContext *s);
659ef5cb 156#endif
c896fe29 157
42eb6dfc
RH
158TCGContext tcg_init_ctx;
159__thread TCGContext *tcg_ctx;
160
5ff7258c 161TCGContext **tcg_ctxs;
0e2d61cf
RH
162unsigned int tcg_cur_ctxs;
163unsigned int tcg_max_ctxs;
1c2adb95 164TCGv_env cpu_env = 0;
c8bc1168 165const void *tcg_code_gen_epilogue;
db0c51a3 166uintptr_t tcg_splitwx_diff;
df2cce29 167
b91ccb31
RH
168#ifndef CONFIG_TCG_INTERPRETER
169tcg_prologue_fn *tcg_qemu_tb_exec;
170#endif
171
d2fd745f 172static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
b1d8e52e 173static TCGRegSet tcg_target_call_clobber_regs;
c896fe29 174
1813e175 175#if TCG_TARGET_INSN_UNIT_SIZE == 1
4196dca6 176static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
c896fe29
FB
177{
178 *s->code_ptr++ = v;
179}
180
4196dca6
PM
181static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
182 uint8_t v)
5c53bb81 183{
1813e175 184 *p = v;
5c53bb81 185}
1813e175 186#endif
5c53bb81 187
1813e175 188#if TCG_TARGET_INSN_UNIT_SIZE <= 2
4196dca6 189static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
c896fe29 190{
1813e175
RH
191 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
192 *s->code_ptr++ = v;
193 } else {
194 tcg_insn_unit *p = s->code_ptr;
195 memcpy(p, &v, sizeof(v));
196 s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
197 }
c896fe29
FB
198}
199
4196dca6
PM
200static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
201 uint16_t v)
5c53bb81 202{
1813e175
RH
203 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
204 *p = v;
205 } else {
206 memcpy(p, &v, sizeof(v));
207 }
5c53bb81 208}
1813e175 209#endif
5c53bb81 210
1813e175 211#if TCG_TARGET_INSN_UNIT_SIZE <= 4
4196dca6 212static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
c896fe29 213{
1813e175
RH
214 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
215 *s->code_ptr++ = v;
216 } else {
217 tcg_insn_unit *p = s->code_ptr;
218 memcpy(p, &v, sizeof(v));
219 s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
220 }
c896fe29
FB
221}
222
4196dca6
PM
223static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
224 uint32_t v)
5c53bb81 225{
1813e175
RH
226 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
227 *p = v;
228 } else {
229 memcpy(p, &v, sizeof(v));
230 }
5c53bb81 231}
1813e175 232#endif
5c53bb81 233
1813e175 234#if TCG_TARGET_INSN_UNIT_SIZE <= 8
4196dca6 235static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
ac26eb69 236{
1813e175
RH
237 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
238 *s->code_ptr++ = v;
239 } else {
240 tcg_insn_unit *p = s->code_ptr;
241 memcpy(p, &v, sizeof(v));
242 s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
243 }
ac26eb69
RH
244}
245
4196dca6
PM
246static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
247 uint64_t v)
5c53bb81 248{
1813e175
RH
249 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
250 *p = v;
251 } else {
252 memcpy(p, &v, sizeof(v));
253 }
5c53bb81 254}
1813e175 255#endif
5c53bb81 256
c896fe29
FB
257/* label relocation processing */
258
1813e175 259static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
bec16311 260 TCGLabel *l, intptr_t addend)
c896fe29 261{
7ecd02a0 262 TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
c896fe29 263
7ecd02a0
RH
264 r->type = type;
265 r->ptr = code_ptr;
266 r->addend = addend;
267 QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
c896fe29
FB
268}
269
92ab8e7d 270static void tcg_out_label(TCGContext *s, TCGLabel *l)
c896fe29 271{
eabb7b91 272 tcg_debug_assert(!l->has_value);
c896fe29 273 l->has_value = 1;
92ab8e7d 274 l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
c896fe29
FB
275}
276
42a268c2 277TCGLabel *gen_new_label(void)
c896fe29 278{
b1311c4a 279 TCGContext *s = tcg_ctx;
51e3972c 280 TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
c896fe29 281
7ecd02a0
RH
282 memset(l, 0, sizeof(TCGLabel));
283 l->id = s->nb_labels++;
284 QSIMPLEQ_INIT(&l->relocs);
285
bef16ab4 286 QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
42a268c2
RH
287
288 return l;
c896fe29
FB
289}
290
7ecd02a0
RH
291static bool tcg_resolve_relocs(TCGContext *s)
292{
293 TCGLabel *l;
294
295 QSIMPLEQ_FOREACH(l, &s->labels, next) {
296 TCGRelocation *r;
297 uintptr_t value = l->u.value;
298
299 QSIMPLEQ_FOREACH(r, &l->relocs, next) {
300 if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
301 return false;
302 }
303 }
304 }
305 return true;
306}
307
9f754620
RH
308static void set_jmp_reset_offset(TCGContext *s, int which)
309{
f14bed3f
RH
310 /*
311 * We will check for overflow at the end of the opcode loop in
312 * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
313 */
b7e4afbd 314 s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
9f754620
RH
315}
316
b52a2c03
RH
317static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
318{
319 /*
320 * We will check for overflow at the end of the opcode loop in
321 * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
322 */
9da6079b 323 s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
b52a2c03
RH
324}
325
becc452a
RH
326static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
327{
328 /*
329 * Return the read-execute version of the pointer, for the benefit
330 * of any pc-relative addressing mode.
331 */
9da6079b 332 return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
becc452a
RH
333}
334
db6b7d0c 335/* Signal overflow, starting over with fewer guest insns. */
8905770b
MAL
336static G_NORETURN
337void tcg_raise_tb_overflow(TCGContext *s)
db6b7d0c
RH
338{
339 siglongjmp(s->jmp_trans, -2);
340}
341
4c22e840
RH
342#define C_PFX1(P, A) P##A
343#define C_PFX2(P, A, B) P##A##_##B
344#define C_PFX3(P, A, B, C) P##A##_##B##_##C
345#define C_PFX4(P, A, B, C, D) P##A##_##B##_##C##_##D
346#define C_PFX5(P, A, B, C, D, E) P##A##_##B##_##C##_##D##_##E
347#define C_PFX6(P, A, B, C, D, E, F) P##A##_##B##_##C##_##D##_##E##_##F
348
349/* Define an enumeration for the various combinations. */
350
351#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1),
352#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2),
353#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3),
354#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4),
355
356#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1),
357#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2),
358#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3),
359#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
360
361#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),
362
363#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
364#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
365#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
366#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
367
368typedef enum {
369#include "tcg-target-con-set.h"
370} TCGConstraintSetIndex;
371
372static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
373
374#undef C_O0_I1
375#undef C_O0_I2
376#undef C_O0_I3
377#undef C_O0_I4
378#undef C_O1_I1
379#undef C_O1_I2
380#undef C_O1_I3
381#undef C_O1_I4
382#undef C_N1_I2
383#undef C_O2_I1
384#undef C_O2_I2
385#undef C_O2_I3
386#undef C_O2_I4
387
388/* Put all of the constraint sets into an array, indexed by the enum. */
389
390#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
391#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
392#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
393#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
394
395#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
396#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
397#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
398#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
399
400#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
401
402#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
403#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
404#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
405#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
406
407static const TCGTargetOpDef constraint_sets[] = {
408#include "tcg-target-con-set.h"
409};
410
411
412#undef C_O0_I1
413#undef C_O0_I2
414#undef C_O0_I3
415#undef C_O0_I4
416#undef C_O1_I1
417#undef C_O1_I2
418#undef C_O1_I3
419#undef C_O1_I4
420#undef C_N1_I2
421#undef C_O2_I1
422#undef C_O2_I2
423#undef C_O2_I3
424#undef C_O2_I4
425
426/* Expand the enumerator to be returned from tcg_target_op_def(). */
427
428#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
429#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
430#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
431#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)
432
433#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
434#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
435#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
436#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
437
438#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
439
440#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
441#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
442#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
443#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
444
139c1837 445#include "tcg-target.c.inc"
c896fe29 446
38b47b19
EC
447static void alloc_tcg_plugin_context(TCGContext *s)
448{
449#ifdef CONFIG_PLUGIN
450 s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
451 s->plugin_tb->insns =
452 g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
453#endif
454}
455
3468b59e
EC
456/*
457 * All TCG threads except the parent (i.e. the one that called tcg_context_init
458 * and registered the target's TCG globals) must register with this function
459 * before initiating translation.
460 *
461 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
462 * of tcg_region_init() for the reasoning behind this.
463 *
464 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
465 * softmmu tcg_ctxs[] does not track tcg_ctx_init, since the initial context
466 * is not used anymore for translation once this function is called.
467 *
468 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
469 * over the array (e.g. tcg_code_size() the same for both softmmu and user-mode.
470 */
471#ifdef CONFIG_USER_ONLY
472void tcg_register_thread(void)
473{
474 tcg_ctx = &tcg_init_ctx;
475}
476#else
477void tcg_register_thread(void)
478{
479 TCGContext *s = g_malloc(sizeof(*s));
480 unsigned int i, n;
3468b59e
EC
481
482 *s = tcg_init_ctx;
483
484 /* Relink mem_base. */
485 for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
486 if (tcg_init_ctx.temps[i].mem_base) {
487 ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
488 tcg_debug_assert(b >= 0 && b < n);
489 s->temps[i].mem_base = &s->temps[b];
490 }
491 }
492
493 /* Claim an entry in tcg_ctxs */
0e2d61cf
RH
494 n = qatomic_fetch_inc(&tcg_cur_ctxs);
495 g_assert(n < tcg_max_ctxs);
d73415a3 496 qatomic_set(&tcg_ctxs[n], s);
3468b59e 497
38b47b19
EC
498 if (n > 0) {
499 alloc_tcg_plugin_context(s);
bf042e8e 500 tcg_region_initial_alloc(s);
38b47b19
EC
501 }
502
3468b59e 503 tcg_ctx = s;
e8feb96f 504}
3468b59e 505#endif /* !CONFIG_USER_ONLY */
e8feb96f 506
c896fe29
FB
507/* pool based memory allocation */
508void *tcg_malloc_internal(TCGContext *s, int size)
509{
510 TCGPool *p;
511 int pool_size;
a813e36f 512
c896fe29
FB
513 if (size > TCG_POOL_CHUNK_SIZE) {
514 /* big malloc: insert a new pool (XXX: could optimize) */
7267c094 515 p = g_malloc(sizeof(TCGPool) + size);
c896fe29 516 p->size = size;
4055299e
KB
517 p->next = s->pool_first_large;
518 s->pool_first_large = p;
519 return p->data;
c896fe29
FB
520 } else {
521 p = s->pool_current;
522 if (!p) {
523 p = s->pool_first;
524 if (!p)
525 goto new_pool;
526 } else {
527 if (!p->next) {
528 new_pool:
529 pool_size = TCG_POOL_CHUNK_SIZE;
7267c094 530 p = g_malloc(sizeof(TCGPool) + pool_size);
c896fe29
FB
531 p->size = pool_size;
532 p->next = NULL;
a813e36f 533 if (s->pool_current) {
c896fe29 534 s->pool_current->next = p;
a813e36f 535 } else {
c896fe29 536 s->pool_first = p;
a813e36f 537 }
c896fe29
FB
538 } else {
539 p = p->next;
540 }
541 }
542 }
543 s->pool_current = p;
544 s->pool_cur = p->data + size;
545 s->pool_end = p->data + p->size;
546 return p->data;
547}
548
549void tcg_pool_reset(TCGContext *s)
550{
4055299e
KB
551 TCGPool *p, *t;
552 for (p = s->pool_first_large; p; p = t) {
553 t = p->next;
554 g_free(p);
555 }
556 s->pool_first_large = NULL;
c896fe29
FB
557 s->pool_cur = s->pool_end = NULL;
558 s->pool_current = NULL;
559}
560
2ef6175a
RH
561#include "exec/helper-proto.h"
562
39004a71 563static TCGHelperInfo all_helpers[] = {
2ef6175a 564#include "exec/helper-tcg.h"
100b5e01 565};
619205fd 566static GHashTable *helper_table;
100b5e01 567
22f15579 568#ifdef CONFIG_TCG_INTERPRETER
c6ef8c7b
PMD
569static ffi_type *typecode_to_ffi(int argmask)
570{
571 switch (argmask) {
572 case dh_typecode_void:
573 return &ffi_type_void;
574 case dh_typecode_i32:
575 return &ffi_type_uint32;
576 case dh_typecode_s32:
577 return &ffi_type_sint32;
578 case dh_typecode_i64:
579 return &ffi_type_uint64;
580 case dh_typecode_s64:
581 return &ffi_type_sint64;
582 case dh_typecode_ptr:
583 return &ffi_type_pointer;
584 }
585 g_assert_not_reached();
586}
0c22e176
PMD
587
588static void init_ffi_layouts(void)
589{
590 /* g_direct_hash/equal for direct comparisons on uint32_t. */
f9c4bb80
RH
591 GHashTable *ffi_table = g_hash_table_new(NULL, NULL);
592
0c22e176 593 for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
f9c4bb80
RH
594 TCGHelperInfo *info = &all_helpers[i];
595 unsigned typemask = info->typemask;
0c22e176
PMD
596 gpointer hash = (gpointer)(uintptr_t)typemask;
597 struct {
598 ffi_cif cif;
599 ffi_type *args[];
600 } *ca;
601 ffi_status status;
602 int nargs;
f9c4bb80 603 ffi_cif *cif;
0c22e176 604
f9c4bb80
RH
605 cif = g_hash_table_lookup(ffi_table, hash);
606 if (cif) {
607 info->cif = cif;
0c22e176
PMD
608 continue;
609 }
610
611 /* Ignoring the return type, find the last non-zero field. */
612 nargs = 32 - clz32(typemask >> 3);
613 nargs = DIV_ROUND_UP(nargs, 3);
614
615 ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
616 ca->cif.rtype = typecode_to_ffi(typemask & 7);
617 ca->cif.nargs = nargs;
618
619 if (nargs != 0) {
620 ca->cif.arg_types = ca->args;
621 for (int j = 0; j < nargs; ++j) {
622 int typecode = extract32(typemask, (j + 1) * 3, 3);
623 ca->args[j] = typecode_to_ffi(typecode);
624 }
625 }
626
627 status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
628 ca->cif.rtype, ca->cif.arg_types);
629 assert(status == FFI_OK);
630
f9c4bb80
RH
631 cif = &ca->cif;
632 info->cif = cif;
633 g_hash_table_insert(ffi_table, hash, (gpointer)cif);
0c22e176 634 }
f9c4bb80
RH
635
636 g_hash_table_destroy(ffi_table);
0c22e176
PMD
637}
638#endif /* CONFIG_TCG_INTERPRETER */
22f15579 639
39004a71
RH
640typedef struct TCGCumulativeArgs {
641 int arg_idx; /* tcg_gen_callN args[] */
642 int info_in_idx; /* TCGHelperInfo in[] */
643 int arg_slot; /* regs+stack slot */
644 int ref_slot; /* stack slots for references */
645} TCGCumulativeArgs;
646
647static void layout_arg_even(TCGCumulativeArgs *cum)
648{
649 cum->arg_slot += cum->arg_slot & 1;
650}
651
652static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
653 TCGCallArgumentKind kind)
654{
655 TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
656
657 *loc = (TCGCallArgumentLoc){
658 .kind = kind,
659 .arg_idx = cum->arg_idx,
660 .arg_slot = cum->arg_slot,
661 };
662 cum->info_in_idx++;
663 cum->arg_slot++;
664}
665
666static void layout_arg_normal_n(TCGCumulativeArgs *cum,
667 TCGHelperInfo *info, int n)
668{
669 TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
670
671 for (int i = 0; i < n; ++i) {
672 /* Layout all using the same arg_idx, adjusting the subindex. */
673 loc[i] = (TCGCallArgumentLoc){
674 .kind = TCG_CALL_ARG_NORMAL,
675 .arg_idx = cum->arg_idx,
676 .tmp_subindex = i,
677 .arg_slot = cum->arg_slot + i,
678 };
679 }
680 cum->info_in_idx += n;
681 cum->arg_slot += n;
682}
683
684static void init_call_layout(TCGHelperInfo *info)
685{
686 int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
687 int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
688 unsigned typemask = info->typemask;
689 unsigned typecode;
690 TCGCumulativeArgs cum = { };
691
692 /*
693 * Parse and place any function return value.
694 */
695 typecode = typemask & 7;
696 switch (typecode) {
697 case dh_typecode_void:
698 info->nr_out = 0;
699 break;
700 case dh_typecode_i32:
701 case dh_typecode_s32:
702 case dh_typecode_ptr:
703 info->nr_out = 1;
704 info->out_kind = TCG_CALL_RET_NORMAL;
705 break;
706 case dh_typecode_i64:
707 case dh_typecode_s64:
708 info->nr_out = 64 / TCG_TARGET_REG_BITS;
709 info->out_kind = TCG_CALL_RET_NORMAL;
710 break;
711 default:
712 g_assert_not_reached();
713 }
714 assert(info->nr_out <= ARRAY_SIZE(tcg_target_call_oarg_regs));
715
716 /*
717 * Parse and place function arguments.
718 */
719 for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
720 TCGCallArgumentKind kind;
721 TCGType type;
722
723 typecode = typemask & 7;
724 switch (typecode) {
725 case dh_typecode_i32:
726 case dh_typecode_s32:
727 type = TCG_TYPE_I32;
728 break;
729 case dh_typecode_i64:
730 case dh_typecode_s64:
731 type = TCG_TYPE_I64;
732 break;
733 case dh_typecode_ptr:
734 type = TCG_TYPE_PTR;
735 break;
736 default:
737 g_assert_not_reached();
738 }
739
740 switch (type) {
741 case TCG_TYPE_I32:
742 switch (TCG_TARGET_CALL_ARG_I32) {
743 case TCG_CALL_ARG_EVEN:
744 layout_arg_even(&cum);
745 /* fall through */
746 case TCG_CALL_ARG_NORMAL:
747 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
748 break;
749 case TCG_CALL_ARG_EXTEND:
750 kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
751 layout_arg_1(&cum, info, kind);
752 break;
753 default:
754 qemu_build_not_reached();
755 }
756 break;
757
758 case TCG_TYPE_I64:
759 switch (TCG_TARGET_CALL_ARG_I64) {
760 case TCG_CALL_ARG_EVEN:
761 layout_arg_even(&cum);
762 /* fall through */
763 case TCG_CALL_ARG_NORMAL:
764 if (TCG_TARGET_REG_BITS == 32) {
765 layout_arg_normal_n(&cum, info, 2);
766 } else {
767 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
768 }
769 break;
770 default:
771 qemu_build_not_reached();
772 }
773 break;
774
775 default:
776 g_assert_not_reached();
777 }
778 }
779 info->nr_in = cum.info_in_idx;
780
781 /* Validate that we didn't overrun the input array. */
782 assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
783 /* Validate the backend has enough argument space. */
784 assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
785 assert(cum.ref_slot <= max_stk_slots);
786}
787
91478cef 788static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
f69d277e 789static void process_op_defs(TCGContext *s);
1c2adb95
RH
790static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
791 TCGReg reg, const char *name);
91478cef 792
43b972b7 793static void tcg_context_init(unsigned max_cpus)
c896fe29 794{
a76aabd3 795 TCGContext *s = &tcg_init_ctx;
100b5e01 796 int op, total_args, n, i;
c896fe29
FB
797 TCGOpDef *def;
798 TCGArgConstraint *args_ct;
1c2adb95 799 TCGTemp *ts;
c896fe29
FB
800
801 memset(s, 0, sizeof(*s));
c896fe29 802 s->nb_globals = 0;
c70fbf0a 803
c896fe29
FB
804 /* Count total number of arguments and allocate the corresponding
805 space */
806 total_args = 0;
807 for(op = 0; op < NB_OPS; op++) {
808 def = &tcg_op_defs[op];
809 n = def->nb_iargs + def->nb_oargs;
810 total_args += n;
811 }
812
bc2b17e6 813 args_ct = g_new0(TCGArgConstraint, total_args);
c896fe29
FB
814
815 for(op = 0; op < NB_OPS; op++) {
816 def = &tcg_op_defs[op];
817 def->args_ct = args_ct;
c896fe29 818 n = def->nb_iargs + def->nb_oargs;
c896fe29
FB
819 args_ct += n;
820 }
5cd8f621
RH
821
822 /* Register helpers. */
84fd9dd3 823 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
619205fd 824 helper_table = g_hash_table_new(NULL, NULL);
84fd9dd3 825
100b5e01 826 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
39004a71 827 init_call_layout(&all_helpers[i]);
84fd9dd3 828 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
72866e82 829 (gpointer)&all_helpers[i]);
100b5e01 830 }
5cd8f621 831
22f15579 832#ifdef CONFIG_TCG_INTERPRETER
0c22e176 833 init_ffi_layouts();
22f15579
RH
834#endif
835
c896fe29 836 tcg_target_init(s);
f69d277e 837 process_op_defs(s);
91478cef
RH
838
839 /* Reverse the order of the saved registers, assuming they're all at
840 the start of tcg_target_reg_alloc_order. */
841 for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
842 int r = tcg_target_reg_alloc_order[n];
843 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
844 break;
845 }
846 }
847 for (i = 0; i < n; ++i) {
848 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
849 }
850 for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
851 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
852 }
b1311c4a 853
38b47b19
EC
854 alloc_tcg_plugin_context(s);
855
b1311c4a 856 tcg_ctx = s;
3468b59e
EC
857 /*
858 * In user-mode we simply share the init context among threads, since we
859 * use a single region. See the documentation tcg_region_init() for the
860 * reasoning behind this.
861 * In softmmu we will have at most max_cpus TCG threads.
862 */
863#ifdef CONFIG_USER_ONLY
df2cce29 864 tcg_ctxs = &tcg_ctx;
0e2d61cf
RH
865 tcg_cur_ctxs = 1;
866 tcg_max_ctxs = 1;
3468b59e 867#else
0e2d61cf
RH
868 tcg_max_ctxs = max_cpus;
869 tcg_ctxs = g_new0(TCGContext *, max_cpus);
3468b59e 870#endif
1c2adb95
RH
871
872 tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
873 ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
874 cpu_env = temp_tcgv_ptr(ts);
9002ec79 875}
b03cce8e 876
43b972b7 877void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
a76aabd3 878{
43b972b7
RH
879 tcg_context_init(max_cpus);
880 tcg_region_init(tb_size, splitwx, max_cpus);
a76aabd3
RH
881}
882
6e3b2bfd
EC
883/*
884 * Allocate TBs right before their corresponding translated code, making
885 * sure that TBs and code are on different cache lines.
886 */
887TranslationBlock *tcg_tb_alloc(TCGContext *s)
888{
889 uintptr_t align = qemu_icache_linesize;
890 TranslationBlock *tb;
891 void *next;
892
e8feb96f 893 retry:
6e3b2bfd
EC
894 tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
895 next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);
896
897 if (unlikely(next > s->code_gen_highwater)) {
e8feb96f
EC
898 if (tcg_region_alloc(s)) {
899 return NULL;
900 }
901 goto retry;
6e3b2bfd 902 }
d73415a3 903 qatomic_set(&s->code_gen_ptr, next);
57a26946 904 s->data_gen_ptr = NULL;
6e3b2bfd
EC
905 return tb;
906}
907
9002ec79
RH
908void tcg_prologue_init(TCGContext *s)
909{
b0a0794a 910 size_t prologue_size;
8163b749 911
b0a0794a
RH
912 s->code_ptr = s->code_gen_ptr;
913 s->code_buf = s->code_gen_ptr;
5b38ee31 914 s->data_gen_ptr = NULL;
b91ccb31
RH
915
916#ifndef CONFIG_TCG_INTERPRETER
b0a0794a 917 tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
b91ccb31 918#endif
8163b749 919
5b38ee31
RH
920#ifdef TCG_TARGET_NEED_POOL_LABELS
921 s->pool_labels = NULL;
922#endif
923
653b87eb 924 qemu_thread_jit_write();
8163b749 925 /* Generate the prologue. */
b03cce8e 926 tcg_target_qemu_prologue(s);
5b38ee31
RH
927
928#ifdef TCG_TARGET_NEED_POOL_LABELS
929 /* Allow the prologue to put e.g. guest_base into a pool entry. */
930 {
1768987b
RH
931 int result = tcg_out_pool_finalize(s);
932 tcg_debug_assert(result == 0);
5b38ee31
RH
933 }
934#endif
935
b0a0794a 936 prologue_size = tcg_current_code_size(s);
5584e2db 937 perf_report_prologue(s->code_gen_ptr, prologue_size);
b0a0794a 938
df5d2b16 939#ifndef CONFIG_TCG_INTERPRETER
b0a0794a
RH
940 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
941 (uintptr_t)s->code_buf, prologue_size);
df5d2b16 942#endif
8163b749 943
d6b64b2b
RH
944#ifdef DEBUG_DISAS
945 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
c60f599b 946 FILE *logfile = qemu_log_trylock();
78b54858
RH
947 if (logfile) {
948 fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
949 if (s->data_gen_ptr) {
950 size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
951 size_t data_size = prologue_size - code_size;
952 size_t i;
953
954 disas(logfile, s->code_gen_ptr, code_size);
955
956 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
957 if (sizeof(tcg_target_ulong) == 8) {
958 fprintf(logfile,
959 "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
960 (uintptr_t)s->data_gen_ptr + i,
961 *(uint64_t *)(s->data_gen_ptr + i));
962 } else {
963 fprintf(logfile,
964 "0x%08" PRIxPTR ": .long 0x%08x\n",
965 (uintptr_t)s->data_gen_ptr + i,
966 *(uint32_t *)(s->data_gen_ptr + i));
967 }
5b38ee31 968 }
78b54858
RH
969 } else {
970 disas(logfile, s->code_gen_ptr, prologue_size);
5b38ee31 971 }
78b54858 972 fprintf(logfile, "\n");
78b54858 973 qemu_log_unlock(logfile);
5b38ee31 974 }
d6b64b2b
RH
975 }
976#endif
cedbcb01 977
6eea0434
RH
978#ifndef CONFIG_TCG_INTERPRETER
979 /*
980 * Assert that goto_ptr is implemented completely, setting an epilogue.
981 * For tci, we use NULL as the signal to return from the interpreter,
982 * so skip this check.
983 */
f4e01e30 984 tcg_debug_assert(tcg_code_gen_epilogue != NULL);
6eea0434 985#endif
d1c74ab3
RH
986
987 tcg_region_prologue_set(s);
c896fe29
FB
988}
989
c896fe29
FB
990void tcg_func_start(TCGContext *s)
991{
992 tcg_pool_reset(s);
993 s->nb_temps = s->nb_globals;
0ec9eabc
RH
994
995 /* No temps have been previously allocated for size or locality. */
996 memset(s->free_temps, 0, sizeof(s->free_temps));
997
c0522136
RH
998 /* No constant temps have been previously allocated. */
999 for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
1000 if (s->const_table[i]) {
1001 g_hash_table_remove_all(s->const_table[i]);
1002 }
1003 }
1004
abebf925 1005 s->nb_ops = 0;
c896fe29
FB
1006 s->nb_labels = 0;
1007 s->current_frame_offset = s->frame_start;
1008
0a209d4b
RH
1009#ifdef CONFIG_DEBUG_TCG
1010 s->goto_tb_issue_mask = 0;
1011#endif
1012
15fa08f8
RH
1013 QTAILQ_INIT(&s->ops);
1014 QTAILQ_INIT(&s->free_ops);
bef16ab4 1015 QSIMPLEQ_INIT(&s->labels);
c896fe29
FB
1016}
1017
ae30e866 1018static TCGTemp *tcg_temp_alloc(TCGContext *s)
7ca4b752
RH
1019{
1020 int n = s->nb_temps++;
ae30e866
RH
1021
1022 if (n >= TCG_MAX_TEMPS) {
db6b7d0c 1023 tcg_raise_tb_overflow(s);
ae30e866 1024 }
7ca4b752
RH
1025 return memset(&s->temps[n], 0, sizeof(TCGTemp));
1026}
1027
ae30e866 1028static TCGTemp *tcg_global_alloc(TCGContext *s)
7ca4b752 1029{
fa477d25
RH
1030 TCGTemp *ts;
1031
7ca4b752 1032 tcg_debug_assert(s->nb_globals == s->nb_temps);
ae30e866 1033 tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
7ca4b752 1034 s->nb_globals++;
fa477d25 1035 ts = tcg_temp_alloc(s);
ee17db83 1036 ts->kind = TEMP_GLOBAL;
fa477d25
RH
1037
1038 return ts;
c896fe29
FB
1039}
1040
085272b3
RH
1041static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
1042 TCGReg reg, const char *name)
c896fe29 1043{
c896fe29 1044 TCGTemp *ts;
c896fe29 1045
b3a62939 1046 if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
c896fe29 1047 tcg_abort();
b3a62939 1048 }
7ca4b752
RH
1049
1050 ts = tcg_global_alloc(s);
c896fe29
FB
1051 ts->base_type = type;
1052 ts->type = type;
ee17db83 1053 ts->kind = TEMP_FIXED;
c896fe29 1054 ts->reg = reg;
c896fe29 1055 ts->name = name;
c896fe29 1056 tcg_regset_set_reg(s->reserved_regs, reg);
7ca4b752 1057
085272b3 1058 return ts;
a7812ae4
PB
1059}
1060
b6638662 1061void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
b3a62939 1062{
b3a62939
RH
1063 s->frame_start = start;
1064 s->frame_end = start + size;
085272b3
RH
1065 s->frame_temp
1066 = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
b3a62939
RH
1067}
1068
085272b3
RH
1069TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
1070 intptr_t offset, const char *name)
c896fe29 1071{
b1311c4a 1072 TCGContext *s = tcg_ctx;
dc41aa7d 1073 TCGTemp *base_ts = tcgv_ptr_temp(base);
7ca4b752 1074 TCGTemp *ts = tcg_global_alloc(s);
aef85402 1075 int indirect_reg = 0;
c896fe29 1076
c0522136
RH
1077 switch (base_ts->kind) {
1078 case TEMP_FIXED:
1079 break;
1080 case TEMP_GLOBAL:
5a18407f
RH
1081 /* We do not support double-indirect registers. */
1082 tcg_debug_assert(!base_ts->indirect_reg);
b3915dbb 1083 base_ts->indirect_base = 1;
5a18407f
RH
1084 s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
1085 ? 2 : 1);
1086 indirect_reg = 1;
c0522136
RH
1087 break;
1088 default:
1089 g_assert_not_reached();
b3915dbb
RH
1090 }
1091
7ca4b752
RH
1092 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1093 TCGTemp *ts2 = tcg_global_alloc(s);
c896fe29 1094 char buf[64];
7ca4b752
RH
1095
1096 ts->base_type = TCG_TYPE_I64;
c896fe29 1097 ts->type = TCG_TYPE_I32;
b3915dbb 1098 ts->indirect_reg = indirect_reg;
c896fe29 1099 ts->mem_allocated = 1;
b3a62939 1100 ts->mem_base = base_ts;
aef85402 1101 ts->mem_offset = offset;
c896fe29
FB
1102 pstrcpy(buf, sizeof(buf), name);
1103 pstrcat(buf, sizeof(buf), "_0");
1104 ts->name = strdup(buf);
c896fe29 1105
7ca4b752
RH
1106 tcg_debug_assert(ts2 == ts + 1);
1107 ts2->base_type = TCG_TYPE_I64;
1108 ts2->type = TCG_TYPE_I32;
b3915dbb 1109 ts2->indirect_reg = indirect_reg;
7ca4b752
RH
1110 ts2->mem_allocated = 1;
1111 ts2->mem_base = base_ts;
aef85402 1112 ts2->mem_offset = offset + 4;
fac87bd2 1113 ts2->temp_subindex = 1;
c896fe29
FB
1114 pstrcpy(buf, sizeof(buf), name);
1115 pstrcat(buf, sizeof(buf), "_1");
120c1084 1116 ts2->name = strdup(buf);
7ca4b752 1117 } else {
c896fe29
FB
1118 ts->base_type = type;
1119 ts->type = type;
b3915dbb 1120 ts->indirect_reg = indirect_reg;
c896fe29 1121 ts->mem_allocated = 1;
b3a62939 1122 ts->mem_base = base_ts;
c896fe29 1123 ts->mem_offset = offset;
c896fe29 1124 ts->name = name;
c896fe29 1125 }
085272b3 1126 return ts;
a7812ae4
PB
1127}
1128
5bfa8034 1129TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
c896fe29 1130{
b1311c4a 1131 TCGContext *s = tcg_ctx;
ee17db83 1132 TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
c896fe29 1133 TCGTemp *ts;
641d5fbe 1134 int idx, k;
c896fe29 1135
0ec9eabc
RH
1136 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
1137 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
1138 if (idx < TCG_MAX_TEMPS) {
1139 /* There is already an available temp with the right type. */
1140 clear_bit(idx, s->free_temps[k].l);
1141
e8996ee0 1142 ts = &s->temps[idx];
e8996ee0 1143 ts->temp_allocated = 1;
7ca4b752 1144 tcg_debug_assert(ts->base_type == type);
ee17db83 1145 tcg_debug_assert(ts->kind == kind);
e8996ee0 1146 } else {
7ca4b752
RH
1147 ts = tcg_temp_alloc(s);
1148 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1149 TCGTemp *ts2 = tcg_temp_alloc(s);
1150
f6aa2f7d 1151 ts->base_type = type;
e8996ee0
FB
1152 ts->type = TCG_TYPE_I32;
1153 ts->temp_allocated = 1;
ee17db83 1154 ts->kind = kind;
7ca4b752
RH
1155
1156 tcg_debug_assert(ts2 == ts + 1);
1157 ts2->base_type = TCG_TYPE_I64;
1158 ts2->type = TCG_TYPE_I32;
1159 ts2->temp_allocated = 1;
fac87bd2 1160 ts2->temp_subindex = 1;
ee17db83 1161 ts2->kind = kind;
7ca4b752 1162 } else {
e8996ee0
FB
1163 ts->base_type = type;
1164 ts->type = type;
1165 ts->temp_allocated = 1;
ee17db83 1166 ts->kind = kind;
e8996ee0 1167 }
c896fe29 1168 }
27bfd83c
PM
1169
1170#if defined(CONFIG_DEBUG_TCG)
1171 s->temps_in_use++;
1172#endif
085272b3 1173 return ts;
c896fe29
FB
1174}
1175
d2fd745f
RH
1176TCGv_vec tcg_temp_new_vec(TCGType type)
1177{
1178 TCGTemp *t;
1179
1180#ifdef CONFIG_DEBUG_TCG
1181 switch (type) {
1182 case TCG_TYPE_V64:
1183 assert(TCG_TARGET_HAS_v64);
1184 break;
1185 case TCG_TYPE_V128:
1186 assert(TCG_TARGET_HAS_v128);
1187 break;
1188 case TCG_TYPE_V256:
1189 assert(TCG_TARGET_HAS_v256);
1190 break;
1191 default:
1192 g_assert_not_reached();
1193 }
1194#endif
1195
1196 t = tcg_temp_new_internal(type, 0);
1197 return temp_tcgv_vec(t);
1198}
1199
1200/* Create a new temp of the same type as an existing temp. */
1201TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
1202{
1203 TCGTemp *t = tcgv_vec_temp(match);
1204
1205 tcg_debug_assert(t->temp_allocated != 0);
1206
1207 t = tcg_temp_new_internal(t->base_type, 0);
1208 return temp_tcgv_vec(t);
1209}
1210
5bfa8034 1211void tcg_temp_free_internal(TCGTemp *ts)
c896fe29 1212{
b1311c4a 1213 TCGContext *s = tcg_ctx;
085272b3 1214 int k, idx;
c896fe29 1215
c7482438
RH
1216 switch (ts->kind) {
1217 case TEMP_CONST:
1218 /*
1219 * In order to simplify users of tcg_constant_*,
1220 * silently ignore free.
1221 */
c0522136 1222 return;
c7482438
RH
1223 case TEMP_NORMAL:
1224 case TEMP_LOCAL:
1225 break;
1226 default:
1227 g_assert_not_reached();
c0522136
RH
1228 }
1229
27bfd83c
PM
1230#if defined(CONFIG_DEBUG_TCG)
1231 s->temps_in_use--;
1232 if (s->temps_in_use < 0) {
1233 fprintf(stderr, "More temporaries freed than allocated!\n");
1234 }
1235#endif
1236
eabb7b91 1237 tcg_debug_assert(ts->temp_allocated != 0);
e8996ee0 1238 ts->temp_allocated = 0;
0ec9eabc 1239
085272b3 1240 idx = temp_idx(ts);
ee17db83 1241 k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
0ec9eabc 1242 set_bit(idx, s->free_temps[k].l);
c896fe29
FB
1243}
1244
c0522136
RH
1245TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
1246{
1247 TCGContext *s = tcg_ctx;
1248 GHashTable *h = s->const_table[type];
1249 TCGTemp *ts;
1250
1251 if (h == NULL) {
1252 h = g_hash_table_new(g_int64_hash, g_int64_equal);
1253 s->const_table[type] = h;
1254 }
1255
1256 ts = g_hash_table_lookup(h, &val);
1257 if (ts == NULL) {
aef85402
RH
1258 int64_t *val_ptr;
1259
c0522136
RH
1260 ts = tcg_temp_alloc(s);
1261
1262 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1263 TCGTemp *ts2 = tcg_temp_alloc(s);
1264
aef85402
RH
1265 tcg_debug_assert(ts2 == ts + 1);
1266
c0522136
RH
1267 ts->base_type = TCG_TYPE_I64;
1268 ts->type = TCG_TYPE_I32;
1269 ts->kind = TEMP_CONST;
1270 ts->temp_allocated = 1;
c0522136 1271
c0522136
RH
1272 ts2->base_type = TCG_TYPE_I64;
1273 ts2->type = TCG_TYPE_I32;
1274 ts2->kind = TEMP_CONST;
1275 ts2->temp_allocated = 1;
fac87bd2 1276 ts2->temp_subindex = 1;
aef85402
RH
1277
1278 /*
1279 * Retain the full value of the 64-bit constant in the low
1280 * part, so that the hash table works. Actual uses will
1281 * truncate the value to the low part.
1282 */
1283 ts[HOST_BIG_ENDIAN].val = val;
1284 ts[!HOST_BIG_ENDIAN].val = val >> 32;
1285 val_ptr = &ts[HOST_BIG_ENDIAN].val;
c0522136
RH
1286 } else {
1287 ts->base_type = type;
1288 ts->type = type;
1289 ts->kind = TEMP_CONST;
1290 ts->temp_allocated = 1;
1291 ts->val = val;
aef85402 1292 val_ptr = &ts->val;
c0522136 1293 }
aef85402 1294 g_hash_table_insert(h, val_ptr, ts);
c0522136
RH
1295 }
1296
1297 return ts;
1298}
1299
1300TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
1301{
1302 val = dup_const(vece, val);
1303 return temp_tcgv_vec(tcg_constant_internal(type, val));
1304}
1305
88d4005b
RH
1306TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
1307{
1308 TCGTemp *t = tcgv_vec_temp(match);
1309
1310 tcg_debug_assert(t->temp_allocated != 0);
1311 return tcg_constant_vec(t->base_type, vece, val);
1312}
1313
a7812ae4 1314TCGv_i32 tcg_const_i32(int32_t val)
c896fe29 1315{
a7812ae4
PB
1316 TCGv_i32 t0;
1317 t0 = tcg_temp_new_i32();
e8996ee0
FB
1318 tcg_gen_movi_i32(t0, val);
1319 return t0;
1320}
c896fe29 1321
a7812ae4 1322TCGv_i64 tcg_const_i64(int64_t val)
e8996ee0 1323{
a7812ae4
PB
1324 TCGv_i64 t0;
1325 t0 = tcg_temp_new_i64();
e8996ee0
FB
1326 tcg_gen_movi_i64(t0, val);
1327 return t0;
c896fe29
FB
1328}
1329
a7812ae4 1330TCGv_i32 tcg_const_local_i32(int32_t val)
bdffd4a9 1331{
a7812ae4
PB
1332 TCGv_i32 t0;
1333 t0 = tcg_temp_local_new_i32();
bdffd4a9
AJ
1334 tcg_gen_movi_i32(t0, val);
1335 return t0;
1336}
1337
a7812ae4 1338TCGv_i64 tcg_const_local_i64(int64_t val)
bdffd4a9 1339{
a7812ae4
PB
1340 TCGv_i64 t0;
1341 t0 = tcg_temp_local_new_i64();
bdffd4a9
AJ
1342 tcg_gen_movi_i64(t0, val);
1343 return t0;
1344}
1345
27bfd83c
PM
1346#if defined(CONFIG_DEBUG_TCG)
1347void tcg_clear_temp_count(void)
1348{
b1311c4a 1349 TCGContext *s = tcg_ctx;
27bfd83c
PM
1350 s->temps_in_use = 0;
1351}
1352
1353int tcg_check_temp_count(void)
1354{
b1311c4a 1355 TCGContext *s = tcg_ctx;
27bfd83c
PM
1356 if (s->temps_in_use) {
1357 /* Clear the count so that we don't give another
1358 * warning immediately next time around.
1359 */
1360 s->temps_in_use = 0;
1361 return 1;
1362 }
1363 return 0;
1364}
1365#endif
1366
be0f34b5
RH
1367/* Return true if OP may appear in the opcode stream.
1368 Test the runtime variable that controls each opcode. */
1369bool tcg_op_supported(TCGOpcode op)
1370{
d2fd745f
RH
1371 const bool have_vec
1372 = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
1373
be0f34b5
RH
1374 switch (op) {
1375 case INDEX_op_discard:
1376 case INDEX_op_set_label:
1377 case INDEX_op_call:
1378 case INDEX_op_br:
1379 case INDEX_op_mb:
1380 case INDEX_op_insn_start:
1381 case INDEX_op_exit_tb:
1382 case INDEX_op_goto_tb:
f4e01e30 1383 case INDEX_op_goto_ptr:
be0f34b5
RH
1384 case INDEX_op_qemu_ld_i32:
1385 case INDEX_op_qemu_st_i32:
1386 case INDEX_op_qemu_ld_i64:
1387 case INDEX_op_qemu_st_i64:
1388 return true;
1389
07ce0b05
RH
1390 case INDEX_op_qemu_st8_i32:
1391 return TCG_TARGET_HAS_qemu_st8_i32;
1392
be0f34b5 1393 case INDEX_op_mov_i32:
be0f34b5
RH
1394 case INDEX_op_setcond_i32:
1395 case INDEX_op_brcond_i32:
1396 case INDEX_op_ld8u_i32:
1397 case INDEX_op_ld8s_i32:
1398 case INDEX_op_ld16u_i32:
1399 case INDEX_op_ld16s_i32:
1400 case INDEX_op_ld_i32:
1401 case INDEX_op_st8_i32:
1402 case INDEX_op_st16_i32:
1403 case INDEX_op_st_i32:
1404 case INDEX_op_add_i32:
1405 case INDEX_op_sub_i32:
1406 case INDEX_op_mul_i32:
1407 case INDEX_op_and_i32:
1408 case INDEX_op_or_i32:
1409 case INDEX_op_xor_i32:
1410 case INDEX_op_shl_i32:
1411 case INDEX_op_shr_i32:
1412 case INDEX_op_sar_i32:
1413 return true;
1414
1415 case INDEX_op_movcond_i32:
1416 return TCG_TARGET_HAS_movcond_i32;
1417 case INDEX_op_div_i32:
1418 case INDEX_op_divu_i32:
1419 return TCG_TARGET_HAS_div_i32;
1420 case INDEX_op_rem_i32:
1421 case INDEX_op_remu_i32:
1422 return TCG_TARGET_HAS_rem_i32;
1423 case INDEX_op_div2_i32:
1424 case INDEX_op_divu2_i32:
1425 return TCG_TARGET_HAS_div2_i32;
1426 case INDEX_op_rotl_i32:
1427 case INDEX_op_rotr_i32:
1428 return TCG_TARGET_HAS_rot_i32;
1429 case INDEX_op_deposit_i32:
1430 return TCG_TARGET_HAS_deposit_i32;
1431 case INDEX_op_extract_i32:
1432 return TCG_TARGET_HAS_extract_i32;
1433 case INDEX_op_sextract_i32:
1434 return TCG_TARGET_HAS_sextract_i32;
fce1296f
RH
1435 case INDEX_op_extract2_i32:
1436 return TCG_TARGET_HAS_extract2_i32;
be0f34b5
RH
1437 case INDEX_op_add2_i32:
1438 return TCG_TARGET_HAS_add2_i32;
1439 case INDEX_op_sub2_i32:
1440 return TCG_TARGET_HAS_sub2_i32;
1441 case INDEX_op_mulu2_i32:
1442 return TCG_TARGET_HAS_mulu2_i32;
1443 case INDEX_op_muls2_i32:
1444 return TCG_TARGET_HAS_muls2_i32;
1445 case INDEX_op_muluh_i32:
1446 return TCG_TARGET_HAS_muluh_i32;
1447 case INDEX_op_mulsh_i32:
1448 return TCG_TARGET_HAS_mulsh_i32;
1449 case INDEX_op_ext8s_i32:
1450 return TCG_TARGET_HAS_ext8s_i32;
1451 case INDEX_op_ext16s_i32:
1452 return TCG_TARGET_HAS_ext16s_i32;
1453 case INDEX_op_ext8u_i32:
1454 return TCG_TARGET_HAS_ext8u_i32;
1455 case INDEX_op_ext16u_i32:
1456 return TCG_TARGET_HAS_ext16u_i32;
1457 case INDEX_op_bswap16_i32:
1458 return TCG_TARGET_HAS_bswap16_i32;
1459 case INDEX_op_bswap32_i32:
1460 return TCG_TARGET_HAS_bswap32_i32;
1461 case INDEX_op_not_i32:
1462 return TCG_TARGET_HAS_not_i32;
1463 case INDEX_op_neg_i32:
1464 return TCG_TARGET_HAS_neg_i32;
1465 case INDEX_op_andc_i32:
1466 return TCG_TARGET_HAS_andc_i32;
1467 case INDEX_op_orc_i32:
1468 return TCG_TARGET_HAS_orc_i32;
1469 case INDEX_op_eqv_i32:
1470 return TCG_TARGET_HAS_eqv_i32;
1471 case INDEX_op_nand_i32:
1472 return TCG_TARGET_HAS_nand_i32;
1473 case INDEX_op_nor_i32:
1474 return TCG_TARGET_HAS_nor_i32;
1475 case INDEX_op_clz_i32:
1476 return TCG_TARGET_HAS_clz_i32;
1477 case INDEX_op_ctz_i32:
1478 return TCG_TARGET_HAS_ctz_i32;
1479 case INDEX_op_ctpop_i32:
1480 return TCG_TARGET_HAS_ctpop_i32;
1481
1482 case INDEX_op_brcond2_i32:
1483 case INDEX_op_setcond2_i32:
1484 return TCG_TARGET_REG_BITS == 32;
1485
1486 case INDEX_op_mov_i64:
be0f34b5
RH
1487 case INDEX_op_setcond_i64:
1488 case INDEX_op_brcond_i64:
1489 case INDEX_op_ld8u_i64:
1490 case INDEX_op_ld8s_i64:
1491 case INDEX_op_ld16u_i64:
1492 case INDEX_op_ld16s_i64:
1493 case INDEX_op_ld32u_i64:
1494 case INDEX_op_ld32s_i64:
1495 case INDEX_op_ld_i64:
1496 case INDEX_op_st8_i64:
1497 case INDEX_op_st16_i64:
1498 case INDEX_op_st32_i64:
1499 case INDEX_op_st_i64:
1500 case INDEX_op_add_i64:
1501 case INDEX_op_sub_i64:
1502 case INDEX_op_mul_i64:
1503 case INDEX_op_and_i64:
1504 case INDEX_op_or_i64:
1505 case INDEX_op_xor_i64:
1506 case INDEX_op_shl_i64:
1507 case INDEX_op_shr_i64:
1508 case INDEX_op_sar_i64:
1509 case INDEX_op_ext_i32_i64:
1510 case INDEX_op_extu_i32_i64:
1511 return TCG_TARGET_REG_BITS == 64;
1512
1513 case INDEX_op_movcond_i64:
1514 return TCG_TARGET_HAS_movcond_i64;
1515 case INDEX_op_div_i64:
1516 case INDEX_op_divu_i64:
1517 return TCG_TARGET_HAS_div_i64;
1518 case INDEX_op_rem_i64:
1519 case INDEX_op_remu_i64:
1520 return TCG_TARGET_HAS_rem_i64;
1521 case INDEX_op_div2_i64:
1522 case INDEX_op_divu2_i64:
1523 return TCG_TARGET_HAS_div2_i64;
1524 case INDEX_op_rotl_i64:
1525 case INDEX_op_rotr_i64:
1526 return TCG_TARGET_HAS_rot_i64;
1527 case INDEX_op_deposit_i64:
1528 return TCG_TARGET_HAS_deposit_i64;
1529 case INDEX_op_extract_i64:
1530 return TCG_TARGET_HAS_extract_i64;
1531 case INDEX_op_sextract_i64:
1532 return TCG_TARGET_HAS_sextract_i64;
fce1296f
RH
1533 case INDEX_op_extract2_i64:
1534 return TCG_TARGET_HAS_extract2_i64;
be0f34b5
RH
1535 case INDEX_op_extrl_i64_i32:
1536 return TCG_TARGET_HAS_extrl_i64_i32;
1537 case INDEX_op_extrh_i64_i32:
1538 return TCG_TARGET_HAS_extrh_i64_i32;
1539 case INDEX_op_ext8s_i64:
1540 return TCG_TARGET_HAS_ext8s_i64;
1541 case INDEX_op_ext16s_i64:
1542 return TCG_TARGET_HAS_ext16s_i64;
1543 case INDEX_op_ext32s_i64:
1544 return TCG_TARGET_HAS_ext32s_i64;
1545 case INDEX_op_ext8u_i64:
1546 return TCG_TARGET_HAS_ext8u_i64;
1547 case INDEX_op_ext16u_i64:
1548 return TCG_TARGET_HAS_ext16u_i64;
1549 case INDEX_op_ext32u_i64:
1550 return TCG_TARGET_HAS_ext32u_i64;
1551 case INDEX_op_bswap16_i64:
1552 return TCG_TARGET_HAS_bswap16_i64;
1553 case INDEX_op_bswap32_i64:
1554 return TCG_TARGET_HAS_bswap32_i64;
1555 case INDEX_op_bswap64_i64:
1556 return TCG_TARGET_HAS_bswap64_i64;
1557 case INDEX_op_not_i64:
1558 return TCG_TARGET_HAS_not_i64;
1559 case INDEX_op_neg_i64:
1560 return TCG_TARGET_HAS_neg_i64;
1561 case INDEX_op_andc_i64:
1562 return TCG_TARGET_HAS_andc_i64;
1563 case INDEX_op_orc_i64:
1564 return TCG_TARGET_HAS_orc_i64;
1565 case INDEX_op_eqv_i64:
1566 return TCG_TARGET_HAS_eqv_i64;
1567 case INDEX_op_nand_i64:
1568 return TCG_TARGET_HAS_nand_i64;
1569 case INDEX_op_nor_i64:
1570 return TCG_TARGET_HAS_nor_i64;
1571 case INDEX_op_clz_i64:
1572 return TCG_TARGET_HAS_clz_i64;
1573 case INDEX_op_ctz_i64:
1574 return TCG_TARGET_HAS_ctz_i64;
1575 case INDEX_op_ctpop_i64:
1576 return TCG_TARGET_HAS_ctpop_i64;
1577 case INDEX_op_add2_i64:
1578 return TCG_TARGET_HAS_add2_i64;
1579 case INDEX_op_sub2_i64:
1580 return TCG_TARGET_HAS_sub2_i64;
1581 case INDEX_op_mulu2_i64:
1582 return TCG_TARGET_HAS_mulu2_i64;
1583 case INDEX_op_muls2_i64:
1584 return TCG_TARGET_HAS_muls2_i64;
1585 case INDEX_op_muluh_i64:
1586 return TCG_TARGET_HAS_muluh_i64;
1587 case INDEX_op_mulsh_i64:
1588 return TCG_TARGET_HAS_mulsh_i64;
1589
d2fd745f
RH
1590 case INDEX_op_mov_vec:
1591 case INDEX_op_dup_vec:
37ee55a0 1592 case INDEX_op_dupm_vec:
d2fd745f
RH
1593 case INDEX_op_ld_vec:
1594 case INDEX_op_st_vec:
1595 case INDEX_op_add_vec:
1596 case INDEX_op_sub_vec:
1597 case INDEX_op_and_vec:
1598 case INDEX_op_or_vec:
1599 case INDEX_op_xor_vec:
212be173 1600 case INDEX_op_cmp_vec:
d2fd745f
RH
1601 return have_vec;
1602 case INDEX_op_dup2_vec:
1603 return have_vec && TCG_TARGET_REG_BITS == 32;
1604 case INDEX_op_not_vec:
1605 return have_vec && TCG_TARGET_HAS_not_vec;
1606 case INDEX_op_neg_vec:
1607 return have_vec && TCG_TARGET_HAS_neg_vec;
bcefc902
RH
1608 case INDEX_op_abs_vec:
1609 return have_vec && TCG_TARGET_HAS_abs_vec;
d2fd745f
RH
1610 case INDEX_op_andc_vec:
1611 return have_vec && TCG_TARGET_HAS_andc_vec;
1612 case INDEX_op_orc_vec:
1613 return have_vec && TCG_TARGET_HAS_orc_vec;
ed523473
RH
1614 case INDEX_op_nand_vec:
1615 return have_vec && TCG_TARGET_HAS_nand_vec;
1616 case INDEX_op_nor_vec:
1617 return have_vec && TCG_TARGET_HAS_nor_vec;
1618 case INDEX_op_eqv_vec:
1619 return have_vec && TCG_TARGET_HAS_eqv_vec;
3774030a
RH
1620 case INDEX_op_mul_vec:
1621 return have_vec && TCG_TARGET_HAS_mul_vec;
d0ec9796
RH
1622 case INDEX_op_shli_vec:
1623 case INDEX_op_shri_vec:
1624 case INDEX_op_sari_vec:
1625 return have_vec && TCG_TARGET_HAS_shi_vec;
1626 case INDEX_op_shls_vec:
1627 case INDEX_op_shrs_vec:
1628 case INDEX_op_sars_vec:
1629 return have_vec && TCG_TARGET_HAS_shs_vec;
1630 case INDEX_op_shlv_vec:
1631 case INDEX_op_shrv_vec:
1632 case INDEX_op_sarv_vec:
1633 return have_vec && TCG_TARGET_HAS_shv_vec;
b0f7e744
RH
1634 case INDEX_op_rotli_vec:
1635 return have_vec && TCG_TARGET_HAS_roti_vec;
23850a74
RH
1636 case INDEX_op_rotls_vec:
1637 return have_vec && TCG_TARGET_HAS_rots_vec;
5d0ceda9
RH
1638 case INDEX_op_rotlv_vec:
1639 case INDEX_op_rotrv_vec:
1640 return have_vec && TCG_TARGET_HAS_rotv_vec;
8afaf050
RH
1641 case INDEX_op_ssadd_vec:
1642 case INDEX_op_usadd_vec:
1643 case INDEX_op_sssub_vec:
1644 case INDEX_op_ussub_vec:
1645 return have_vec && TCG_TARGET_HAS_sat_vec;
dd0a0fcd
RH
1646 case INDEX_op_smin_vec:
1647 case INDEX_op_umin_vec:
1648 case INDEX_op_smax_vec:
1649 case INDEX_op_umax_vec:
1650 return have_vec && TCG_TARGET_HAS_minmax_vec;
38dc1294
RH
1651 case INDEX_op_bitsel_vec:
1652 return have_vec && TCG_TARGET_HAS_bitsel_vec;
f75da298
RH
1653 case INDEX_op_cmpsel_vec:
1654 return have_vec && TCG_TARGET_HAS_cmpsel_vec;
d2fd745f 1655
db432672
RH
1656 default:
1657 tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
1658 return true;
be0f34b5 1659 }
be0f34b5
RH
1660}
1661
39004a71
RH
1662static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
1663
ae8b75dc 1664void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
c896fe29 1665{
3e92aa34 1666 const TCGHelperInfo *info;
39004a71
RH
1667 TCGv_i64 extend_free[MAX_CALL_IARGS];
1668 int n_extend = 0;
75e8b9b7 1669 TCGOp *op;
39004a71 1670 int i, n, pi = 0, total_args;
afb49896 1671
619205fd 1672 info = g_hash_table_lookup(helper_table, (gpointer)func);
39004a71
RH
1673 total_args = info->nr_out + info->nr_in + 2;
1674 op = tcg_op_alloc(INDEX_op_call, total_args);
2bece2c8 1675
38b47b19 1676#ifdef CONFIG_PLUGIN
17083f6f
EC
1677 /* Flag helpers that may affect guest state */
1678 if (tcg_ctx->plugin_insn &&
1679 !(info->flags & TCG_CALL_PLUGIN) &&
1680 !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
38b47b19
EC
1681 tcg_ctx->plugin_insn->calls_helpers = true;
1682 }
1683#endif
1684
39004a71
RH
1685 TCGOP_CALLO(op) = n = info->nr_out;
1686 switch (n) {
1687 case 0:
1688 tcg_debug_assert(ret == NULL);
1689 break;
1690 case 1:
1691 tcg_debug_assert(ret != NULL);
1692 op->args[pi++] = temp_arg(ret);
1693 break;
1694 case 2:
1695 tcg_debug_assert(ret != NULL);
1696 tcg_debug_assert(ret->base_type == ret->type + 1);
1697 tcg_debug_assert(ret->temp_subindex == 0);
1698 op->args[pi++] = temp_arg(ret);
1699 op->args[pi++] = temp_arg(ret + 1);
1700 break;
1701 default:
1702 g_assert_not_reached();
1703 }
1704
1705 TCGOP_CALLI(op) = n = info->nr_in;
1706 for (i = 0; i < n; i++) {
1707 const TCGCallArgumentLoc *loc = &info->in[i];
1708 TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
1709
1710 switch (loc->kind) {
1711 case TCG_CALL_ARG_NORMAL:
1712 op->args[pi++] = temp_arg(ts);
1713 break;
eb8b0224 1714
39004a71
RH
1715 case TCG_CALL_ARG_EXTEND_U:
1716 case TCG_CALL_ARG_EXTEND_S:
1717 {
eb8b0224 1718 TCGv_i64 temp = tcg_temp_new_i64();
39004a71
RH
1719 TCGv_i32 orig = temp_tcgv_i32(ts);
1720
1721 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
eb8b0224
RH
1722 tcg_gen_ext_i32_i64(temp, orig);
1723 } else {
1724 tcg_gen_extu_i32_i64(temp, orig);
1725 }
39004a71
RH
1726 op->args[pi++] = tcgv_i64_arg(temp);
1727 extend_free[n_extend++] = temp;
2bece2c8 1728 }
e2a9dd6b 1729 break;
7b7d8b2d 1730
e2a9dd6b
RH
1731 default:
1732 g_assert_not_reached();
c896fe29
FB
1733 }
1734 }
75e8b9b7 1735 op->args[pi++] = (uintptr_t)func;
3e92aa34 1736 op->args[pi++] = (uintptr_t)info;
39004a71 1737 tcg_debug_assert(pi == total_args);
a7812ae4 1738
39004a71 1739 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
7319d83a 1740
39004a71
RH
1741 tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
1742 for (i = 0; i < n_extend; ++i) {
1743 tcg_temp_free_i64(extend_free[i]);
2bece2c8 1744 }
c896fe29 1745}
c896fe29 1746
8fcd3692 1747static void tcg_reg_alloc_start(TCGContext *s)
c896fe29 1748{
ac3b8891 1749 int i, n;
ac3b8891 1750
ee17db83
RH
1751 for (i = 0, n = s->nb_temps; i < n; i++) {
1752 TCGTemp *ts = &s->temps[i];
1753 TCGTempVal val = TEMP_VAL_MEM;
1754
1755 switch (ts->kind) {
c0522136
RH
1756 case TEMP_CONST:
1757 val = TEMP_VAL_CONST;
1758 break;
ee17db83
RH
1759 case TEMP_FIXED:
1760 val = TEMP_VAL_REG;
1761 break;
1762 case TEMP_GLOBAL:
1763 break;
1764 case TEMP_NORMAL:
c7482438 1765 case TEMP_EBB:
ee17db83
RH
1766 val = TEMP_VAL_DEAD;
1767 /* fall through */
1768 case TEMP_LOCAL:
1769 ts->mem_allocated = 0;
1770 break;
1771 default:
1772 g_assert_not_reached();
1773 }
1774 ts->val_type = val;
e8996ee0 1775 }
f8b2f202
RH
1776
1777 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
c896fe29
FB
1778}
1779
f8b2f202
RH
1780static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1781 TCGTemp *ts)
c896fe29 1782{
1807f4c4 1783 int idx = temp_idx(ts);
ac56dd48 1784
ee17db83
RH
1785 switch (ts->kind) {
1786 case TEMP_FIXED:
1787 case TEMP_GLOBAL:
ac56dd48 1788 pstrcpy(buf, buf_size, ts->name);
ee17db83
RH
1789 break;
1790 case TEMP_LOCAL:
f8b2f202 1791 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
ee17db83 1792 break;
c7482438
RH
1793 case TEMP_EBB:
1794 snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
1795 break;
ee17db83 1796 case TEMP_NORMAL:
f8b2f202 1797 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
ee17db83 1798 break;
c0522136
RH
1799 case TEMP_CONST:
1800 switch (ts->type) {
1801 case TCG_TYPE_I32:
1802 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1803 break;
1804#if TCG_TARGET_REG_BITS > 32
1805 case TCG_TYPE_I64:
1806 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1807 break;
1808#endif
1809 case TCG_TYPE_V64:
1810 case TCG_TYPE_V128:
1811 case TCG_TYPE_V256:
1812 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1813 64 << (ts->type - TCG_TYPE_V64), ts->val);
1814 break;
1815 default:
1816 g_assert_not_reached();
1817 }
1818 break;
c896fe29
FB
1819 }
1820 return buf;
1821}
1822
43439139
RH
1823static char *tcg_get_arg_str(TCGContext *s, char *buf,
1824 int buf_size, TCGArg arg)
f8b2f202 1825{
43439139 1826 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
f8b2f202
RH
1827}
1828
f48f3ede
BS
1829static const char * const cond_name[] =
1830{
0aed257f
RH
1831 [TCG_COND_NEVER] = "never",
1832 [TCG_COND_ALWAYS] = "always",
f48f3ede
BS
1833 [TCG_COND_EQ] = "eq",
1834 [TCG_COND_NE] = "ne",
1835 [TCG_COND_LT] = "lt",
1836 [TCG_COND_GE] = "ge",
1837 [TCG_COND_LE] = "le",
1838 [TCG_COND_GT] = "gt",
1839 [TCG_COND_LTU] = "ltu",
1840 [TCG_COND_GEU] = "geu",
1841 [TCG_COND_LEU] = "leu",
1842 [TCG_COND_GTU] = "gtu"
1843};
1844
f713d6ad
RH
1845static const char * const ldst_name[] =
1846{
1847 [MO_UB] = "ub",
1848 [MO_SB] = "sb",
1849 [MO_LEUW] = "leuw",
1850 [MO_LESW] = "lesw",
1851 [MO_LEUL] = "leul",
1852 [MO_LESL] = "lesl",
fc313c64 1853 [MO_LEUQ] = "leq",
f713d6ad
RH
1854 [MO_BEUW] = "beuw",
1855 [MO_BESW] = "besw",
1856 [MO_BEUL] = "beul",
1857 [MO_BESL] = "besl",
fc313c64 1858 [MO_BEUQ] = "beq",
f713d6ad
RH
1859};
1860
1f00b27f 1861static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
52bf9771 1862#ifdef TARGET_ALIGNED_ONLY
1f00b27f
SS
1863 [MO_UNALN >> MO_ASHIFT] = "un+",
1864 [MO_ALIGN >> MO_ASHIFT] = "",
1865#else
1866 [MO_UNALN >> MO_ASHIFT] = "",
1867 [MO_ALIGN >> MO_ASHIFT] = "al+",
1868#endif
1869 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1870 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1871 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1872 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1873 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1874 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1875};
1876
587195bd
RH
1877static const char bswap_flag_name[][6] = {
1878 [TCG_BSWAP_IZ] = "iz",
1879 [TCG_BSWAP_OZ] = "oz",
1880 [TCG_BSWAP_OS] = "os",
1881 [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
1882 [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
1883};
1884
b016486e
RH
1885static inline bool tcg_regset_single(TCGRegSet d)
1886{
1887 return (d & (d - 1)) == 0;
1888}
1889
1890static inline TCGReg tcg_regset_first(TCGRegSet d)
1891{
1892 if (TCG_TARGET_NB_REGS <= 32) {
1893 return ctz32(d);
1894 } else {
1895 return ctz64(d);
1896 }
1897}
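/*
 * Example: for d == 0x10 (only bit 4 set), tcg_regset_single(d) is
 * true and tcg_regset_first(d) returns 4; for d == 0x18 (bits 3 and
 * 4 set), tcg_regset_single(d) is false and tcg_regset_first(d)
 * returns the lowest member, 3.
 */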
1898
b7a83ff8
RH
1899/* Return only the number of characters output -- no error return. */
1900#define ne_fprintf(...) \
1901 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
1902
1903static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
c896fe29 1904{
c896fe29 1905 char buf[128];
c45cb8bb 1906 TCGOp *op;
c45cb8bb 1907
15fa08f8 1908 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb
RH
1909 int i, k, nb_oargs, nb_iargs, nb_cargs;
1910 const TCGOpDef *def;
c45cb8bb 1911 TCGOpcode c;
bdfb460e 1912 int col = 0;
c896fe29 1913
c45cb8bb 1914 c = op->opc;
c896fe29 1915 def = &tcg_op_defs[c];
c45cb8bb 1916
765b842a 1917 if (c == INDEX_op_insn_start) {
b016486e 1918 nb_oargs = 0;
b7a83ff8 1919 col += ne_fprintf(f, "\n ----");
9aef40ed
RH
1920
1921 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1922 target_ulong a;
7e4597d7 1923#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 1924 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
7e4597d7 1925#else
efee3746 1926 a = op->args[i];
7e4597d7 1927#endif
b7a83ff8 1928 col += ne_fprintf(f, " " TARGET_FMT_lx, a);
eeacee4d 1929 }
7e4597d7 1930 } else if (c == INDEX_op_call) {
3e92aa34 1931 const TCGHelperInfo *info = tcg_call_info(op);
fa52e660 1932 void *func = tcg_call_func(op);
3e92aa34 1933
c896fe29 1934 /* variable number of arguments */
cd9090aa
RH
1935 nb_oargs = TCGOP_CALLO(op);
1936 nb_iargs = TCGOP_CALLI(op);
c896fe29 1937 nb_cargs = def->nb_cargs;
c896fe29 1938
b7a83ff8 1939 col += ne_fprintf(f, " %s ", def->name);
3e92aa34
RH
1940
1941 /*
1942 * Print the function name from TCGHelperInfo, if available.
1943 * Note that plugins have a template function for the info,
1944 * but the actual function pointer comes from the plugin.
1945 */
3e92aa34 1946 if (func == info->func) {
b7a83ff8 1947 col += ne_fprintf(f, "%s", info->name);
3e92aa34 1948 } else {
b7a83ff8 1949 col += ne_fprintf(f, "plugin(%p)", func);
3e92aa34
RH
1950 }
1951
b7a83ff8 1952 col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
cf066674 1953 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1954 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1955 op->args[i]));
b03cce8e 1956 }
cf066674 1957 for (i = 0; i < nb_iargs; i++) {
efee3746 1958 TCGArg arg = op->args[nb_oargs + i];
39004a71 1959 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
b7a83ff8 1960 col += ne_fprintf(f, ",%s", t);
e8996ee0 1961 }
b03cce8e 1962 } else {
b7a83ff8 1963 col += ne_fprintf(f, " %s ", def->name);
c45cb8bb
RH
1964
1965 nb_oargs = def->nb_oargs;
1966 nb_iargs = def->nb_iargs;
1967 nb_cargs = def->nb_cargs;
1968
d2fd745f 1969 if (def->flags & TCG_OPF_VECTOR) {
b7a83ff8
RH
1970 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
1971 8 << TCGOP_VECE(op));
d2fd745f
RH
1972 }
1973
b03cce8e 1974 k = 0;
c45cb8bb 1975 for (i = 0; i < nb_oargs; i++) {
b7a83ff8
RH
1976 const char *sep = k ? "," : "";
1977 col += ne_fprintf(f, "%s%s", sep,
1978 tcg_get_arg_str(s, buf, sizeof(buf),
1979 op->args[k++]));
b03cce8e 1980 }
c45cb8bb 1981 for (i = 0; i < nb_iargs; i++) {
b7a83ff8
RH
1982 const char *sep = k ? "," : "";
1983 col += ne_fprintf(f, "%s%s", sep,
1984 tcg_get_arg_str(s, buf, sizeof(buf),
1985 op->args[k++]));
b03cce8e 1986 }
be210acb
RH
1987 switch (c) {
1988 case INDEX_op_brcond_i32:
be210acb 1989 case INDEX_op_setcond_i32:
ffc5ea09 1990 case INDEX_op_movcond_i32:
ffc5ea09 1991 case INDEX_op_brcond2_i32:
be210acb 1992 case INDEX_op_setcond2_i32:
ffc5ea09 1993 case INDEX_op_brcond_i64:
be210acb 1994 case INDEX_op_setcond_i64:
ffc5ea09 1995 case INDEX_op_movcond_i64:
212be173 1996 case INDEX_op_cmp_vec:
f75da298 1997 case INDEX_op_cmpsel_vec:
efee3746
RH
1998 if (op->args[k] < ARRAY_SIZE(cond_name)
1999 && cond_name[op->args[k]]) {
b7a83ff8 2000 col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
eeacee4d 2001 } else {
b7a83ff8 2002 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
eeacee4d 2003 }
f48f3ede 2004 i = 1;
be210acb 2005 break;
f713d6ad
RH
2006 case INDEX_op_qemu_ld_i32:
2007 case INDEX_op_qemu_st_i32:
07ce0b05 2008 case INDEX_op_qemu_st8_i32:
f713d6ad
RH
2009 case INDEX_op_qemu_ld_i64:
2010 case INDEX_op_qemu_st_i64:
59227d5d 2011 {
9002ffcb 2012 MemOpIdx oi = op->args[k++];
14776ab5 2013 MemOp op = get_memop(oi);
59227d5d
RH
2014 unsigned ix = get_mmuidx(oi);
2015
59c4b7e8 2016 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
b7a83ff8 2017 col += ne_fprintf(f, ",$0x%x,%u", op, ix);
59c4b7e8 2018 } else {
1f00b27f
SS
2019 const char *s_al, *s_op;
2020 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
59c4b7e8 2021 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
b7a83ff8 2022 col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
59227d5d
RH
2023 }
2024 i = 1;
f713d6ad 2025 }
f713d6ad 2026 break;
587195bd
RH
2027 case INDEX_op_bswap16_i32:
2028 case INDEX_op_bswap16_i64:
2029 case INDEX_op_bswap32_i32:
2030 case INDEX_op_bswap32_i64:
2031 case INDEX_op_bswap64_i64:
2032 {
2033 TCGArg flags = op->args[k];
2034 const char *name = NULL;
2035
2036 if (flags < ARRAY_SIZE(bswap_flag_name)) {
2037 name = bswap_flag_name[flags];
2038 }
2039 if (name) {
b7a83ff8 2040 col += ne_fprintf(f, ",%s", name);
587195bd 2041 } else {
b7a83ff8 2042 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
587195bd
RH
2043 }
2044 i = k = 1;
2045 }
2046 break;
be210acb 2047 default:
f48f3ede 2048 i = 0;
be210acb
RH
2049 break;
2050 }
51e3972c
RH
2051 switch (c) {
2052 case INDEX_op_set_label:
2053 case INDEX_op_br:
2054 case INDEX_op_brcond_i32:
2055 case INDEX_op_brcond_i64:
2056 case INDEX_op_brcond2_i32:
b7a83ff8
RH
2057 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2058 arg_label(op->args[k])->id);
51e3972c
RH
2059 i++, k++;
2060 break;
2061 default:
2062 break;
2063 }
2064 for (; i < nb_cargs; i++, k++) {
b7a83ff8
RH
2065 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2066 op->args[k]);
bdfb460e
RH
2067 }
2068 }
bdfb460e 2069
1894f69a 2070 if (have_prefs || op->life) {
b7a83ff8
RH
2071 for (; col < 40; ++col) {
2072 putc(' ', f);
bdfb460e 2073 }
1894f69a
RH
2074 }
2075
2076 if (op->life) {
2077 unsigned life = op->life;
bdfb460e
RH
2078
2079 if (life & (SYNC_ARG * 3)) {
b7a83ff8 2080 ne_fprintf(f, " sync:");
bdfb460e
RH
2081 for (i = 0; i < 2; ++i) {
2082 if (life & (SYNC_ARG << i)) {
b7a83ff8 2083 ne_fprintf(f, " %d", i);
bdfb460e
RH
2084 }
2085 }
2086 }
2087 life /= DEAD_ARG;
2088 if (life) {
b7a83ff8 2089 ne_fprintf(f, " dead:");
bdfb460e
RH
2090 for (i = 0; life; ++i, life >>= 1) {
2091 if (life & 1) {
b7a83ff8 2092 ne_fprintf(f, " %d", i);
bdfb460e
RH
2093 }
2094 }
b03cce8e 2095 }
c896fe29 2096 }
1894f69a
RH
2097
2098 if (have_prefs) {
2099 for (i = 0; i < nb_oargs; ++i) {
31fd884b 2100 TCGRegSet set = output_pref(op, i);
1894f69a
RH
2101
2102 if (i == 0) {
b7a83ff8 2103 ne_fprintf(f, " pref=");
1894f69a 2104 } else {
b7a83ff8 2105 ne_fprintf(f, ",");
1894f69a
RH
2106 }
2107 if (set == 0) {
b7a83ff8 2108 ne_fprintf(f, "none");
1894f69a 2109 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
b7a83ff8 2110 ne_fprintf(f, "all");
1894f69a
RH
2111#ifdef CONFIG_DEBUG_TCG
2112 } else if (tcg_regset_single(set)) {
2113 TCGReg reg = tcg_regset_first(set);
b7a83ff8 2114 ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
1894f69a
RH
2115#endif
2116 } else if (TCG_TARGET_NB_REGS <= 32) {
b7a83ff8 2117 ne_fprintf(f, "0x%x", (uint32_t)set);
1894f69a 2118 } else {
b7a83ff8 2119 ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
1894f69a
RH
2120 }
2121 }
2122 }
2123
b7a83ff8 2124 putc('\n', f);
c896fe29
FB
2125 }
2126}
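/*
 * For reference, a line emitted by the dumper above looks roughly
 * like (exact temp names depend on the TB being translated):
 *
 *     brcond_i32 tmp2,tmp3,eq,$L1
 *     add_i32 tmp0,tmp0,tmp1                   dead: 2
 *
 * with the optional sync/dead/pref annotations appended past
 * column 40 when liveness or preference data is available.
 */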
2127
2128/* we give more priority to constraints with fewer registers */
2129static int get_constraint_priority(const TCGOpDef *def, int k)
2130{
74a11790 2131 const TCGArgConstraint *arg_ct = &def->args_ct[k];
29f5e925 2132 int n = ctpop64(arg_ct->regs);
c896fe29 2133
29f5e925
RH
2134 /*
2135 * Sort constraints of a single register first, which includes output
2136 * aliases (which must exactly match the input already allocated).
2137 */
2138 if (n == 1 || arg_ct->oalias) {
2139 return INT_MAX;
2140 }
2141
2142 /*
2143 * Sort register pairs next, first then second immediately after.
2144 * Arbitrarily sort multiple pairs by the index of the first reg;
2145 * there shouldn't be many pairs.
2146 */
2147 switch (arg_ct->pair) {
2148 case 1:
2149 case 3:
2150 return (k + 1) * 2;
2151 case 2:
2152 return (arg_ct->pair_index + 1) * 2 - 1;
c896fe29 2153 }
29f5e925
RH
2154
2155 /* Finally, sort by decreasing register count. */
2156 assert(n > 1);
2157 return -n;
c896fe29
FB
2158}
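/*
 * Example priorities: an output that aliases an input, or any
 * constraint allowing exactly one register, sorts first (INT_MAX);
 * the two halves of a register pair sort next and stay adjacent;
 * a plain "r" constraint allowing, say, 16 registers sorts last
 * with priority -16.
 */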
2159
2160/* sort from highest priority to lowest */
2161static void sort_constraints(TCGOpDef *def, int start, int n)
2162{
66792f90
RH
2163 int i, j;
2164 TCGArgConstraint *a = def->args_ct;
c896fe29 2165
66792f90
RH
2166 for (i = 0; i < n; i++) {
2167 a[start + i].sort_index = start + i;
2168 }
2169 if (n <= 1) {
c896fe29 2170 return;
66792f90
RH
2171 }
2172 for (i = 0; i < n - 1; i++) {
2173 for (j = i + 1; j < n; j++) {
2174 int p1 = get_constraint_priority(def, a[start + i].sort_index);
2175 int p2 = get_constraint_priority(def, a[start + j].sort_index);
c896fe29 2176 if (p1 < p2) {
66792f90
RH
2177 int tmp = a[start + i].sort_index;
2178 a[start + i].sort_index = a[start + j].sort_index;
2179 a[start + j].sort_index = tmp;
c896fe29
FB
2180 }
2181 }
2182 }
2183}
2184
f69d277e 2185static void process_op_defs(TCGContext *s)
c896fe29 2186{
a9751609 2187 TCGOpcode op;
c896fe29 2188
f69d277e
RH
2189 for (op = 0; op < NB_OPS; op++) {
2190 TCGOpDef *def = &tcg_op_defs[op];
2191 const TCGTargetOpDef *tdefs;
29f5e925
RH
2192 bool saw_alias_pair = false;
2193 int i, o, i2, o2, nb_args;
f69d277e
RH
2194
2195 if (def->flags & TCG_OPF_NOT_PRESENT) {
2196 continue;
2197 }
2198
c896fe29 2199 nb_args = def->nb_iargs + def->nb_oargs;
f69d277e
RH
2200 if (nb_args == 0) {
2201 continue;
2202 }
2203
4c22e840
RH
2204 /*
2205 * Macro magic should make it impossible, but double-check that
2206 * the array index is in range. Since the signedness of an enum
2207 * is implementation defined, force the result to unsigned.
2208 */
2209 unsigned con_set = tcg_target_op_def(op);
2210 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2211 tdefs = &constraint_sets[con_set];
f69d277e
RH
2212
2213 for (i = 0; i < nb_args; i++) {
2214 const char *ct_str = tdefs->args_ct_str[i];
8940ea0d
PMD
2215 bool input_p = i >= def->nb_oargs;
2216
f69d277e 2217 /* Incomplete TCGTargetOpDef entry. */
eabb7b91 2218 tcg_debug_assert(ct_str != NULL);
f69d277e 2219
8940ea0d
PMD
2220 switch (*ct_str) {
2221 case '0' ... '9':
2222 o = *ct_str - '0';
2223 tcg_debug_assert(input_p);
2224 tcg_debug_assert(o < def->nb_oargs);
2225 tcg_debug_assert(def->args_ct[o].regs != 0);
2226 tcg_debug_assert(!def->args_ct[o].oalias);
2227 def->args_ct[i] = def->args_ct[o];
2228 /* The output sets oalias. */
2229 def->args_ct[o].oalias = 1;
2230 def->args_ct[o].alias_index = i;
2231 /* The input sets ialias. */
2232 def->args_ct[i].ialias = 1;
2233 def->args_ct[i].alias_index = o;
29f5e925
RH
2234 if (def->args_ct[i].pair) {
2235 saw_alias_pair = true;
2236 }
8940ea0d
PMD
2237 tcg_debug_assert(ct_str[1] == '\0');
2238 continue;
2239
2240 case '&':
2241 tcg_debug_assert(!input_p);
2242 def->args_ct[i].newreg = true;
2243 ct_str++;
2244 break;
29f5e925
RH
2245
2246 case 'p': /* plus */
2247 /* Allocate to the register after the previous. */
2248 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2249 o = i - 1;
2250 tcg_debug_assert(!def->args_ct[o].pair);
2251 tcg_debug_assert(!def->args_ct[o].ct);
2252 def->args_ct[i] = (TCGArgConstraint){
2253 .pair = 2,
2254 .pair_index = o,
2255 .regs = def->args_ct[o].regs << 1,
2256 };
2257 def->args_ct[o].pair = 1;
2258 def->args_ct[o].pair_index = i;
2259 tcg_debug_assert(ct_str[1] == '\0');
2260 continue;
2261
2262 case 'm': /* minus */
2263 /* Allocate to the register before the previous. */
2264 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2265 o = i - 1;
2266 tcg_debug_assert(!def->args_ct[o].pair);
2267 tcg_debug_assert(!def->args_ct[o].ct);
2268 def->args_ct[i] = (TCGArgConstraint){
2269 .pair = 1,
2270 .pair_index = o,
2271 .regs = def->args_ct[o].regs >> 1,
2272 };
2273 def->args_ct[o].pair = 2;
2274 def->args_ct[o].pair_index = i;
2275 tcg_debug_assert(ct_str[1] == '\0');
2276 continue;
8940ea0d
PMD
2277 }
2278
2279 do {
2280 switch (*ct_str) {
17280ff4
RH
2281 case 'i':
2282 def->args_ct[i].ct |= TCG_CT_CONST;
17280ff4 2283 break;
358b4923 2284
358b4923
RH
2285 /* Include all of the target-specific constraints. */
2286
2287#undef CONST
2288#define CONST(CASE, MASK) \
8940ea0d 2289 case CASE: def->args_ct[i].ct |= MASK; break;
358b4923 2290#define REGS(CASE, MASK) \
8940ea0d 2291 case CASE: def->args_ct[i].regs |= MASK; break;
358b4923
RH
2292
2293#include "tcg-target-con-str.h"
2294
2295#undef REGS
2296#undef CONST
17280ff4 2297 default:
8940ea0d
PMD
2298 case '0' ... '9':
2299 case '&':
29f5e925
RH
2300 case 'p':
2301 case 'm':
17280ff4 2302 /* Typo in TCGTargetOpDef constraint. */
358b4923 2303 g_assert_not_reached();
c896fe29 2304 }
8940ea0d 2305 } while (*++ct_str != '\0');
c896fe29
FB
2306 }
2307
c68aaa18 2308 /* TCGTargetOpDef entry with too much information? */
eabb7b91 2309 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
c68aaa18 2310
29f5e925
RH
2311 /*
2312 * Fix up output pairs that are aliased with inputs.
2313 * When we created the alias, we copied pair from the output.
2314 * There are three cases:
2315 * (1a) Pairs of inputs alias pairs of outputs.
2316 * (1b) One input aliases the first of a pair of outputs.
2317 * (2) One input aliases the second of a pair of outputs.
2318 *
2319 * Case 1a is handled by making sure that the pair_index'es are
2320 * properly updated so that they appear the same as a pair of inputs.
2321 *
2322 * Case 1b is handled by setting the pair_index of the input to
2323 * itself, simply so it doesn't point to an unrelated argument.
2324 * Since we don't encounter the "second" during the input allocation
2325 * phase, nothing happens with the second half of the input pair.
2326 *
2327 * Case 2 is handled by setting the second input to pair=3, the
2328 * first output to pair=3, and the pair_index'es to match.
2329 */
2330 if (saw_alias_pair) {
2331 for (i = def->nb_oargs; i < nb_args; i++) {
2332 /*
2333 * Since [0-9pm] must be alone in the constraint string,
2334 * the only way they can both be set is if the pair comes
2335 * from the output alias.
2336 */
2337 if (!def->args_ct[i].ialias) {
2338 continue;
2339 }
2340 switch (def->args_ct[i].pair) {
2341 case 0:
2342 break;
2343 case 1:
2344 o = def->args_ct[i].alias_index;
2345 o2 = def->args_ct[o].pair_index;
2346 tcg_debug_assert(def->args_ct[o].pair == 1);
2347 tcg_debug_assert(def->args_ct[o2].pair == 2);
2348 if (def->args_ct[o2].oalias) {
2349 /* Case 1a */
2350 i2 = def->args_ct[o2].alias_index;
2351 tcg_debug_assert(def->args_ct[i2].pair == 2);
2352 def->args_ct[i2].pair_index = i;
2353 def->args_ct[i].pair_index = i2;
2354 } else {
2355 /* Case 1b */
2356 def->args_ct[i].pair_index = i;
2357 }
2358 break;
2359 case 2:
2360 o = def->args_ct[i].alias_index;
2361 o2 = def->args_ct[o].pair_index;
2362 tcg_debug_assert(def->args_ct[o].pair == 2);
2363 tcg_debug_assert(def->args_ct[o2].pair == 1);
2364 if (def->args_ct[o2].oalias) {
2365 /* Case 1a */
2366 i2 = def->args_ct[o2].alias_index;
2367 tcg_debug_assert(def->args_ct[i2].pair == 1);
2368 def->args_ct[i2].pair_index = i;
2369 def->args_ct[i].pair_index = i2;
2370 } else {
2371 /* Case 2 */
2372 def->args_ct[i].pair = 3;
2373 def->args_ct[o2].pair = 3;
2374 def->args_ct[i].pair_index = o2;
2375 def->args_ct[o2].pair_index = i;
2376 }
2377 break;
2378 default:
2379 g_assert_not_reached();
2380 }
2381 }
2382 }
2383
c896fe29
FB
2384 /* sort the constraints (XXX: this is just a heuristic) */
2385 sort_constraints(def, 0, def->nb_oargs);
2386 sort_constraints(def, def->nb_oargs, def->nb_iargs);
a9751609 2387 }
c896fe29
FB
2388}
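/*
 * Illustrative constraint-string walk, assuming a backend whose
 * tcg-target-con-str.h defines 'r' as all general registers: for an
 * op with constraints { "r", "0", "ri" }, the output gets the full
 * register set, the first input ("0") becomes an alias of output 0
 * (oalias/ialias are linked), and the second input ("ri") accepts
 * either a register or a TCG_CT_CONST immediate.
 */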
2389
0c627cdc
RH
2390void tcg_op_remove(TCGContext *s, TCGOp *op)
2391{
d88a117e
RH
2392 TCGLabel *label;
2393
2394 switch (op->opc) {
2395 case INDEX_op_br:
2396 label = arg_label(op->args[0]);
2397 label->refs--;
2398 break;
2399 case INDEX_op_brcond_i32:
2400 case INDEX_op_brcond_i64:
2401 label = arg_label(op->args[3]);
2402 label->refs--;
2403 break;
2404 case INDEX_op_brcond2_i32:
2405 label = arg_label(op->args[5]);
2406 label->refs--;
2407 break;
2408 default:
2409 break;
2410 }
2411
15fa08f8
RH
2412 QTAILQ_REMOVE(&s->ops, op, link);
2413 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
abebf925 2414 s->nb_ops--;
0c627cdc
RH
2415
2416#ifdef CONFIG_PROFILER
d73415a3 2417 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
0c627cdc
RH
2418#endif
2419}
2420
a80cdd31
RH
2421void tcg_remove_ops_after(TCGOp *op)
2422{
2423 TCGContext *s = tcg_ctx;
2424
2425 while (true) {
2426 TCGOp *last = tcg_last_op();
2427 if (last == op) {
2428 return;
2429 }
2430 tcg_op_remove(s, last);
2431 }
2432}
2433
d4478943 2434static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
5a18407f 2435{
15fa08f8 2436 TCGContext *s = tcg_ctx;
cb10bc63
RH
2437 TCGOp *op = NULL;
2438
2439 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
2440 QTAILQ_FOREACH(op, &s->free_ops, link) {
2441 if (nargs <= op->nargs) {
2442 QTAILQ_REMOVE(&s->free_ops, op, link);
2443 nargs = op->nargs;
2444 goto found;
2445 }
2446 }
15fa08f8 2447 }
cb10bc63
RH
2448
2449 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
2450 nargs = MAX(4, nargs);
2451 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
2452
2453 found:
15fa08f8
RH
2454 memset(op, 0, offsetof(TCGOp, link));
2455 op->opc = opc;
cb10bc63
RH
2456 op->nargs = nargs;
2457
2458 /* Check for bitfield overflow. */
2459 tcg_debug_assert(op->nargs == nargs);
5a18407f 2460
cb10bc63 2461 s->nb_ops++;
15fa08f8
RH
2462 return op;
2463}
2464
d4478943 2465TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
15fa08f8 2466{
d4478943 2467 TCGOp *op = tcg_op_alloc(opc, nargs);
15fa08f8
RH
2468 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2469 return op;
2470}
5a18407f 2471
d4478943
PMD
2472TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
2473 TCGOpcode opc, unsigned nargs)
15fa08f8 2474{
d4478943 2475 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2476 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
5a18407f
RH
2477 return new_op;
2478}
2479
d4478943
PMD
2480TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
2481 TCGOpcode opc, unsigned nargs)
5a18407f 2482{
d4478943 2483 TCGOp *new_op = tcg_op_alloc(opc, nargs);
15fa08f8 2484 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
5a18407f
RH
2485 return new_op;
2486}
2487
b4fc67c7
RH
2488/* Reachable analysis : remove unreachable code. */
2489static void reachable_code_pass(TCGContext *s)
2490{
2491 TCGOp *op, *op_next;
2492 bool dead = false;
2493
2494 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2495 bool remove = dead;
2496 TCGLabel *label;
b4fc67c7
RH
2497
2498 switch (op->opc) {
2499 case INDEX_op_set_label:
2500 label = arg_label(op->args[0]);
2501 if (label->refs == 0) {
2502 /*
2503 * While there is an occasional backward branch, virtually
2504 * all branches generated by the translators are forward.
2505 * Which means that generally we will have already removed
2506 * all references to the label that will be removed, and there is
2507 * little to be gained by iterating.
2508 */
2509 remove = true;
2510 } else {
2511 /* Once we see a label, insns become live again. */
2512 dead = false;
2513 remove = false;
2514
2515 /*
2516 * Optimization can fold conditional branches to unconditional.
2517 * If we find a label with one reference which is preceded by
2518 * an unconditional branch to it, remove both. This needed to
2519 * wait until the dead code in between them was removed.
2520 */
2521 if (label->refs == 1) {
eae3eb3e 2522 TCGOp *op_prev = QTAILQ_PREV(op, link);
b4fc67c7
RH
2523 if (op_prev->opc == INDEX_op_br &&
2524 label == arg_label(op_prev->args[0])) {
2525 tcg_op_remove(s, op_prev);
2526 remove = true;
2527 }
2528 }
2529 }
2530 break;
2531
2532 case INDEX_op_br:
2533 case INDEX_op_exit_tb:
2534 case INDEX_op_goto_ptr:
2535 /* Unconditional branches; everything following is dead. */
2536 dead = true;
2537 break;
2538
2539 case INDEX_op_call:
2540 /* Notice noreturn helper calls, raising exceptions. */
90163900 2541 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
b4fc67c7
RH
2542 dead = true;
2543 }
2544 break;
2545
2546 case INDEX_op_insn_start:
2547 /* Never remove -- we need to keep these for unwind. */
2548 remove = false;
2549 break;
2550
2551 default:
2552 break;
2553 }
2554
2555 if (remove) {
2556 tcg_op_remove(s, op);
2557 }
2558 }
2559}
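/*
 * Example of the label folding above: after the optimizer turns a
 * conditional branch into an unconditional one, the IR may contain
 *
 *     br $L1
 *     ... (unreachable, removed as dead)
 *     set_label $L1        <-- refs == 1
 *
 * and once the dead ops in between are gone, both the br and the
 * set_label are removed as well.
 */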
2560
c70fbf0a
RH
2561#define TS_DEAD 1
2562#define TS_MEM 2
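/*
 * Roughly, ts->state in the liveness passes combines these bits:
 * 0 means the value is live (a later op still reads it), TS_DEAD
 * means no later op reads it, TS_MEM means the canonical memory
 * slot holds (or must be made to hold) the value, and
 * TS_DEAD | TS_MEM is the fully-synced, dead state used for
 * globals at function and basic-block boundaries.
 */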
2563
5a18407f
RH
2564#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2565#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2566
25f49c5f
RH
2567/* For liveness_pass_1, the register preferences for a given temp. */
2568static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2569{
2570 return ts->state_ptr;
2571}
2572
2573/* For liveness_pass_1, reset the preferences for a given temp to the
2574 * maximal regset for its type.
2575 */
2576static inline void la_reset_pref(TCGTemp *ts)
2577{
2578 *la_temp_pref(ts)
2579 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2580}
2581
9c43b68d
AJ
2582/* liveness analysis: end of function: all temps are dead, and globals
2583 should be in memory. */
2616c808 2584static void la_func_end(TCGContext *s, int ng, int nt)
c896fe29 2585{
b83eabea
RH
2586 int i;
2587
2588 for (i = 0; i < ng; ++i) {
2589 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f 2590 la_reset_pref(&s->temps[i]);
b83eabea
RH
2591 }
2592 for (i = ng; i < nt; ++i) {
2593 s->temps[i].state = TS_DEAD;
25f49c5f 2594 la_reset_pref(&s->temps[i]);
b83eabea 2595 }
c896fe29
FB
2596}
2597
9c43b68d
AJ
2598/* liveness analysis: end of basic block: all temps are dead, globals
2599 and local temps should be in memory. */
2616c808 2600static void la_bb_end(TCGContext *s, int ng, int nt)
641d5fbe 2601{
b83eabea 2602 int i;
641d5fbe 2603
ee17db83
RH
2604 for (i = 0; i < nt; ++i) {
2605 TCGTemp *ts = &s->temps[i];
2606 int state;
2607
2608 switch (ts->kind) {
2609 case TEMP_FIXED:
2610 case TEMP_GLOBAL:
2611 case TEMP_LOCAL:
2612 state = TS_DEAD | TS_MEM;
2613 break;
2614 case TEMP_NORMAL:
c7482438 2615 case TEMP_EBB:
c0522136 2616 case TEMP_CONST:
ee17db83
RH
2617 state = TS_DEAD;
2618 break;
2619 default:
2620 g_assert_not_reached();
2621 }
2622 ts->state = state;
2623 la_reset_pref(ts);
641d5fbe
FB
2624 }
2625}
2626
f65a061c
RH
2627/* liveness analysis: sync globals back to memory. */
2628static void la_global_sync(TCGContext *s, int ng)
2629{
2630 int i;
2631
2632 for (i = 0; i < ng; ++i) {
25f49c5f
RH
2633 int state = s->temps[i].state;
2634 s->temps[i].state = state | TS_MEM;
2635 if (state == TS_DEAD) {
2636 /* If the global was previously dead, reset prefs. */
2637 la_reset_pref(&s->temps[i]);
2638 }
f65a061c
RH
2639 }
2640}
2641
b4cb76e6 2642/*
c7482438
RH
2643 * liveness analysis: conditional branch: all temps are dead unless
2644 * explicitly live-across-conditional-branch; globals and local temps
2645 * should be synced.
b4cb76e6
RH
2646 */
2647static void la_bb_sync(TCGContext *s, int ng, int nt)
2648{
2649 la_global_sync(s, ng);
2650
2651 for (int i = ng; i < nt; ++i) {
c0522136
RH
2652 TCGTemp *ts = &s->temps[i];
2653 int state;
2654
2655 switch (ts->kind) {
2656 case TEMP_LOCAL:
2657 state = ts->state;
2658 ts->state = state | TS_MEM;
b4cb76e6
RH
2659 if (state != TS_DEAD) {
2660 continue;
2661 }
c0522136
RH
2662 break;
2663 case TEMP_NORMAL:
b4cb76e6 2664 s->temps[i].state = TS_DEAD;
c0522136 2665 break;
c7482438 2666 case TEMP_EBB:
c0522136
RH
2667 case TEMP_CONST:
2668 continue;
2669 default:
2670 g_assert_not_reached();
b4cb76e6
RH
2671 }
2672 la_reset_pref(&s->temps[i]);
2673 }
2674}
2675
f65a061c
RH
2676/* liveness analysis: sync globals back to memory and kill. */
2677static void la_global_kill(TCGContext *s, int ng)
2678{
2679 int i;
2680
2681 for (i = 0; i < ng; i++) {
2682 s->temps[i].state = TS_DEAD | TS_MEM;
25f49c5f
RH
2683 la_reset_pref(&s->temps[i]);
2684 }
2685}
2686
2687/* liveness analysis: note live globals crossing calls. */
2688static void la_cross_call(TCGContext *s, int nt)
2689{
2690 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2691 int i;
2692
2693 for (i = 0; i < nt; i++) {
2694 TCGTemp *ts = &s->temps[i];
2695 if (!(ts->state & TS_DEAD)) {
2696 TCGRegSet *pset = la_temp_pref(ts);
2697 TCGRegSet set = *pset;
2698
2699 set &= mask;
2700 /* If the combination is not possible, restart. */
2701 if (set == 0) {
2702 set = tcg_target_available_regs[ts->type] & mask;
2703 }
2704 *pset = set;
2705 }
f65a061c
RH
2706 }
2707}
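/*
 * Example: a temp that stays live across a call keeps only the
 * call-saved registers in its preference set (the mask above is
 * ~tcg_target_call_clobber_regs); if that intersection would be
 * empty, the preference falls back to every call-saved register
 * valid for the temp's type.
 */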
2708
a1b3c48d 2709/* Liveness analysis : update the opc_arg_life array to tell if a
c896fe29
FB
2710 given input argument is dead. Instructions updating dead
2711 temporaries are removed. */
b83eabea 2712static void liveness_pass_1(TCGContext *s)
c896fe29 2713{
c70fbf0a 2714 int nb_globals = s->nb_globals;
2616c808 2715 int nb_temps = s->nb_temps;
15fa08f8 2716 TCGOp *op, *op_prev;
25f49c5f
RH
2717 TCGRegSet *prefs;
2718 int i;
2719
2720 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2721 for (i = 0; i < nb_temps; ++i) {
2722 s->temps[i].state_ptr = prefs + i;
2723 }
a1b3c48d 2724
ae36a246 2725 /* ??? Should be redundant with the exit_tb that ends the TB. */
2616c808 2726 la_func_end(s, nb_globals, nb_temps);
c896fe29 2727
eae3eb3e 2728 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
25f49c5f 2729 int nb_iargs, nb_oargs;
c45cb8bb
RH
2730 TCGOpcode opc_new, opc_new2;
2731 bool have_opc_new2;
a1b3c48d 2732 TCGLifeData arg_life = 0;
25f49c5f 2733 TCGTemp *ts;
c45cb8bb
RH
2734 TCGOpcode opc = op->opc;
2735 const TCGOpDef *def = &tcg_op_defs[opc];
2736
c45cb8bb 2737 switch (opc) {
c896fe29 2738 case INDEX_op_call:
c6e113f5 2739 {
39004a71
RH
2740 const TCGHelperInfo *info = tcg_call_info(op);
2741 int call_flags = tcg_call_flags(op);
c896fe29 2742
cd9090aa
RH
2743 nb_oargs = TCGOP_CALLO(op);
2744 nb_iargs = TCGOP_CALLI(op);
c6e113f5 2745
c45cb8bb 2746 /* pure functions can be removed if their result is unused */
78505279 2747 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
cf066674 2748 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2749 ts = arg_temp(op->args[i]);
2750 if (ts->state != TS_DEAD) {
c6e113f5 2751 goto do_not_remove_call;
9c43b68d 2752 }
c6e113f5 2753 }
c45cb8bb 2754 goto do_remove;
152c35aa
RH
2755 }
2756 do_not_remove_call:
c896fe29 2757
25f49c5f 2758 /* Output args are dead. */
152c35aa 2759 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2760 ts = arg_temp(op->args[i]);
2761 if (ts->state & TS_DEAD) {
152c35aa
RH
2762 arg_life |= DEAD_ARG << i;
2763 }
25f49c5f 2764 if (ts->state & TS_MEM) {
152c35aa 2765 arg_life |= SYNC_ARG << i;
c6e113f5 2766 }
25f49c5f
RH
2767 ts->state = TS_DEAD;
2768 la_reset_pref(ts);
152c35aa 2769 }
78505279 2770
31fd884b
RH
2771 /* Not used -- it will be tcg_target_call_oarg_reg(). */
2772 memset(op->output_pref, 0, sizeof(op->output_pref));
2773
152c35aa
RH
2774 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2775 TCG_CALL_NO_READ_GLOBALS))) {
f65a061c 2776 la_global_kill(s, nb_globals);
152c35aa 2777 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
f65a061c 2778 la_global_sync(s, nb_globals);
152c35aa 2779 }
b9c18f56 2780
25f49c5f 2781 /* Record arguments that die in this helper. */
152c35aa 2782 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
25f49c5f 2783 ts = arg_temp(op->args[i]);
39004a71 2784 if (ts->state & TS_DEAD) {
152c35aa 2785 arg_life |= DEAD_ARG << i;
c6e113f5 2786 }
152c35aa 2787 }
25f49c5f
RH
2788
2789 /* For all live registers, remove call-clobbered prefs. */
2790 la_cross_call(s, nb_temps);
2791
39004a71
RH
2792 /*
2793 * Input arguments are live for preceding opcodes.
2794 *
2795 * For those arguments that die, and will be allocated in
2796 * registers, clear the register set for that arg, to be
2797 * filled in below. For args that will be on the stack,
2798 * reset to any available reg. Process arguments in reverse
2799 * order so that if a temp is used more than once, the stack
2800 * reset to max happens before the register reset to 0.
2801 */
2802 for (i = nb_iargs - 1; i >= 0; i--) {
2803 const TCGCallArgumentLoc *loc = &info->in[i];
2804 ts = arg_temp(op->args[nb_oargs + i]);
25f49c5f 2805
39004a71
RH
2806 if (ts->state & TS_DEAD) {
2807 switch (loc->kind) {
2808 case TCG_CALL_ARG_NORMAL:
2809 case TCG_CALL_ARG_EXTEND_U:
2810 case TCG_CALL_ARG_EXTEND_S:
2811 if (REG_P(loc)) {
2812 *la_temp_pref(ts) = 0;
2813 break;
2814 }
2815 /* fall through */
2816 default:
2817 *la_temp_pref(ts) =
2818 tcg_target_available_regs[ts->type];
2819 break;
2820 }
25f49c5f
RH
2821 ts->state &= ~TS_DEAD;
2822 }
2823 }
2824
39004a71
RH
2825 /*
2826 * For each input argument, add its input register to prefs.
2827 * If a temp is used once, this produces a single set bit;
2828 * if a temp is used multiple times, this produces a set.
2829 */
2830 for (i = 0; i < nb_iargs; i++) {
2831 const TCGCallArgumentLoc *loc = &info->in[i];
2832 ts = arg_temp(op->args[nb_oargs + i]);
2833
2834 switch (loc->kind) {
2835 case TCG_CALL_ARG_NORMAL:
2836 case TCG_CALL_ARG_EXTEND_U:
2837 case TCG_CALL_ARG_EXTEND_S:
2838 if (REG_P(loc)) {
2839 tcg_regset_set_reg(*la_temp_pref(ts),
2840 tcg_target_call_iarg_regs[loc->arg_slot]);
2841 }
2842 break;
2843 default:
2844 break;
c19f47bf 2845 }
c896fe29 2846 }
c896fe29 2847 }
c896fe29 2848 break;
765b842a 2849 case INDEX_op_insn_start:
c896fe29 2850 break;
5ff9d6a4 2851 case INDEX_op_discard:
5ff9d6a4 2852 /* mark the temporary as dead */
25f49c5f
RH
2853 ts = arg_temp(op->args[0]);
2854 ts->state = TS_DEAD;
2855 la_reset_pref(ts);
5ff9d6a4 2856 break;
1305c451
RH
2857
2858 case INDEX_op_add2_i32:
c45cb8bb 2859 opc_new = INDEX_op_add_i32;
f1fae40c 2860 goto do_addsub2;
1305c451 2861 case INDEX_op_sub2_i32:
c45cb8bb 2862 opc_new = INDEX_op_sub_i32;
f1fae40c
RH
2863 goto do_addsub2;
2864 case INDEX_op_add2_i64:
c45cb8bb 2865 opc_new = INDEX_op_add_i64;
f1fae40c
RH
2866 goto do_addsub2;
2867 case INDEX_op_sub2_i64:
c45cb8bb 2868 opc_new = INDEX_op_sub_i64;
f1fae40c 2869 do_addsub2:
1305c451
RH
2870 nb_iargs = 4;
2871 nb_oargs = 2;
2872 /* Test if the high part of the operation is dead, but not
2873 the low part. The result can be optimized to a simple
2874 add or sub. This happens often for an x86_64 guest when the
2875 cpu mode is set to 32 bit. */
b83eabea
RH
2876 if (arg_temp(op->args[1])->state == TS_DEAD) {
2877 if (arg_temp(op->args[0])->state == TS_DEAD) {
1305c451
RH
2878 goto do_remove;
2879 }
c45cb8bb
RH
2880 /* Replace the opcode and adjust the args in place,
2881 leaving 3 unused args at the end. */
2882 op->opc = opc = opc_new;
efee3746
RH
2883 op->args[1] = op->args[2];
2884 op->args[2] = op->args[4];
1305c451
RH
2885 /* Fall through and mark the single-word operation live. */
2886 nb_iargs = 2;
2887 nb_oargs = 1;
2888 }
2889 goto do_not_remove;
2890
1414968a 2891 case INDEX_op_mulu2_i32:
c45cb8bb
RH
2892 opc_new = INDEX_op_mul_i32;
2893 opc_new2 = INDEX_op_muluh_i32;
2894 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
03271524 2895 goto do_mul2;
f1fae40c 2896 case INDEX_op_muls2_i32:
c45cb8bb
RH
2897 opc_new = INDEX_op_mul_i32;
2898 opc_new2 = INDEX_op_mulsh_i32;
2899 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
f1fae40c
RH
2900 goto do_mul2;
2901 case INDEX_op_mulu2_i64:
c45cb8bb
RH
2902 opc_new = INDEX_op_mul_i64;
2903 opc_new2 = INDEX_op_muluh_i64;
2904 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
03271524 2905 goto do_mul2;
f1fae40c 2906 case INDEX_op_muls2_i64:
c45cb8bb
RH
2907 opc_new = INDEX_op_mul_i64;
2908 opc_new2 = INDEX_op_mulsh_i64;
2909 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
03271524 2910 goto do_mul2;
f1fae40c 2911 do_mul2:
1414968a
RH
2912 nb_iargs = 2;
2913 nb_oargs = 2;
b83eabea
RH
2914 if (arg_temp(op->args[1])->state == TS_DEAD) {
2915 if (arg_temp(op->args[0])->state == TS_DEAD) {
03271524 2916 /* Both parts of the operation are dead. */
1414968a
RH
2917 goto do_remove;
2918 }
03271524 2919 /* The high part of the operation is dead; generate the low. */
c45cb8bb 2920 op->opc = opc = opc_new;
efee3746
RH
2921 op->args[1] = op->args[2];
2922 op->args[2] = op->args[3];
b83eabea 2923 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
c45cb8bb
RH
2924 /* The low part of the operation is dead; generate the high. */
2925 op->opc = opc = opc_new2;
efee3746
RH
2926 op->args[0] = op->args[1];
2927 op->args[1] = op->args[2];
2928 op->args[2] = op->args[3];
03271524
RH
2929 } else {
2930 goto do_not_remove;
1414968a 2931 }
03271524
RH
2932 /* Mark the single-word operation live. */
2933 nb_oargs = 1;
1414968a
RH
2934 goto do_not_remove;
2935
c896fe29 2936 default:
1305c451 2937 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
49516bc0
AJ
2938 nb_iargs = def->nb_iargs;
2939 nb_oargs = def->nb_oargs;
c896fe29 2940
49516bc0
AJ
2941 /* Test if the operation can be removed because all
2942 its outputs are dead. We assume that nb_oargs == 0
2943 implies side effects */
2944 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
c45cb8bb 2945 for (i = 0; i < nb_oargs; i++) {
b83eabea 2946 if (arg_temp(op->args[i])->state != TS_DEAD) {
49516bc0 2947 goto do_not_remove;
9c43b68d 2948 }
49516bc0 2949 }
152c35aa
RH
2950 goto do_remove;
2951 }
2952 goto do_not_remove;
49516bc0 2953
152c35aa
RH
2954 do_remove:
2955 tcg_op_remove(s, op);
2956 break;
2957
2958 do_not_remove:
152c35aa 2959 for (i = 0; i < nb_oargs; i++) {
25f49c5f
RH
2960 ts = arg_temp(op->args[i]);
2961
2962 /* Remember the preference of the uses that followed. */
31fd884b
RH
2963 if (i < ARRAY_SIZE(op->output_pref)) {
2964 op->output_pref[i] = *la_temp_pref(ts);
2965 }
25f49c5f
RH
2966
2967 /* Output args are dead. */
2968 if (ts->state & TS_DEAD) {
152c35aa 2969 arg_life |= DEAD_ARG << i;
49516bc0 2970 }
25f49c5f 2971 if (ts->state & TS_MEM) {
152c35aa
RH
2972 arg_life |= SYNC_ARG << i;
2973 }
25f49c5f
RH
2974 ts->state = TS_DEAD;
2975 la_reset_pref(ts);
152c35aa 2976 }
49516bc0 2977
25f49c5f 2978 /* If end of basic block, update. */
ae36a246
RH
2979 if (def->flags & TCG_OPF_BB_EXIT) {
2980 la_func_end(s, nb_globals, nb_temps);
b4cb76e6
RH
2981 } else if (def->flags & TCG_OPF_COND_BRANCH) {
2982 la_bb_sync(s, nb_globals, nb_temps);
ae36a246 2983 } else if (def->flags & TCG_OPF_BB_END) {
2616c808 2984 la_bb_end(s, nb_globals, nb_temps);
152c35aa 2985 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
f65a061c 2986 la_global_sync(s, nb_globals);
25f49c5f
RH
2987 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2988 la_cross_call(s, nb_temps);
2989 }
152c35aa
RH
2990 }
2991
25f49c5f 2992 /* Record arguments that die in this opcode. */
152c35aa 2993 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
2994 ts = arg_temp(op->args[i]);
2995 if (ts->state & TS_DEAD) {
152c35aa 2996 arg_life |= DEAD_ARG << i;
c896fe29 2997 }
c896fe29 2998 }
25f49c5f
RH
2999
3000 /* Input arguments are live for preceding opcodes. */
152c35aa 3001 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
25f49c5f
RH
3002 ts = arg_temp(op->args[i]);
3003 if (ts->state & TS_DEAD) {
3004 /* For operands that were dead, initially allow
3005 all regs for the type. */
3006 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
3007 ts->state &= ~TS_DEAD;
3008 }
3009 }
3010
3011 /* Incorporate constraints for this operand. */
3012 switch (opc) {
3013 case INDEX_op_mov_i32:
3014 case INDEX_op_mov_i64:
3015 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3016 have proper constraints. That said, special case
3017 moves to propagate preferences backward. */
3018 if (IS_DEAD_ARG(1)) {
3019 *la_temp_pref(arg_temp(op->args[0]))
3020 = *la_temp_pref(arg_temp(op->args[1]));
3021 }
3022 break;
3023
3024 default:
3025 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3026 const TCGArgConstraint *ct = &def->args_ct[i];
3027 TCGRegSet set, *pset;
3028
3029 ts = arg_temp(op->args[i]);
3030 pset = la_temp_pref(ts);
3031 set = *pset;
3032
9be0d080 3033 set &= ct->regs;
bc2b17e6 3034 if (ct->ialias) {
31fd884b 3035 set &= output_pref(op, ct->alias_index);
25f49c5f
RH
3036 }
3037 /* If the combination is not possible, restart. */
3038 if (set == 0) {
9be0d080 3039 set = ct->regs;
25f49c5f
RH
3040 }
3041 *pset = set;
3042 }
3043 break;
152c35aa 3044 }
c896fe29
FB
3045 break;
3046 }
bee158cb 3047 op->life = arg_life;
1ff0a2c5 3048 }
c896fe29 3049}
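/*
 * Example of the double-word narrowing performed above: when only
 * the low half of an add2_i32 result is used (as the comment notes,
 * common when an x86_64 guest runs in 32-bit mode), the high output
 * is TS_DEAD and the op is rewritten in place as a plain add_i32 on
 * the low-part operands; mulu2/muls2 are likewise reduced to mul,
 * or to muluh/mulsh, depending on which half is dead.
 */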
c896fe29 3050
5a18407f 3051/* Liveness analysis: Convert indirect regs to direct temporaries. */
b83eabea 3052static bool liveness_pass_2(TCGContext *s)
5a18407f
RH
3053{
3054 int nb_globals = s->nb_globals;
15fa08f8 3055 int nb_temps, i;
5a18407f 3056 bool changes = false;
15fa08f8 3057 TCGOp *op, *op_next;
5a18407f 3058
5a18407f
RH
3059 /* Create a temporary for each indirect global. */
3060 for (i = 0; i < nb_globals; ++i) {
3061 TCGTemp *its = &s->temps[i];
3062 if (its->indirect_reg) {
3063 TCGTemp *dts = tcg_temp_alloc(s);
3064 dts->type = its->type;
3065 dts->base_type = its->base_type;
e1e64652 3066 dts->temp_subindex = its->temp_subindex;
c7482438 3067 dts->kind = TEMP_EBB;
b83eabea
RH
3068 its->state_ptr = dts;
3069 } else {
3070 its->state_ptr = NULL;
5a18407f 3071 }
b83eabea
RH
3072 /* All globals begin dead. */
3073 its->state = TS_DEAD;
3074 }
3075 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3076 TCGTemp *its = &s->temps[i];
3077 its->state_ptr = NULL;
3078 its->state = TS_DEAD;
5a18407f 3079 }
5a18407f 3080
15fa08f8 3081 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
5a18407f
RH
3082 TCGOpcode opc = op->opc;
3083 const TCGOpDef *def = &tcg_op_defs[opc];
3084 TCGLifeData arg_life = op->life;
3085 int nb_iargs, nb_oargs, call_flags;
b83eabea 3086 TCGTemp *arg_ts, *dir_ts;
5a18407f 3087
5a18407f 3088 if (opc == INDEX_op_call) {
cd9090aa
RH
3089 nb_oargs = TCGOP_CALLO(op);
3090 nb_iargs = TCGOP_CALLI(op);
90163900 3091 call_flags = tcg_call_flags(op);
5a18407f
RH
3092 } else {
3093 nb_iargs = def->nb_iargs;
3094 nb_oargs = def->nb_oargs;
3095
3096 /* Set flags similar to how calls require. */
b4cb76e6
RH
3097 if (def->flags & TCG_OPF_COND_BRANCH) {
3098 /* Like reading globals: sync_globals */
3099 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3100 } else if (def->flags & TCG_OPF_BB_END) {
5a18407f
RH
3101 /* Like writing globals: save_globals */
3102 call_flags = 0;
3103 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3104 /* Like reading globals: sync_globals */
3105 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3106 } else {
3107 /* No effect on globals. */
3108 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3109 TCG_CALL_NO_WRITE_GLOBALS);
3110 }
3111 }
3112
3113 /* Make sure that input arguments are available. */
3114 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3115 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3116 dir_ts = arg_ts->state_ptr;
3117 if (dir_ts && arg_ts->state == TS_DEAD) {
3118 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3119 ? INDEX_op_ld_i32
3120 : INDEX_op_ld_i64);
3121 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3122
3123 lop->args[0] = temp_arg(dir_ts);
3124 lop->args[1] = temp_arg(arg_ts->mem_base);
3125 lop->args[2] = arg_ts->mem_offset;
3126
3127 /* Loaded, but synced with memory. */
3128 arg_ts->state = TS_MEM;
5a18407f
RH
3129 }
3130 }
3131
3132 /* Perform input replacement, and mark inputs that became dead.
3133 No action is required except keeping temp_state up to date
3134 so that we reload when needed. */
3135 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
b83eabea 3136 arg_ts = arg_temp(op->args[i]);
39004a71
RH
3137 dir_ts = arg_ts->state_ptr;
3138 if (dir_ts) {
3139 op->args[i] = temp_arg(dir_ts);
3140 changes = true;
3141 if (IS_DEAD_ARG(i)) {
3142 arg_ts->state = TS_DEAD;
5a18407f
RH
3143 }
3144 }
3145 }
3146
3147 /* Liveness analysis should ensure that the following are
3148 all correct, for call sites and basic block end points. */
3149 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
3150 /* Nothing to do */
3151 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
3152 for (i = 0; i < nb_globals; ++i) {
3153 /* Liveness should see that globals are synced back,
3154 that is, either TS_DEAD or TS_MEM. */
b83eabea
RH
3155 arg_ts = &s->temps[i];
3156 tcg_debug_assert(arg_ts->state_ptr == 0
3157 || arg_ts->state != 0);
5a18407f
RH
3158 }
3159 } else {
3160 for (i = 0; i < nb_globals; ++i) {
3161 /* Liveness should see that globals are saved back,
3162 that is, TS_DEAD, waiting to be reloaded. */
b83eabea
RH
3163 arg_ts = &s->temps[i];
3164 tcg_debug_assert(arg_ts->state_ptr == 0
3165 || arg_ts->state == TS_DEAD);
5a18407f
RH
3166 }
3167 }
3168
3169 /* Outputs become available. */
61f15c48
RH
3170 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
3171 arg_ts = arg_temp(op->args[0]);
b83eabea 3172 dir_ts = arg_ts->state_ptr;
61f15c48
RH
3173 if (dir_ts) {
3174 op->args[0] = temp_arg(dir_ts);
3175 changes = true;
3176
3177 /* The output is now live and modified. */
3178 arg_ts->state = 0;
3179
3180 if (NEED_SYNC_ARG(0)) {
3181 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3182 ? INDEX_op_st_i32
3183 : INDEX_op_st_i64);
d4478943 3184 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
61f15c48
RH
3185 TCGTemp *out_ts = dir_ts;
3186
3187 if (IS_DEAD_ARG(0)) {
3188 out_ts = arg_temp(op->args[1]);
3189 arg_ts->state = TS_DEAD;
3190 tcg_op_remove(s, op);
3191 } else {
3192 arg_ts->state = TS_MEM;
3193 }
3194
3195 sop->args[0] = temp_arg(out_ts);
3196 sop->args[1] = temp_arg(arg_ts->mem_base);
3197 sop->args[2] = arg_ts->mem_offset;
3198 } else {
3199 tcg_debug_assert(!IS_DEAD_ARG(0));
3200 }
5a18407f 3201 }
61f15c48
RH
3202 } else {
3203 for (i = 0; i < nb_oargs; i++) {
3204 arg_ts = arg_temp(op->args[i]);
3205 dir_ts = arg_ts->state_ptr;
3206 if (!dir_ts) {
3207 continue;
3208 }
3209 op->args[i] = temp_arg(dir_ts);
3210 changes = true;
5a18407f 3211
61f15c48
RH
3212 /* The output is now live and modified. */
3213 arg_ts->state = 0;
5a18407f 3214
61f15c48
RH
3215 /* Sync outputs upon their last write. */
3216 if (NEED_SYNC_ARG(i)) {
3217 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3218 ? INDEX_op_st_i32
3219 : INDEX_op_st_i64);
d4478943 3220 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
5a18407f 3221
61f15c48
RH
3222 sop->args[0] = temp_arg(dir_ts);
3223 sop->args[1] = temp_arg(arg_ts->mem_base);
3224 sop->args[2] = arg_ts->mem_offset;
5a18407f 3225
61f15c48
RH
3226 arg_ts->state = TS_MEM;
3227 }
3228 /* Drop outputs that are dead. */
3229 if (IS_DEAD_ARG(i)) {
3230 arg_ts->state = TS_DEAD;
3231 }
5a18407f
RH
3232 }
3233 }
3234 }
3235
3236 return changes;
3237}
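/*
 * Example: for an indirect global (one whose canonical home is a
 * memory slot reached through another temp), pass 2 creates a
 * shadow TEMP_EBB temporary, rewrites uses to it, and materializes
 * explicit ld_i32/ld_i64 ops before the first use and st_i32/st_i64
 * ops after the last write, so the register allocator only ever
 * deals with direct temporaries.
 */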
3238
2272e4a7 3239static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
c896fe29 3240{
31c96417
RH
3241 int size = tcg_type_size(ts->type);
3242 int align;
3243 intptr_t off;
c1c09194
RH
3244
3245 switch (ts->type) {
3246 case TCG_TYPE_I32:
31c96417 3247 align = 4;
c1c09194
RH
3248 break;
3249 case TCG_TYPE_I64:
3250 case TCG_TYPE_V64:
31c96417 3251 align = 8;
c1c09194
RH
3252 break;
3253 case TCG_TYPE_V128:
c1c09194
RH
3254 case TCG_TYPE_V256:
3255 /* Note that we do not require aligned storage for V256. */
31c96417 3256 align = 16;
c1c09194
RH
3257 break;
3258 default:
3259 g_assert_not_reached();
b591dc59 3260 }
c1c09194 3261
b9537d59
RH
3262 /*
3263 * Assume the stack is sufficiently aligned.
3264 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
3265 * and do not require 16 byte vector alignment. This seems slightly
3266 * easier than fully parameterizing the above switch statement.
3267 */
3268 align = MIN(TCG_TARGET_STACK_ALIGN, align);
c1c09194 3269 off = ROUND_UP(s->current_frame_offset, align);
732d5897
RH
3270
3271 /* If we've exhausted the stack frame, restart with a smaller TB. */
3272 if (off + size > s->frame_end) {
3273 tcg_raise_tb_overflow(s);
3274 }
c1c09194
RH
3275 s->current_frame_offset = off + size;
3276
3277 ts->mem_offset = off;
9defd1bd
RH
3278#if defined(__sparc__)
3279 ts->mem_offset += TCG_TARGET_STACK_BIAS;
3280#endif
b3a62939 3281 ts->mem_base = s->frame_temp;
c896fe29 3282 ts->mem_allocated = 1;
c896fe29
FB
3283}
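/*
 * Worked example: with TCG_TARGET_STACK_ALIGN of 16 and
 * current_frame_offset at 0x44, a TCG_TYPE_I64 temp uses align 8,
 * so off = ROUND_UP(0x44, 8) = 0x48; the temp is placed at frame
 * offset 0x48 and current_frame_offset advances to 0x50.
 */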
3284
098859f1
RH
3285/* Assign @reg to @ts, and update reg_to_temp[]. */
3286static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
3287{
3288 if (ts->val_type == TEMP_VAL_REG) {
3289 TCGReg old = ts->reg;
3290 tcg_debug_assert(s->reg_to_temp[old] == ts);
3291 if (old == reg) {
3292 return;
3293 }
3294 s->reg_to_temp[old] = NULL;
3295 }
3296 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3297 s->reg_to_temp[reg] = ts;
3298 ts->val_type = TEMP_VAL_REG;
3299 ts->reg = reg;
3300}
3301
3302/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
3303static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
3304{
3305 tcg_debug_assert(type != TEMP_VAL_REG);
3306 if (ts->val_type == TEMP_VAL_REG) {
3307 TCGReg reg = ts->reg;
3308 tcg_debug_assert(s->reg_to_temp[reg] == ts);
3309 s->reg_to_temp[reg] = NULL;
3310 }
3311 ts->val_type = type;
3312}
3313
b722452a 3314static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
b3915dbb 3315
59d7c14e
RH
3316/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
3317 mark it free; otherwise mark it dead. */
3318static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 3319{
c0522136
RH
3320 TCGTempVal new_type;
3321
3322 switch (ts->kind) {
3323 case TEMP_FIXED:
59d7c14e 3324 return;
c0522136
RH
3325 case TEMP_GLOBAL:
3326 case TEMP_LOCAL:
3327 new_type = TEMP_VAL_MEM;
3328 break;
3329 case TEMP_NORMAL:
c7482438 3330 case TEMP_EBB:
c0522136
RH
3331 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
3332 break;
3333 case TEMP_CONST:
3334 new_type = TEMP_VAL_CONST;
3335 break;
3336 default:
3337 g_assert_not_reached();
59d7c14e 3338 }
098859f1 3339 set_temp_val_nonreg(s, ts, new_type);
59d7c14e 3340}
7f6ceedf 3341
59d7c14e
RH
3342/* Mark a temporary as dead. */
3343static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3344{
3345 temp_free_or_dead(s, ts, 1);
3346}
3347
3348/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3349 register needs to be allocated to store a constant. If 'free_or_dead'
3350 is non-zero, subsequently release the temporary; if it is positive, the
3351 temp is dead; if it is negative, the temp is free. */
98b4e186
RH
3352static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3353 TCGRegSet preferred_regs, int free_or_dead)
59d7c14e 3354{
c0522136 3355 if (!temp_readonly(ts) && !ts->mem_coherent) {
7f6ceedf 3356 if (!ts->mem_allocated) {
2272e4a7 3357 temp_allocate_frame(s, ts);
59d7c14e 3358 }
59d7c14e
RH
3359 switch (ts->val_type) {
3360 case TEMP_VAL_CONST:
3361 /* If we're going to free the temp immediately, then we won't
3362 require it later in a register, so attempt to store the
3363 constant to memory directly. */
3364 if (free_or_dead
3365 && tcg_out_sti(s, ts->type, ts->val,
3366 ts->mem_base->reg, ts->mem_offset)) {
3367 break;
3368 }
3369 temp_load(s, ts, tcg_target_available_regs[ts->type],
98b4e186 3370 allocated_regs, preferred_regs);
59d7c14e
RH
3371 /* fallthrough */
3372
3373 case TEMP_VAL_REG:
3374 tcg_out_st(s, ts->type, ts->reg,
3375 ts->mem_base->reg, ts->mem_offset);
3376 break;
3377
3378 case TEMP_VAL_MEM:
3379 break;
3380
3381 case TEMP_VAL_DEAD:
3382 default:
3383 tcg_abort();
3384 }
3385 ts->mem_coherent = 1;
3386 }
3387 if (free_or_dead) {
3388 temp_free_or_dead(s, ts, free_or_dead);
7f6ceedf 3389 }
7f6ceedf
AJ
3390}
3391
c896fe29 3392/* free register 'reg' by spilling the corresponding temporary if necessary */
b3915dbb 3393static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
c896fe29 3394{
f8b2f202 3395 TCGTemp *ts = s->reg_to_temp[reg];
f8b2f202 3396 if (ts != NULL) {
98b4e186 3397 temp_sync(s, ts, allocated_regs, 0, -1);
c896fe29
FB
3398 }
3399}
3400
b016486e
RH
3401/**
3402 * tcg_reg_alloc:
3403 * @required_regs: Set of registers in which we must allocate.
3404 * @allocated_regs: Set of registers which must be avoided.
3405 * @preferred_regs: Set of registers we should prefer.
3406 * @rev: True if we search the registers in "indirect" order.
3407 *
3408 * The allocated register must be in @required_regs & ~@allocated_regs,
3409 * but if we can put it in @preferred_regs we may save a move later.
3410 */
3411static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3412 TCGRegSet allocated_regs,
3413 TCGRegSet preferred_regs, bool rev)
c896fe29 3414{
b016486e
RH
3415 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3416 TCGRegSet reg_ct[2];
91478cef 3417 const int *order;
c896fe29 3418
b016486e
RH
3419 reg_ct[1] = required_regs & ~allocated_regs;
3420 tcg_debug_assert(reg_ct[1] != 0);
3421 reg_ct[0] = reg_ct[1] & preferred_regs;
3422
3423 /* Skip the preferred_regs option if it cannot be satisfied,
3424 or if the preference made no difference. */
3425 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3426
91478cef 3427 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
c896fe29 3428
b016486e
RH
3429 /* Try free registers, preferences first. */
3430 for (j = f; j < 2; j++) {
3431 TCGRegSet set = reg_ct[j];
3432
3433 if (tcg_regset_single(set)) {
3434 /* One register in the set. */
3435 TCGReg reg = tcg_regset_first(set);
3436 if (s->reg_to_temp[reg] == NULL) {
3437 return reg;
3438 }
3439 } else {
3440 for (i = 0; i < n; i++) {
3441 TCGReg reg = order[i];
3442 if (s->reg_to_temp[reg] == NULL &&
3443 tcg_regset_test_reg(set, reg)) {
3444 return reg;
3445 }
3446 }
3447 }
c896fe29
FB
3448 }
3449
b016486e
RH
3450 /* We must spill something. */
3451 for (j = f; j < 2; j++) {
3452 TCGRegSet set = reg_ct[j];
3453
3454 if (tcg_regset_single(set)) {
3455 /* One register in the set. */
3456 TCGReg reg = tcg_regset_first(set);
b3915dbb 3457 tcg_reg_free(s, reg, allocated_regs);
c896fe29 3458 return reg;
b016486e
RH
3459 } else {
3460 for (i = 0; i < n; i++) {
3461 TCGReg reg = order[i];
3462 if (tcg_regset_test_reg(set, reg)) {
3463 tcg_reg_free(s, reg, allocated_regs);
3464 return reg;
3465 }
3466 }
c896fe29
FB
3467 }
3468 }
3469
3470 tcg_abort();
3471}
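/*
 * Example: with required_regs covering, say, r0-r15, allocated_regs
 * containing r0, and preferred_regs containing only r3, a free r3
 * is returned directly; if r3 is busy the search falls back to the
 * first free register in allocation order, and only when nothing in
 * the set is free does the routine spill an occupant via
 * tcg_reg_free().
 */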
3472
29f5e925
RH
3473static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
3474 TCGRegSet allocated_regs,
3475 TCGRegSet preferred_regs, bool rev)
3476{
3477 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3478 TCGRegSet reg_ct[2];
3479 const int *order;
3480
3481 /* Ensure that if I is not in allocated_regs, I+1 is not either. */
3482 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
3483 tcg_debug_assert(reg_ct[1] != 0);
3484 reg_ct[0] = reg_ct[1] & preferred_regs;
3485
3486 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3487
3488 /*
3489 * Skip the preferred_regs option if it cannot be satisfied,
3490 * or if the preference made no difference.
3491 */
3492 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3493
3494 /*
3495 * Minimize the number of flushes by looking for 2 free registers first,
3496 * then a single flush, then two flushes.
3497 */
3498 for (fmin = 2; fmin >= 0; fmin--) {
3499 for (j = k; j < 2; j++) {
3500 TCGRegSet set = reg_ct[j];
3501
3502 for (i = 0; i < n; i++) {
3503 TCGReg reg = order[i];
3504
3505 if (tcg_regset_test_reg(set, reg)) {
3506 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
3507 if (f >= fmin) {
3508 tcg_reg_free(s, reg, allocated_regs);
3509 tcg_reg_free(s, reg + 1, allocated_regs);
3510 return reg;
3511 }
3512 }
3513 }
3514 }
3515 }
3516 tcg_abort();
3517}
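/*
 * Example: when allocating a register pair, the fmin loop above
 * first looks for a candidate where both reg and reg+1 are free
 * (no spills), then for one where a single occupant must be
 * flushed, and only as a last resort picks a pair requiring two
 * flushes.
 */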
3518
40ae5c62
RH
3519/* Make sure the temporary is in a register. If needed, allocate the register
3520 from DESIRED while avoiding ALLOCATED. */
3521static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
b722452a 3522 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
40ae5c62
RH
3523{
3524 TCGReg reg;
3525
3526 switch (ts->val_type) {
3527 case TEMP_VAL_REG:
3528 return;
3529 case TEMP_VAL_CONST:
b016486e 3530 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3531 preferred_regs, ts->indirect_base);
0a6a8bc8
RH
3532 if (ts->type <= TCG_TYPE_I64) {
3533 tcg_out_movi(s, ts->type, reg, ts->val);
3534 } else {
4e186175
RH
3535 uint64_t val = ts->val;
3536 MemOp vece = MO_64;
3537
3538 /*
3539 * Find the minimal vector element that matches the constant.
3540 * The targets will, in general, have to do this search anyway,
3541 * do this generically.
3542 */
4e186175
RH
3543 if (val == dup_const(MO_8, val)) {
3544 vece = MO_8;
3545 } else if (val == dup_const(MO_16, val)) {
3546 vece = MO_16;
0b4286dd 3547 } else if (val == dup_const(MO_32, val)) {
4e186175
RH
3548 vece = MO_32;
3549 }
3550
3551 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
0a6a8bc8 3552 }
40ae5c62
RH
3553 ts->mem_coherent = 0;
3554 break;
3555 case TEMP_VAL_MEM:
b016486e 3556 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
b722452a 3557 preferred_regs, ts->indirect_base);
40ae5c62
RH
3558 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3559 ts->mem_coherent = 1;
3560 break;
3561 case TEMP_VAL_DEAD:
3562 default:
3563 tcg_abort();
3564 }
098859f1 3565 set_temp_val_reg(s, ts, reg);
40ae5c62
RH
3566}
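/*
 * Example of the vector-constant search above: a TEMP_VAL_CONST of
 * 0x7f7f7f7f7f7f7f7f matches dup_const(MO_8, 0x7f), so the backend
 * is asked to dup an 8-bit element; 0x0001000100010001 matches at
 * MO_16; 0x1234567812345678 matches at MO_32; anything else is
 * emitted as a full 64-bit element.
 */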
3567
59d7c14e
RH
3568/* Save a temporary to memory. 'allocated_regs' is used in case a
3569 temporary register needs to be allocated to store a constant. */
3570static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1ad80729 3571{
5a18407f
RH
3572 /* The liveness analysis already ensures that globals are back
3573 in memory. Keep a tcg_debug_assert for safety. */
e01fa97d 3574 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
1ad80729
AJ
3575}
3576
9814dd27 3577/* save globals to their canonical location and assume they can be
e8996ee0
FB
3578   modified by the following code. 'allocated_regs' is used in case a
3579   temporary register needs to be allocated to store a constant. */
3580static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 3581{
ac3b8891 3582 int i, n;
c896fe29 3583
ac3b8891 3584 for (i = 0, n = s->nb_globals; i < n; i++) {
b13eb728 3585 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 3586 }
e5097dc8
FB
3587}
3588
3d5c5f87
AJ
3589/* sync globals to their canonical location and assume they can be
3590 read by the following code. 'allocated_regs' is used in case a
3591   temporary register needs to be allocated to store a constant. */
3592static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3593{
ac3b8891 3594 int i, n;
3d5c5f87 3595
ac3b8891 3596 for (i = 0, n = s->nb_globals; i < n; i++) {
12b9b11a 3597 TCGTemp *ts = &s->temps[i];
5a18407f 3598 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
ee17db83 3599 || ts->kind == TEMP_FIXED
5a18407f 3600 || ts->mem_coherent);
3d5c5f87
AJ
3601 }
3602}
3603
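/*
 * Note the asymmetry between the two helpers above: after save_globals()
 * every global must live only in its canonical memory slot (temp_save()
 * merely asserts this, since liveness has already forced it), whereas
 * sync_globals() lets a register copy survive as long as it is
 * mem_coherent.  tcg_reg_alloc_call() later in this file chooses between
 * them from the helper flags, roughly:
 *
 *     if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
 *         nothing to do
 *     } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
 *         sync_globals(s, allocated_regs);
 *     } else {
 *         save_globals(s, allocated_regs);
 *     }
 */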
e5097dc8 3604/* at the end of a basic block, we assume all temporaries are dead and
e8996ee0
FB
3605 all globals are stored at their canonical location. */
3606static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
e5097dc8 3607{
e5097dc8
FB
3608 int i;
3609
b13eb728
RH
3610 for (i = s->nb_globals; i < s->nb_temps; i++) {
3611 TCGTemp *ts = &s->temps[i];
c0522136
RH
3612
3613 switch (ts->kind) {
3614 case TEMP_LOCAL:
b13eb728 3615 temp_save(s, ts, allocated_regs);
c0522136
RH
3616 break;
3617 case TEMP_NORMAL:
c7482438 3618 case TEMP_EBB:
5a18407f
RH
3619 /* The liveness analysis already ensures that temps are dead.
3620               Keep a tcg_debug_assert for safety. */
3621 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136
RH
3622 break;
3623 case TEMP_CONST:
3624 /* Similarly, we should have freed any allocated register. */
3625 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3626 break;
3627 default:
3628 g_assert_not_reached();
c896fe29
FB
3629 }
3630 }
e8996ee0
FB
3631
3632 save_globals(s, allocated_regs);
c896fe29
FB
3633}
3634
b4cb76e6 3635/*
c7482438
RH
3636 * At a conditional branch, we assume all temporaries are dead unless
3637 * explicitly live-across-conditional-branch; all globals and local
3638 * temps are synced to their location.
b4cb76e6
RH
3639 */
3640static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3641{
3642 sync_globals(s, allocated_regs);
3643
3644 for (int i = s->nb_globals; i < s->nb_temps; i++) {
3645 TCGTemp *ts = &s->temps[i];
3646 /*
3647 * The liveness analysis already ensures that temps are dead.
3648 * Keep tcg_debug_asserts for safety.
3649 */
c0522136
RH
3650 switch (ts->kind) {
3651 case TEMP_LOCAL:
b4cb76e6 3652 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
c0522136
RH
3653 break;
3654 case TEMP_NORMAL:
b4cb76e6 3655 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c0522136 3656 break;
c7482438 3657 case TEMP_EBB:
c0522136
RH
3658 case TEMP_CONST:
3659 break;
3660 default:
3661 g_assert_not_reached();
b4cb76e6
RH
3662 }
3663 }
3664}
3665
bab1671f 3666/*
c58f4c97 3667 * Specialized code generation for INDEX_op_mov_* with a constant.
bab1671f 3668 */
0fe4fca4 3669static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
ba87719c
RH
3670 tcg_target_ulong val, TCGLifeData arg_life,
3671 TCGRegSet preferred_regs)
e8996ee0 3672{
d63e3b6e 3673 /* ENV should not be modified. */
e01fa97d 3674 tcg_debug_assert(!temp_readonly(ots));
59d7c14e
RH
3675
3676 /* The movi is not explicitly generated here. */
098859f1 3677 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
59d7c14e
RH
3678 ots->val = val;
3679 ots->mem_coherent = 0;
3680 if (NEED_SYNC_ARG(0)) {
ba87719c 3681 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
59d7c14e 3682 } else if (IS_DEAD_ARG(0)) {
f8bf00f1 3683 temp_dead(s, ots);
4c4e1ab2 3684 }
e8996ee0
FB
3685}
3686
bab1671f
RH
3687/*
3688 * Specialized code generation for INDEX_op_mov_*.
3689 */
dd186292 3690static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
c896fe29 3691{
dd186292 3692 const TCGLifeData arg_life = op->life;
69e3706d 3693 TCGRegSet allocated_regs, preferred_regs;
c896fe29 3694 TCGTemp *ts, *ots;
450445d5 3695 TCGType otype, itype;
098859f1 3696 TCGReg oreg, ireg;
c896fe29 3697
d21369f5 3698 allocated_regs = s->reserved_regs;
31fd884b 3699 preferred_regs = output_pref(op, 0);
43439139
RH
3700 ots = arg_temp(op->args[0]);
3701 ts = arg_temp(op->args[1]);
450445d5 3702
d63e3b6e 3703 /* ENV should not be modified. */
e01fa97d 3704 tcg_debug_assert(!temp_readonly(ots));
d63e3b6e 3705
450445d5
RH
3706 /* Note that otype != itype for no-op truncation. */
3707 otype = ots->type;
3708 itype = ts->type;
c29c1d7e 3709
0fe4fca4
PB
3710 if (ts->val_type == TEMP_VAL_CONST) {
3711 /* propagate constant or generate sti */
3712 tcg_target_ulong val = ts->val;
3713 if (IS_DEAD_ARG(1)) {
3714 temp_dead(s, ts);
3715 }
69e3706d 3716 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
0fe4fca4
PB
3717 return;
3718 }
3719
3720 /* If the source value is in memory we're going to be forced
3721 to have it in a register in order to perform the copy. Copy
3722 the SOURCE value into its own register first, that way we
3723 don't have to reload SOURCE the next time it is used. */
3724 if (ts->val_type == TEMP_VAL_MEM) {
69e3706d
RH
3725 temp_load(s, ts, tcg_target_available_regs[itype],
3726 allocated_regs, preferred_regs);
c29c1d7e 3727 }
0fe4fca4 3728 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
098859f1
RH
3729 ireg = ts->reg;
3730
d63e3b6e 3731 if (IS_DEAD_ARG(0)) {
c29c1d7e
AJ
3732 /* mov to a non-saved dead register makes no sense (even with
3733 liveness analysis disabled). */
eabb7b91 3734 tcg_debug_assert(NEED_SYNC_ARG(0));
c29c1d7e 3735 if (!ots->mem_allocated) {
2272e4a7 3736 temp_allocate_frame(s, ots);
c29c1d7e 3737 }
098859f1 3738 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
c29c1d7e 3739 if (IS_DEAD_ARG(1)) {
f8bf00f1 3740 temp_dead(s, ts);
c29c1d7e 3741 }
f8bf00f1 3742 temp_dead(s, ots);
098859f1
RH
3743 return;
3744 }
3745
3746 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3747 /*
3748 * The mov can be suppressed. Kill input first, so that it
3749 * is unlinked from reg_to_temp, then set the output to the
3750 * reg that we saved from the input.
3751 */
3752 temp_dead(s, ts);
3753 oreg = ireg;
c29c1d7e 3754 } else {
098859f1
RH
3755 if (ots->val_type == TEMP_VAL_REG) {
3756 oreg = ots->reg;
c896fe29 3757 } else {
098859f1
RH
3758 /* Make sure to not spill the input register during allocation. */
3759 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3760 allocated_regs | ((TCGRegSet)1 << ireg),
3761 preferred_regs, ots->indirect_base);
c896fe29 3762 }
098859f1
RH
3763 if (!tcg_out_mov(s, otype, oreg, ireg)) {
3764 /*
3765 * Cross register class move not supported.
3766 * Store the source register into the destination slot
3767 * and leave the destination temp as TEMP_VAL_MEM.
3768 */
3769 assert(!temp_readonly(ots));
3770 if (!ts->mem_allocated) {
3771 temp_allocate_frame(s, ots);
3772 }
3773 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
3774 set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
3775 ots->mem_coherent = 1;
3776 return;
c896fe29 3777 }
ec7a869d 3778 }
098859f1
RH
3779 set_temp_val_reg(s, ots, oreg);
3780 ots->mem_coherent = 0;
3781
3782 if (NEED_SYNC_ARG(0)) {
3783 temp_sync(s, ots, allocated_regs, 0, 0);
3784 }
c896fe29
FB
3785}
3786
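/*
 * Sketch of the register-renaming fast path above, assuming an x86-64
 * host (illustration only, not from the upstream source):
 *
 *     before:  t1 -> %rax, t1 dead after this op, t0 not in a register
 *     op:      mov_i64 t0, t1
 *     after:   t0 -> %rax; no host instruction is emitted
 *
 * Only when the input stays live, or is TEMP_FIXED such as env, does the
 * allocator pick a separate output register and emit a real host move.
 */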
bab1671f
RH
3787/*
3788 * Specialized code generation for INDEX_op_dup_vec.
3789 */
3790static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3791{
3792 const TCGLifeData arg_life = op->life;
3793 TCGRegSet dup_out_regs, dup_in_regs;
3794 TCGTemp *its, *ots;
3795 TCGType itype, vtype;
3796 unsigned vece;
31c96417 3797 int lowpart_ofs;
bab1671f
RH
3798 bool ok;
3799
3800 ots = arg_temp(op->args[0]);
3801 its = arg_temp(op->args[1]);
3802
3803 /* ENV should not be modified. */
e01fa97d 3804 tcg_debug_assert(!temp_readonly(ots));
bab1671f
RH
3805
3806 itype = its->type;
3807 vece = TCGOP_VECE(op);
3808 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3809
3810 if (its->val_type == TEMP_VAL_CONST) {
3811 /* Propagate constant via movi -> dupi. */
3812 tcg_target_ulong val = its->val;
3813 if (IS_DEAD_ARG(1)) {
3814 temp_dead(s, its);
3815 }
31fd884b 3816 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
bab1671f
RH
3817 return;
3818 }
3819
9be0d080
RH
3820 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3821 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
bab1671f
RH
3822
3823 /* Allocate the output register now. */
3824 if (ots->val_type != TEMP_VAL_REG) {
3825 TCGRegSet allocated_regs = s->reserved_regs;
098859f1 3826 TCGReg oreg;
bab1671f
RH
3827
3828 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3829 /* Make sure to not spill the input register. */
3830 tcg_regset_set_reg(allocated_regs, its->reg);
3831 }
098859f1 3832 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 3833 output_pref(op, 0), ots->indirect_base);
098859f1 3834 set_temp_val_reg(s, ots, oreg);
bab1671f
RH
3835 }
3836
3837 switch (its->val_type) {
3838 case TEMP_VAL_REG:
3839 /*
3840          * The dup constraints must be broad, covering all possible VECE.
3841          * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3842 * to fail, indicating that extra moves are required for that case.
3843 */
3844 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
3845 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
3846 goto done;
3847 }
3848 /* Try again from memory or a vector input register. */
3849 }
3850 if (!its->mem_coherent) {
3851 /*
3852 * The input register is not synced, and so an extra store
3853 * would be required to use memory. Attempt an integer-vector
3854 * register move first. We do not have a TCGRegSet for this.
3855 */
3856 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
3857 break;
3858 }
3859 /* Sync the temp back to its slot and load from there. */
3860 temp_sync(s, its, s->reserved_regs, 0, 0);
3861 }
3862 /* fall through */
3863
3864 case TEMP_VAL_MEM:
31c96417
RH
3865 lowpart_ofs = 0;
3866 if (HOST_BIG_ENDIAN) {
3867 lowpart_ofs = tcg_type_size(itype) - (1 << vece);
3868 }
d6ecb4a9 3869 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
31c96417 3870 its->mem_offset + lowpart_ofs)) {
d6ecb4a9
RH
3871 goto done;
3872 }
098859f1 3873 /* Load the input into the destination vector register. */
bab1671f
RH
3874 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
3875 break;
3876
3877 default:
3878 g_assert_not_reached();
3879 }
3880
3881 /* We now have a vector input register, so dup must succeed. */
3882 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
3883 tcg_debug_assert(ok);
3884
3885 done:
36f5539c 3886 ots->mem_coherent = 0;
bab1671f
RH
3887 if (IS_DEAD_ARG(1)) {
3888 temp_dead(s, its);
3889 }
3890 if (NEED_SYNC_ARG(0)) {
3891 temp_sync(s, ots, s->reserved_regs, 0, 0);
3892 }
3893 if (IS_DEAD_ARG(0)) {
3894 temp_dead(s, ots);
3895 }
3896}
3897
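/*
 * Worked example for lowpart_ofs above (illustrative, not from the
 * upstream source): duplicating the MO_8 element of a TCG_TYPE_I64 temp
 * that lives in memory.  tcg_type_size(TCG_TYPE_I64) is 8 and 1 << MO_8
 * is 1, so lowpart_ofs is 8 - 1 = 7 on a big-endian host and 0 on a
 * little-endian one; in both cases tcg_out_dupm_vec() is pointed at the
 * least significant byte of the stored value.
 */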
dd186292 3898static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 3899{
dd186292
RH
3900 const TCGLifeData arg_life = op->life;
3901 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
3902 TCGRegSet i_allocated_regs;
3903 TCGRegSet o_allocated_regs;
b6638662
RH
3904 int i, k, nb_iargs, nb_oargs;
3905 TCGReg reg;
c896fe29
FB
3906 TCGArg arg;
3907 const TCGArgConstraint *arg_ct;
3908 TCGTemp *ts;
3909 TCGArg new_args[TCG_MAX_OP_ARGS];
3910 int const_args[TCG_MAX_OP_ARGS];
3911
3912 nb_oargs = def->nb_oargs;
3913 nb_iargs = def->nb_iargs;
3914
3915 /* copy constants */
a813e36f 3916 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 3917 op->args + nb_oargs + nb_iargs,
c896fe29
FB
3918 sizeof(TCGArg) * def->nb_cargs);
3919
d21369f5
RH
3920 i_allocated_regs = s->reserved_regs;
3921 o_allocated_regs = s->reserved_regs;
82790a87 3922
a813e36f 3923 /* satisfy input constraints */
dd186292 3924 for (k = 0; k < nb_iargs; k++) {
29f5e925
RH
3925 TCGRegSet i_preferred_regs, i_required_regs;
3926 bool allocate_new_reg, copyto_new_reg;
3927 TCGTemp *ts2;
3928 int i1, i2;
d62816f2 3929
66792f90 3930 i = def->args_ct[nb_oargs + k].sort_index;
dd186292 3931 arg = op->args[i];
c896fe29 3932 arg_ct = &def->args_ct[i];
43439139 3933 ts = arg_temp(arg);
40ae5c62
RH
3934
3935 if (ts->val_type == TEMP_VAL_CONST
a4fbbd77 3936 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
40ae5c62
RH
3937 /* constant is OK for instruction */
3938 const_args[i] = 1;
3939 new_args[i] = ts->val;
d62816f2 3940 continue;
c896fe29 3941 }
40ae5c62 3942
1c1824dc
RH
3943 reg = ts->reg;
3944 i_preferred_regs = 0;
29f5e925 3945 i_required_regs = arg_ct->regs;
1c1824dc 3946 allocate_new_reg = false;
29f5e925
RH
3947 copyto_new_reg = false;
3948
3949 switch (arg_ct->pair) {
3950 case 0: /* not paired */
3951 if (arg_ct->ialias) {
31fd884b 3952 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
3953
3954 /*
3955 * If the input is readonly, then it cannot also be an
3956 * output and aliased to itself. If the input is not
3957 * dead after the instruction, we must allocate a new
3958 * register and move it.
3959 */
3960 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
3961 allocate_new_reg = true;
3962 } else if (ts->val_type == TEMP_VAL_REG) {
3963 /*
3964 * Check if the current register has already been
3965 * allocated for another input.
3966 */
3967 allocate_new_reg =
3968 tcg_regset_test_reg(i_allocated_regs, reg);
3969 }
3970 }
3971 if (!allocate_new_reg) {
3972 temp_load(s, ts, i_required_regs, i_allocated_regs,
3973 i_preferred_regs);
3974 reg = ts->reg;
3975 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
3976 }
3977 if (allocate_new_reg) {
3978 /*
3979 * Allocate a new register matching the constraint
3980 * and move the temporary register into it.
3981 */
3982 temp_load(s, ts, tcg_target_available_regs[ts->type],
3983 i_allocated_regs, 0);
3984 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
3985 i_preferred_regs, ts->indirect_base);
3986 copyto_new_reg = true;
3987 }
3988 break;
3989
3990 case 1:
3991 /* First of an input pair; if i1 == i2, the second is an output. */
3992 i1 = i;
3993 i2 = arg_ct->pair_index;
3994 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
3995
3996 /*
3997 * It is easier to default to allocating a new pair
3998 * and to identify a few cases where it's not required.
3999 */
4000 if (arg_ct->ialias) {
31fd884b 4001 i_preferred_regs = output_pref(op, arg_ct->alias_index);
29f5e925
RH
4002 if (IS_DEAD_ARG(i1) &&
4003 IS_DEAD_ARG(i2) &&
4004 !temp_readonly(ts) &&
4005 ts->val_type == TEMP_VAL_REG &&
4006 ts->reg < TCG_TARGET_NB_REGS - 1 &&
4007 tcg_regset_test_reg(i_required_regs, reg) &&
4008 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4009 !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
4010 (ts2
4011 ? ts2->val_type == TEMP_VAL_REG &&
4012 ts2->reg == reg + 1 &&
4013 !temp_readonly(ts2)
4014 : s->reg_to_temp[reg + 1] == NULL)) {
4015 break;
4016 }
4017 } else {
4018 /* Without aliasing, the pair must also be an input. */
4019 tcg_debug_assert(ts2);
4020 if (ts->val_type == TEMP_VAL_REG &&
4021 ts2->val_type == TEMP_VAL_REG &&
4022 ts2->reg == reg + 1 &&
4023 tcg_regset_test_reg(i_required_regs, reg)) {
4024 break;
4025 }
4026 }
4027 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4028 0, ts->indirect_base);
4029 goto do_pair;
4030
4031 case 2: /* pair second */
4032 reg = new_args[arg_ct->pair_index] + 1;
4033 goto do_pair;
1c1824dc 4034
29f5e925
RH
4035 case 3: /* ialias with second output, no first input */
4036 tcg_debug_assert(arg_ct->ialias);
31fd884b 4037 i_preferred_regs = output_pref(op, arg_ct->alias_index);
d62816f2 4038
29f5e925
RH
4039 if (IS_DEAD_ARG(i) &&
4040 !temp_readonly(ts) &&
4041 ts->val_type == TEMP_VAL_REG &&
4042 reg > 0 &&
4043 s->reg_to_temp[reg - 1] == NULL &&
4044 tcg_regset_test_reg(i_required_regs, reg) &&
4045 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4046 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4047 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4048 break;
4049 }
4050 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4051 i_allocated_regs, 0,
4052 ts->indirect_base);
4053 tcg_regset_set_reg(i_allocated_regs, reg);
4054 reg += 1;
4055 goto do_pair;
4056
4057 do_pair:
c0522136 4058 /*
29f5e925
RH
4059 * If an aliased input is not dead after the instruction,
4060 * we must allocate a new register and move it.
c0522136 4061 */
29f5e925
RH
4062 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4063 TCGRegSet t_allocated_regs = i_allocated_regs;
4064
1c1824dc 4065 /*
29f5e925
RH
4066 * Because of the alias, and the continued life, make sure
4067 * that the temp is somewhere *other* than the reg pair,
4068 * and we get a copy in reg.
1c1824dc 4069 */
29f5e925
RH
4070 tcg_regset_set_reg(t_allocated_regs, reg);
4071 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4072 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4073 /* If ts was already in reg, copy it somewhere else. */
4074 TCGReg nr;
4075 bool ok;
4076
4077 tcg_debug_assert(ts->kind != TEMP_FIXED);
4078 nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4079 t_allocated_regs, 0, ts->indirect_base);
4080 ok = tcg_out_mov(s, ts->type, nr, reg);
4081 tcg_debug_assert(ok);
4082
4083 set_temp_val_reg(s, ts, nr);
4084 } else {
4085 temp_load(s, ts, tcg_target_available_regs[ts->type],
4086 t_allocated_regs, 0);
4087 copyto_new_reg = true;
4088 }
4089 } else {
4090 /* Preferably allocate to reg, otherwise copy. */
4091 i_required_regs = (TCGRegSet)1 << reg;
4092 temp_load(s, ts, i_required_regs, i_allocated_regs,
4093 i_preferred_regs);
4094 copyto_new_reg = ts->reg != reg;
5ff9d6a4 4095 }
29f5e925 4096 break;
d62816f2 4097
29f5e925
RH
4098 default:
4099 g_assert_not_reached();
1c1824dc 4100 }
d62816f2 4101
29f5e925 4102 if (copyto_new_reg) {
78113e83 4103 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
240c08d0
RH
4104 /*
4105 * Cross register class move not supported. Sync the
4106 * temp back to its slot and load from there.
4107 */
4108 temp_sync(s, ts, i_allocated_regs, 0, 0);
4109 tcg_out_ld(s, ts->type, reg,
4110 ts->mem_base->reg, ts->mem_offset);
78113e83 4111 }
c896fe29 4112 }
c896fe29
FB
4113 new_args[i] = reg;
4114 const_args[i] = 0;
82790a87 4115 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29 4116 }
a813e36f 4117
a52ad07e
AJ
4118 /* mark dead temporaries and free the associated registers */
4119 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4120 if (IS_DEAD_ARG(i)) {
43439139 4121 temp_dead(s, arg_temp(op->args[i]));
a52ad07e
AJ
4122 }
4123 }
4124
b4cb76e6
RH
4125 if (def->flags & TCG_OPF_COND_BRANCH) {
4126 tcg_reg_alloc_cbranch(s, i_allocated_regs);
4127 } else if (def->flags & TCG_OPF_BB_END) {
82790a87 4128 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 4129 } else {
e8996ee0 4130 if (def->flags & TCG_OPF_CALL_CLOBBER) {
a813e36f 4131 /* XXX: permit generic clobber register list ? */
c8074023
RH
4132 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4133 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 4134 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 4135 }
c896fe29 4136 }
3d5c5f87
AJ
4137 }
4138 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
4139 /* sync globals if the op has side effects and might trigger
4140 an exception. */
82790a87 4141 sync_globals(s, i_allocated_regs);
c896fe29 4142 }
a813e36f 4143
e8996ee0 4144 /* satisfy the output constraints */
e8996ee0 4145 for(k = 0; k < nb_oargs; k++) {
66792f90 4146 i = def->args_ct[k].sort_index;
dd186292 4147 arg = op->args[i];
e8996ee0 4148 arg_ct = &def->args_ct[i];
43439139 4149 ts = arg_temp(arg);
d63e3b6e
RH
4150
4151 /* ENV should not be modified. */
e01fa97d 4152 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4153
29f5e925
RH
4154 switch (arg_ct->pair) {
4155 case 0: /* not paired */
4156 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
4157 reg = new_args[arg_ct->alias_index];
4158 } else if (arg_ct->newreg) {
4159 reg = tcg_reg_alloc(s, arg_ct->regs,
4160 i_allocated_regs | o_allocated_regs,
31fd884b 4161 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4162 } else {
4163 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
31fd884b 4164 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4165 }
4166 break;
4167
4168 case 1: /* first of pair */
4169 tcg_debug_assert(!arg_ct->newreg);
4170 if (arg_ct->oalias) {
4171 reg = new_args[arg_ct->alias_index];
4172 break;
4173 }
4174 reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
31fd884b 4175 output_pref(op, k), ts->indirect_base);
29f5e925
RH
4176 break;
4177
4178 case 2: /* second of pair */
4179 tcg_debug_assert(!arg_ct->newreg);
4180 if (arg_ct->oalias) {
4181 reg = new_args[arg_ct->alias_index];
4182 } else {
4183 reg = new_args[arg_ct->pair_index] + 1;
4184 }
4185 break;
4186
4187 case 3: /* first of pair, aliasing with a second input */
4188 tcg_debug_assert(!arg_ct->newreg);
4189 reg = new_args[arg_ct->pair_index] - 1;
4190 break;
4191
4192 default:
4193 g_assert_not_reached();
c896fe29 4194 }
82790a87 4195 tcg_regset_set_reg(o_allocated_regs, reg);
098859f1 4196 set_temp_val_reg(s, ts, reg);
d63e3b6e 4197 ts->mem_coherent = 0;
e8996ee0 4198 new_args[i] = reg;
c896fe29 4199 }
c896fe29
FB
4200 }
4201
c896fe29 4202 /* emit instruction */
d2fd745f
RH
4203 if (def->flags & TCG_OPF_VECTOR) {
4204 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
4205 new_args, const_args);
4206 } else {
4207 tcg_out_op(s, op->opc, new_args, const_args);
4208 }
4209
c896fe29
FB
4210 /* move the outputs in the correct register if needed */
4211 for(i = 0; i < nb_oargs; i++) {
43439139 4212 ts = arg_temp(op->args[i]);
d63e3b6e
RH
4213
4214 /* ENV should not be modified. */
e01fa97d 4215 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4216
ec7a869d 4217 if (NEED_SYNC_ARG(i)) {
98b4e186 4218 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
59d7c14e 4219 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 4220 temp_dead(s, ts);
ec7a869d 4221 }
c896fe29
FB
4222 }
4223}
4224
efe86b21
RH
4225static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
4226{
4227 const TCGLifeData arg_life = op->life;
4228 TCGTemp *ots, *itsl, *itsh;
4229 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4230
4231 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
4232 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
4233 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
4234
4235 ots = arg_temp(op->args[0]);
4236 itsl = arg_temp(op->args[1]);
4237 itsh = arg_temp(op->args[2]);
4238
4239 /* ENV should not be modified. */
4240 tcg_debug_assert(!temp_readonly(ots));
4241
4242 /* Allocate the output register now. */
4243 if (ots->val_type != TEMP_VAL_REG) {
4244 TCGRegSet allocated_regs = s->reserved_regs;
4245 TCGRegSet dup_out_regs =
4246 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
098859f1 4247 TCGReg oreg;
efe86b21
RH
4248
4249 /* Make sure to not spill the input registers. */
4250 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
4251 tcg_regset_set_reg(allocated_regs, itsl->reg);
4252 }
4253 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
4254 tcg_regset_set_reg(allocated_regs, itsh->reg);
4255 }
4256
098859f1 4257 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
31fd884b 4258 output_pref(op, 0), ots->indirect_base);
098859f1 4259 set_temp_val_reg(s, ots, oreg);
efe86b21
RH
4260 }
4261
4262 /* Promote dup2 of immediates to dupi_vec. */
4263 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
4264 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
4265 MemOp vece = MO_64;
4266
4267 if (val == dup_const(MO_8, val)) {
4268 vece = MO_8;
4269 } else if (val == dup_const(MO_16, val)) {
4270 vece = MO_16;
4271 } else if (val == dup_const(MO_32, val)) {
4272 vece = MO_32;
4273 }
4274
4275 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
4276 goto done;
4277 }
4278
4279 /* If the two inputs form one 64-bit value, try dupm_vec. */
aef85402
RH
4280 if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
4281 itsh->temp_subindex == !HOST_BIG_ENDIAN &&
4282 itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
4283 TCGTemp *its = itsl - HOST_BIG_ENDIAN;
4284
4285 temp_sync(s, its + 0, s->reserved_regs, 0, 0);
4286 temp_sync(s, its + 1, s->reserved_regs, 0, 0);
4287
efe86b21
RH
4288 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
4289 its->mem_base->reg, its->mem_offset)) {
4290 goto done;
4291 }
4292 }
4293
4294 /* Fall back to generic expansion. */
4295 return false;
4296
4297 done:
36f5539c 4298 ots->mem_coherent = 0;
efe86b21
RH
4299 if (IS_DEAD_ARG(1)) {
4300 temp_dead(s, itsl);
4301 }
4302 if (IS_DEAD_ARG(2)) {
4303 temp_dead(s, itsh);
4304 }
4305 if (NEED_SYNC_ARG(0)) {
4306 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
4307 } else if (IS_DEAD_ARG(0)) {
4308 temp_dead(s, ots);
4309 }
4310 return true;
4311}
4312
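/*
 * Illustration of the immediate promotion above (hypothetical values,
 * not from the upstream source):
 *
 *     itsl->val = 0xdeadbeef;      low 32 bits
 *     itsh->val = 0x00c0ffee;      high 32 bits
 *     deposit64(0xdeadbeef, 32, 32, 0x00c0ffee) == 0x00c0ffeedeadbeefull
 *
 * The combined value then goes through the same dup_const() checks used
 * in temp_load(), picking the narrowest element size before a single
 * dupi_vec is emitted.
 */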
39004a71
RH
4313static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
4314 TCGRegSet allocated_regs)
c896fe29 4315{
39004a71
RH
4316 if (ts->val_type == TEMP_VAL_REG) {
4317 if (ts->reg != reg) {
4318 tcg_reg_free(s, reg, allocated_regs);
4319 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4320 /*
4321 * Cross register class move not supported. Sync the
4322 * temp back to its slot and load from there.
4323 */
4324 temp_sync(s, ts, allocated_regs, 0, 0);
4325 tcg_out_ld(s, ts->type, reg,
4326 ts->mem_base->reg, ts->mem_offset);
4327 }
4328 }
4329 } else {
4330 TCGRegSet arg_set = 0;
c896fe29 4331
39004a71
RH
4332 tcg_reg_free(s, reg, allocated_regs);
4333 tcg_regset_set_reg(arg_set, reg);
4334 temp_load(s, ts, arg_set, allocated_regs, 0);
b03cce8e 4335 }
39004a71 4336}
39cf05d3 4337
39004a71
RH
4338static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
4339 TCGRegSet allocated_regs)
4340{
4341 /*
4342 * When the destination is on the stack, load up the temp and store.
4343 * If there are many call-saved registers, the temp might live to
4344 * see another use; otherwise it'll be discarded.
4345 */
4346 temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
4347 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
4348 TCG_TARGET_CALL_STACK_OFFSET +
4349 stk_slot * sizeof(tcg_target_long));
4350}
a813e36f 4351
39004a71
RH
4352static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
4353 TCGTemp *ts, TCGRegSet *allocated_regs)
4354{
4355 if (REG_P(l)) {
4356 TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
4357 load_arg_reg(s, reg, ts, *allocated_regs);
4358 tcg_regset_set_reg(*allocated_regs, reg);
4359 } else {
4360 load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
4361 ts, *allocated_regs);
4362 }
4363}
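/*
 * Example of the slot arithmetic above, assuming a host ABI with eight
 * integer argument registers (illustrative, not from the upstream
 * source): argument slots 0..7 are placed in tcg_target_call_iarg_regs,
 * while slot 10 is stored by load_arg_stk() at
 *
 *     TCG_REG_CALL_STACK + TCG_TARGET_CALL_STACK_OFFSET
 *                        + (10 - 8) * sizeof(tcg_target_long)
 *
 * i.e. the third tcg_target_long sized slot of the outgoing argument
 * area.
 */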
40ae5c62 4364
39004a71
RH
4365static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
4366{
4367 const int nb_oargs = TCGOP_CALLO(op);
4368 const int nb_iargs = TCGOP_CALLI(op);
4369 const TCGLifeData arg_life = op->life;
4370 const TCGHelperInfo *info = tcg_call_info(op);
4371 TCGRegSet allocated_regs = s->reserved_regs;
4372 int i;
40ae5c62 4373
39004a71
RH
4374 /*
4375 * Move inputs into place in reverse order,
4376 * so that we place stacked arguments first.
4377 */
4378 for (i = nb_iargs - 1; i >= 0; --i) {
4379 const TCGCallArgumentLoc *loc = &info->in[i];
4380 TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
40ae5c62 4381
39004a71
RH
4382 switch (loc->kind) {
4383 case TCG_CALL_ARG_NORMAL:
4384 case TCG_CALL_ARG_EXTEND_U:
4385 case TCG_CALL_ARG_EXTEND_S:
4386 load_arg_normal(s, loc, ts, &allocated_regs);
4387 break;
4388 default:
4389 g_assert_not_reached();
c896fe29 4390 }
c896fe29 4391 }
a813e36f 4392
39004a71 4393 /* Mark dead temporaries and free the associated registers. */
dd186292 4394 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 4395 if (IS_DEAD_ARG(i)) {
43439139 4396 temp_dead(s, arg_temp(op->args[i]));
c896fe29
FB
4397 }
4398 }
a813e36f 4399
39004a71 4400 /* Clobber call registers. */
c8074023
RH
4401 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4402 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 4403 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
4404 }
4405 }
78505279 4406
39004a71
RH
4407 /*
4408 * Save globals if they might be written by the helper,
4409 * sync them if they might be read.
4410 */
4411 if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
78505279 4412 /* Nothing to do */
39004a71 4413 } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
78505279
AJ
4414 sync_globals(s, allocated_regs);
4415 } else {
b9c18f56
AJ
4416 save_globals(s, allocated_regs);
4417 }
c896fe29 4418
cee44b03 4419 tcg_out_call(s, tcg_call_func(op), info);
c896fe29 4420
39004a71
RH
4421 /* Assign output registers and emit moves if needed. */
4422 switch (info->out_kind) {
4423 case TCG_CALL_RET_NORMAL:
4424 for (i = 0; i < nb_oargs; i++) {
4425 TCGTemp *ts = arg_temp(op->args[i]);
4426 TCGReg reg = tcg_target_call_oarg_regs[i];
d63e3b6e 4427
39004a71
RH
4428 /* ENV should not be modified. */
4429 tcg_debug_assert(!temp_readonly(ts));
d63e3b6e 4430
39004a71
RH
4431 set_temp_val_reg(s, ts, reg);
4432 ts->mem_coherent = 0;
4433 }
4434 break;
4435 default:
4436 g_assert_not_reached();
4437 }
4438
4439 /* Flush or discard output registers as needed. */
4440 for (i = 0; i < nb_oargs; i++) {
4441 TCGTemp *ts = arg_temp(op->args[i]);
d63e3b6e 4442 if (NEED_SYNC_ARG(i)) {
39004a71 4443 temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
d63e3b6e
RH
4444 } else if (IS_DEAD_ARG(i)) {
4445 temp_dead(s, ts);
c896fe29
FB
4446 }
4447 }
c896fe29
FB
4448}
4449
4450#ifdef CONFIG_PROFILER
4451
c3fac113
EC
4452/* avoid copy/paste errors */
4453#define PROF_ADD(to, from, field) \
4454 do { \
d73415a3 4455 (to)->field += qatomic_read(&((from)->field)); \
c3fac113
EC
4456 } while (0)
4457
4458#define PROF_MAX(to, from, field) \
4459 do { \
d73415a3 4460 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
c3fac113
EC
4461 if (val__ > (to)->field) { \
4462 (to)->field = val__; \
4463 } \
4464 } while (0)
4465
4466/* Pass in a zero'ed @prof */
4467static inline
4468void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
4469{
0e2d61cf 4470 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
c3fac113
EC
4471 unsigned int i;
4472
3468b59e 4473 for (i = 0; i < n_ctxs; i++) {
d73415a3 4474 TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3468b59e 4475 const TCGProfile *orig = &s->prof;
c3fac113
EC
4476
4477 if (counters) {
72fd2efb 4478 PROF_ADD(prof, orig, cpu_exec_time);
c3fac113
EC
4479 PROF_ADD(prof, orig, tb_count1);
4480 PROF_ADD(prof, orig, tb_count);
4481 PROF_ADD(prof, orig, op_count);
4482 PROF_MAX(prof, orig, op_count_max);
4483 PROF_ADD(prof, orig, temp_count);
4484 PROF_MAX(prof, orig, temp_count_max);
4485 PROF_ADD(prof, orig, del_op_count);
4486 PROF_ADD(prof, orig, code_in_len);
4487 PROF_ADD(prof, orig, code_out_len);
4488 PROF_ADD(prof, orig, search_out_len);
4489 PROF_ADD(prof, orig, interm_time);
4490 PROF_ADD(prof, orig, code_time);
4491 PROF_ADD(prof, orig, la_time);
4492 PROF_ADD(prof, orig, opt_time);
4493 PROF_ADD(prof, orig, restore_count);
4494 PROF_ADD(prof, orig, restore_time);
4495 }
4496 if (table) {
4497 int i;
4498
4499 for (i = 0; i < NB_OPS; i++) {
4500 PROF_ADD(prof, orig, table_op_count[i]);
4501 }
4502 }
4503 }
4504}
4505
4506#undef PROF_ADD
4507#undef PROF_MAX
4508
4509static void tcg_profile_snapshot_counters(TCGProfile *prof)
4510{
4511 tcg_profile_snapshot(prof, true, false);
4512}
4513
4514static void tcg_profile_snapshot_table(TCGProfile *prof)
4515{
4516 tcg_profile_snapshot(prof, false, true);
4517}
c896fe29 4518
b6a7f3e0 4519void tcg_dump_op_count(GString *buf)
c896fe29 4520{
c3fac113 4521 TCGProfile prof = {};
c896fe29 4522 int i;
d70724ce 4523
c3fac113 4524 tcg_profile_snapshot_table(&prof);
15fc7daa 4525 for (i = 0; i < NB_OPS; i++) {
b6a7f3e0
DB
4526 g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
4527 prof.table_op_count[i]);
c896fe29 4528 }
c896fe29 4529}
72fd2efb
EC
4530
4531int64_t tcg_cpu_exec_time(void)
4532{
0e2d61cf 4533 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
72fd2efb
EC
4534 unsigned int i;
4535 int64_t ret = 0;
4536
4537 for (i = 0; i < n_ctxs; i++) {
d73415a3 4538 const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
72fd2efb
EC
4539 const TCGProfile *prof = &s->prof;
4540
d73415a3 4541 ret += qatomic_read(&prof->cpu_exec_time);
72fd2efb
EC
4542 }
4543 return ret;
4544}
246ae24d 4545#else
b6a7f3e0 4546void tcg_dump_op_count(GString *buf)
246ae24d 4547{
b6a7f3e0 4548 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
246ae24d 4549}
72fd2efb
EC
4550
4551int64_t tcg_cpu_exec_time(void)
4552{
4553 error_report("%s: TCG profiler not compiled", __func__);
4554 exit(EXIT_FAILURE);
4555}
c896fe29
FB
4556#endif
4557
4558
fbf59aad 4559int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
c896fe29 4560{
c3fac113
EC
4561#ifdef CONFIG_PROFILER
4562 TCGProfile *prof = &s->prof;
4563#endif
15fa08f8
RH
4564 int i, num_insns;
4565 TCGOp *op;
c896fe29 4566
04fe6400
RH
4567#ifdef CONFIG_PROFILER
4568 {
c1f543b7 4569 int n = 0;
04fe6400 4570
15fa08f8
RH
4571 QTAILQ_FOREACH(op, &s->ops, link) {
4572 n++;
4573 }
d73415a3 4574 qatomic_set(&prof->op_count, prof->op_count + n);
c3fac113 4575 if (n > prof->op_count_max) {
d73415a3 4576 qatomic_set(&prof->op_count_max, n);
04fe6400
RH
4577 }
4578
4579 n = s->nb_temps;
d73415a3 4580 qatomic_set(&prof->temp_count, prof->temp_count + n);
c3fac113 4581 if (n > prof->temp_count_max) {
d73415a3 4582 qatomic_set(&prof->temp_count_max, n);
04fe6400
RH
4583 }
4584 }
4585#endif
4586
c896fe29 4587#ifdef DEBUG_DISAS
d977e1c2 4588 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
fbf59aad 4589 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4590 FILE *logfile = qemu_log_trylock();
78b54858
RH
4591 if (logfile) {
4592 fprintf(logfile, "OP:\n");
b7a83ff8 4593 tcg_dump_ops(s, logfile, false);
78b54858
RH
4594 fprintf(logfile, "\n");
4595 qemu_log_unlock(logfile);
4596 }
c896fe29
FB
4597 }
4598#endif
4599
bef16ab4
RH
4600#ifdef CONFIG_DEBUG_TCG
4601 /* Ensure all labels referenced have been emitted. */
4602 {
4603 TCGLabel *l;
4604 bool error = false;
4605
4606 QSIMPLEQ_FOREACH(l, &s->labels, next) {
4607 if (unlikely(!l->present) && l->refs) {
4608 qemu_log_mask(CPU_LOG_TB_OP,
4609 "$L%d referenced but not present.\n", l->id);
4610 error = true;
4611 }
4612 }
4613 assert(!error);
4614 }
4615#endif
4616
c5cc28ff 4617#ifdef CONFIG_PROFILER
d73415a3 4618 qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
c5cc28ff
AJ
4619#endif
4620
8f2e8c07 4621#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 4622 tcg_optimize(s);
8f2e8c07
KB
4623#endif
4624
a23a9ec6 4625#ifdef CONFIG_PROFILER
d73415a3
SH
4626 qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
4627 qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
a23a9ec6 4628#endif
c5cc28ff 4629
b4fc67c7 4630 reachable_code_pass(s);
b83eabea 4631 liveness_pass_1(s);
5a18407f 4632
b83eabea 4633 if (s->nb_indirects > 0) {
5a18407f 4634#ifdef DEBUG_DISAS
b83eabea 4635 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
fbf59aad 4636 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4637 FILE *logfile = qemu_log_trylock();
78b54858
RH
4638 if (logfile) {
4639 fprintf(logfile, "OP before indirect lowering:\n");
b7a83ff8 4640 tcg_dump_ops(s, logfile, false);
78b54858
RH
4641 fprintf(logfile, "\n");
4642 qemu_log_unlock(logfile);
4643 }
b83eabea 4644 }
5a18407f 4645#endif
b83eabea
RH
4646 /* Replace indirect temps with direct temps. */
4647 if (liveness_pass_2(s)) {
4648 /* If changes were made, re-run liveness. */
4649 liveness_pass_1(s);
5a18407f
RH
4650 }
4651 }
c5cc28ff 4652
a23a9ec6 4653#ifdef CONFIG_PROFILER
d73415a3 4654 qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
a23a9ec6 4655#endif
c896fe29
FB
4656
4657#ifdef DEBUG_DISAS
d977e1c2 4658 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
fbf59aad 4659 && qemu_log_in_addr_range(pc_start))) {
c60f599b 4660 FILE *logfile = qemu_log_trylock();
78b54858
RH
4661 if (logfile) {
4662 fprintf(logfile, "OP after optimization and liveness analysis:\n");
b7a83ff8 4663 tcg_dump_ops(s, logfile, true);
78b54858
RH
4664 fprintf(logfile, "\n");
4665 qemu_log_unlock(logfile);
4666 }
c896fe29
FB
4667 }
4668#endif
4669
35abb009 4670 /* Initialize goto_tb jump offsets. */
3a50f424
RH
4671 tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
4672 tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
9da6079b
RH
4673 tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
4674 tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
35abb009 4675
c896fe29
FB
4676 tcg_reg_alloc_start(s);
4677
db0c51a3
RH
4678 /*
4679 * Reset the buffer pointers when restarting after overflow.
4680 * TODO: Move this into translate-all.c with the rest of the
4681 * buffer management. Having only this done here is confusing.
4682 */
4683 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
4684 s->code_ptr = s->code_buf;
c896fe29 4685
659ef5cb 4686#ifdef TCG_TARGET_NEED_LDST_LABELS
6001f772 4687 QSIMPLEQ_INIT(&s->ldst_labels);
659ef5cb 4688#endif
57a26946
RH
4689#ifdef TCG_TARGET_NEED_POOL_LABELS
4690 s->pool_labels = NULL;
4691#endif
9ecefc84 4692
fca8a500 4693 num_insns = -1;
15fa08f8 4694 QTAILQ_FOREACH(op, &s->ops, link) {
c45cb8bb 4695 TCGOpcode opc = op->opc;
b3db8758 4696
c896fe29 4697#ifdef CONFIG_PROFILER
d73415a3 4698 qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
c896fe29 4699#endif
c45cb8bb
RH
4700
4701 switch (opc) {
c896fe29 4702 case INDEX_op_mov_i32:
c896fe29 4703 case INDEX_op_mov_i64:
d2fd745f 4704 case INDEX_op_mov_vec:
dd186292 4705 tcg_reg_alloc_mov(s, op);
c896fe29 4706 break;
bab1671f
RH
4707 case INDEX_op_dup_vec:
4708 tcg_reg_alloc_dup(s, op);
4709 break;
765b842a 4710 case INDEX_op_insn_start:
fca8a500 4711 if (num_insns >= 0) {
9f754620
RH
4712 size_t off = tcg_current_code_size(s);
4713 s->gen_insn_end_off[num_insns] = off;
4714 /* Assert that we do not overflow our stored offset. */
4715 assert(s->gen_insn_end_off[num_insns] == off);
fca8a500
RH
4716 }
4717 num_insns++;
bad729e2
RH
4718 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
4719 target_ulong a;
4720#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
efee3746 4721 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
bad729e2 4722#else
efee3746 4723 a = op->args[i];
bad729e2 4724#endif
fca8a500 4725 s->gen_insn_data[num_insns][i] = a;
bad729e2 4726 }
c896fe29 4727 break;
5ff9d6a4 4728 case INDEX_op_discard:
43439139 4729 temp_dead(s, arg_temp(op->args[0]));
5ff9d6a4 4730 break;
c896fe29 4731 case INDEX_op_set_label:
e8996ee0 4732 tcg_reg_alloc_bb_end(s, s->reserved_regs);
92ab8e7d 4733 tcg_out_label(s, arg_label(op->args[0]));
c896fe29
FB
4734 break;
4735 case INDEX_op_call:
dd186292 4736 tcg_reg_alloc_call(s, op);
c45cb8bb 4737 break;
b55a8d9d
RH
4738 case INDEX_op_exit_tb:
4739 tcg_out_exit_tb(s, op->args[0]);
4740 break;
cf7d6b8e
RH
4741 case INDEX_op_goto_tb:
4742 tcg_out_goto_tb(s, op->args[0]);
4743 break;
efe86b21
RH
4744 case INDEX_op_dup2_vec:
4745 if (tcg_reg_alloc_dup2(s, op)) {
4746 break;
4747 }
4748 /* fall through */
c896fe29 4749 default:
25c4d9cc 4750 /* Sanity check that we've not introduced any unhandled opcodes. */
be0f34b5 4751 tcg_debug_assert(tcg_op_supported(opc));
c896fe29
FB
4752 /* Note: in order to speed up the code, it would be much
4753 faster to have specialized register allocator functions for
4754 some common argument patterns */
dd186292 4755 tcg_reg_alloc_op(s, op);
c896fe29
FB
4756 break;
4757 }
b125f9dc
RH
4758 /* Test for (pending) buffer overflow. The assumption is that any
4759 one operation beginning below the high water mark cannot overrun
4760 the buffer completely. Thus we can test for overflow after
4761 generating code without having to check during generation. */
644da9b3 4762 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
b125f9dc
RH
4763 return -1;
4764 }
6e6c4efe
RH
4765 /* Test for TB overflow, as seen by gen_insn_end_off. */
4766 if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
4767 return -2;
4768 }
c896fe29 4769 }
fca8a500
RH
4770 tcg_debug_assert(num_insns >= 0);
4771 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
c45cb8bb 4772
b76f0d8c 4773 /* Generate TB finalization at the end of block */
659ef5cb 4774#ifdef TCG_TARGET_NEED_LDST_LABELS
aeee05f5
RH
4775 i = tcg_out_ldst_finalize(s);
4776 if (i < 0) {
4777 return i;
23dceda6 4778 }
659ef5cb 4779#endif
57a26946 4780#ifdef TCG_TARGET_NEED_POOL_LABELS
1768987b
RH
4781 i = tcg_out_pool_finalize(s);
4782 if (i < 0) {
4783 return i;
57a26946
RH
4784 }
4785#endif
7ecd02a0
RH
4786 if (!tcg_resolve_relocs(s)) {
4787 return -2;
4788 }
c896fe29 4789
df5d2b16 4790#ifndef CONFIG_TCG_INTERPRETER
c896fe29 4791 /* flush instruction cache */
db0c51a3
RH
4792 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
4793 (uintptr_t)s->code_buf,
1da8de39 4794 tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
df5d2b16 4795#endif
2aeabc08 4796
1813e175 4797 return tcg_current_code_size(s);
c896fe29
FB
4798}
4799
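/*
 * Note on the return value (illustrative, not from the upstream source):
 * a negative result means the translation must be abandoned and retried,
 * -1 when the generated code ran past code_gen_highwater and -2 when the
 * block grew beyond what gen_insn_end_off can describe or relocations
 * could not be resolved.  A caller is expected to do something along the
 * lines of:
 *
 *     gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
 *     if (unlikely(gen_code_size < 0)) {
 *         restart with a fresh or flushed code buffer
 *     }
 */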
a23a9ec6 4800#ifdef CONFIG_PROFILER
3a841ab5 4801void tcg_dump_info(GString *buf)
a23a9ec6 4802{
c3fac113
EC
4803 TCGProfile prof = {};
4804 const TCGProfile *s;
4805 int64_t tb_count;
4806 int64_t tb_div_count;
4807 int64_t tot;
4808
4809 tcg_profile_snapshot_counters(&prof);
4810 s = &prof;
4811 tb_count = s->tb_count;
4812 tb_div_count = tb_count ? tb_count : 1;
4813 tot = s->interm_time + s->code_time;
a23a9ec6 4814
3a841ab5
DB
4815 g_string_append_printf(buf, "JIT cycles %" PRId64
4816 " (%0.3f s at 2.4 GHz)\n",
4817 tot, tot / 2.4e9);
4818 g_string_append_printf(buf, "translated TBs %" PRId64
4819 " (aborted=%" PRId64 " %0.1f%%)\n",
4820 tb_count, s->tb_count1 - tb_count,
4821 (double)(s->tb_count1 - s->tb_count)
4822 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4823 g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
4824 (double)s->op_count / tb_div_count, s->op_count_max);
4825 g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
4826 (double)s->del_op_count / tb_div_count);
4827 g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
4828 (double)s->temp_count / tb_div_count,
4829 s->temp_count_max);
4830 g_string_append_printf(buf, "avg host code/TB %0.1f\n",
4831 (double)s->code_out_len / tb_div_count);
4832 g_string_append_printf(buf, "avg search data/TB %0.1f\n",
4833 (double)s->search_out_len / tb_div_count);
a813e36f 4834
3a841ab5
DB
4835 g_string_append_printf(buf, "cycles/op %0.1f\n",
4836 s->op_count ? (double)tot / s->op_count : 0);
4837 g_string_append_printf(buf, "cycles/in byte %0.1f\n",
4838 s->code_in_len ? (double)tot / s->code_in_len : 0);
4839 g_string_append_printf(buf, "cycles/out byte %0.1f\n",
4840 s->code_out_len ? (double)tot / s->code_out_len : 0);
4841 g_string_append_printf(buf, "cycles/search byte %0.1f\n",
4842 s->search_out_len ?
4843 (double)tot / s->search_out_len : 0);
fca8a500 4844 if (tot == 0) {
a23a9ec6 4845 tot = 1;
fca8a500 4846 }
3a841ab5
DB
4847 g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
4848 (double)s->interm_time / tot * 100.0);
4849 g_string_append_printf(buf, " gen_code time %0.1f%%\n",
4850 (double)s->code_time / tot * 100.0);
4851 g_string_append_printf(buf, "optim./code time %0.1f%%\n",
4852 (double)s->opt_time / (s->code_time ?
4853 s->code_time : 1)
4854 * 100.0);
4855 g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
4856 (double)s->la_time / (s->code_time ?
4857 s->code_time : 1) * 100.0);
4858 g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
4859 s->restore_count);
4860 g_string_append_printf(buf, " avg cycles %0.1f\n",
4861 s->restore_count ?
4862 (double)s->restore_time / s->restore_count : 0);
a23a9ec6
FB
4863}
4864#else
3a841ab5 4865void tcg_dump_info(GString *buf)
a23a9ec6 4866{
3a841ab5 4867 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
a23a9ec6
FB
4868}
4869#endif
813da627
RH
4870
4871#ifdef ELF_HOST_MACHINE
5872bbf2
RH
4872/* In order to use this feature, the backend needs to do three things:
4873
4874 (1) Define ELF_HOST_MACHINE to indicate both what value to
4875 put into the ELF image and to indicate support for the feature.
4876
4877 (2) Define tcg_register_jit. This should create a buffer containing
4878 the contents of a .debug_frame section that describes the post-
4879 prologue unwind info for the tcg machine.
4880
4881 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4882*/
813da627
RH
4883
4884/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
4885typedef enum {
4886 JIT_NOACTION = 0,
4887 JIT_REGISTER_FN,
4888 JIT_UNREGISTER_FN
4889} jit_actions_t;
4890
4891struct jit_code_entry {
4892 struct jit_code_entry *next_entry;
4893 struct jit_code_entry *prev_entry;
4894 const void *symfile_addr;
4895 uint64_t symfile_size;
4896};
4897
4898struct jit_descriptor {
4899 uint32_t version;
4900 uint32_t action_flag;
4901 struct jit_code_entry *relevant_entry;
4902 struct jit_code_entry *first_entry;
4903};
4904
4905void __jit_debug_register_code(void) __attribute__((noinline));
4906void __jit_debug_register_code(void)
4907{
4908 asm("");
4909}
4910
4911/* Must statically initialize the version, because GDB may check
4912 the version before we can set it. */
4913struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4914
4915/* End GDB interface. */
4916
4917static int find_string(const char *strtab, const char *str)
4918{
4919 const char *p = strtab + 1;
4920
4921 while (1) {
4922 if (strcmp(p, str) == 0) {
4923 return p - strtab;
4924 }
4925 p += strlen(p) + 1;
4926 }
4927}
4928
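/*
 * Example (illustrative, not from the upstream source): with a section
 * string table laid out like the one below,
 *
 *     static const char str[] = "\0" ".text\0" ".debug_info\0";
 *     find_string(str, ".text") == 1
 *     find_string(str, ".debug_info") == 7
 *
 * i.e. the byte offset of the name within the table.  The helper assumes
 * the string is present; it would loop forever otherwise.
 */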
755bf9e5 4929static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
2c90784a
RH
4930 const void *debug_frame,
4931 size_t debug_frame_size)
813da627 4932{
5872bbf2
RH
4933 struct __attribute__((packed)) DebugInfo {
4934 uint32_t len;
4935 uint16_t version;
4936 uint32_t abbrev;
4937 uint8_t ptr_size;
4938 uint8_t cu_die;
4939 uint16_t cu_lang;
4940 uintptr_t cu_low_pc;
4941 uintptr_t cu_high_pc;
4942 uint8_t fn_die;
4943 char fn_name[16];
4944 uintptr_t fn_low_pc;
4945 uintptr_t fn_high_pc;
4946 uint8_t cu_eoc;
4947 };
813da627
RH
4948
4949 struct ElfImage {
4950 ElfW(Ehdr) ehdr;
4951 ElfW(Phdr) phdr;
5872bbf2
RH
4952 ElfW(Shdr) shdr[7];
4953 ElfW(Sym) sym[2];
4954 struct DebugInfo di;
4955 uint8_t da[24];
4956 char str[80];
4957 };
4958
4959 struct ElfImage *img;
4960
4961 static const struct ElfImage img_template = {
4962 .ehdr = {
4963 .e_ident[EI_MAG0] = ELFMAG0,
4964 .e_ident[EI_MAG1] = ELFMAG1,
4965 .e_ident[EI_MAG2] = ELFMAG2,
4966 .e_ident[EI_MAG3] = ELFMAG3,
4967 .e_ident[EI_CLASS] = ELF_CLASS,
4968 .e_ident[EI_DATA] = ELF_DATA,
4969 .e_ident[EI_VERSION] = EV_CURRENT,
4970 .e_type = ET_EXEC,
4971 .e_machine = ELF_HOST_MACHINE,
4972 .e_version = EV_CURRENT,
4973 .e_phoff = offsetof(struct ElfImage, phdr),
4974 .e_shoff = offsetof(struct ElfImage, shdr),
4975 .e_ehsize = sizeof(ElfW(Shdr)),
4976 .e_phentsize = sizeof(ElfW(Phdr)),
4977 .e_phnum = 1,
4978 .e_shentsize = sizeof(ElfW(Shdr)),
4979 .e_shnum = ARRAY_SIZE(img->shdr),
4980 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
abbb3eae
RH
4981#ifdef ELF_HOST_FLAGS
4982 .e_flags = ELF_HOST_FLAGS,
4983#endif
4984#ifdef ELF_OSABI
4985 .e_ident[EI_OSABI] = ELF_OSABI,
4986#endif
5872bbf2
RH
4987 },
4988 .phdr = {
4989 .p_type = PT_LOAD,
4990 .p_flags = PF_X,
4991 },
4992 .shdr = {
4993 [0] = { .sh_type = SHT_NULL },
4994 /* Trick: The contents of code_gen_buffer are not present in
4995 this fake ELF file; that got allocated elsewhere. Therefore
4996 we mark .text as SHT_NOBITS (similar to .bss) so that readers
4997 will not look for contents. We can record any address. */
4998 [1] = { /* .text */
4999 .sh_type = SHT_NOBITS,
5000 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
5001 },
5002 [2] = { /* .debug_info */
5003 .sh_type = SHT_PROGBITS,
5004 .sh_offset = offsetof(struct ElfImage, di),
5005 .sh_size = sizeof(struct DebugInfo),
5006 },
5007 [3] = { /* .debug_abbrev */
5008 .sh_type = SHT_PROGBITS,
5009 .sh_offset = offsetof(struct ElfImage, da),
5010 .sh_size = sizeof(img->da),
5011 },
5012 [4] = { /* .debug_frame */
5013 .sh_type = SHT_PROGBITS,
5014 .sh_offset = sizeof(struct ElfImage),
5015 },
5016 [5] = { /* .symtab */
5017 .sh_type = SHT_SYMTAB,
5018 .sh_offset = offsetof(struct ElfImage, sym),
5019 .sh_size = sizeof(img->sym),
5020 .sh_info = 1,
5021 .sh_link = ARRAY_SIZE(img->shdr) - 1,
5022 .sh_entsize = sizeof(ElfW(Sym)),
5023 },
5024 [6] = { /* .strtab */
5025 .sh_type = SHT_STRTAB,
5026 .sh_offset = offsetof(struct ElfImage, str),
5027 .sh_size = sizeof(img->str),
5028 }
5029 },
5030 .sym = {
5031 [1] = { /* code_gen_buffer */
5032 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
5033 .st_shndx = 1,
5034 }
5035 },
5036 .di = {
5037 .len = sizeof(struct DebugInfo) - 4,
5038 .version = 2,
5039 .ptr_size = sizeof(void *),
5040 .cu_die = 1,
5041 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
5042 .fn_die = 2,
5043 .fn_name = "code_gen_buffer"
5044 },
5045 .da = {
5046 1, /* abbrev number (the cu) */
5047 0x11, 1, /* DW_TAG_compile_unit, has children */
5048 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
5049 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5050 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5051 0, 0, /* end of abbrev */
5052 2, /* abbrev number (the fn) */
5053 0x2e, 0, /* DW_TAG_subprogram, no children */
5054 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
5055 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5056 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5057 0, 0, /* end of abbrev */
5058 0 /* no more abbrev */
5059 },
5060 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
5061 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
813da627
RH
5062 };
5063
5064 /* We only need a single jit entry; statically allocate it. */
5065 static struct jit_code_entry one_entry;
5066
5872bbf2 5067 uintptr_t buf = (uintptr_t)buf_ptr;
813da627 5068 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2c90784a 5069 DebugFrameHeader *dfh;
813da627 5070
5872bbf2
RH
5071 img = g_malloc(img_size);
5072 *img = img_template;
813da627 5073
5872bbf2
RH
5074 img->phdr.p_vaddr = buf;
5075 img->phdr.p_paddr = buf;
5076 img->phdr.p_memsz = buf_size;
813da627 5077
813da627 5078 img->shdr[1].sh_name = find_string(img->str, ".text");
5872bbf2 5079 img->shdr[1].sh_addr = buf;
813da627
RH
5080 img->shdr[1].sh_size = buf_size;
5081
5872bbf2
RH
5082 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
5083 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
5084
5085 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
5086 img->shdr[4].sh_size = debug_frame_size;
5087
5088 img->shdr[5].sh_name = find_string(img->str, ".symtab");
5089 img->shdr[6].sh_name = find_string(img->str, ".strtab");
5090
5091 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
5092 img->sym[1].st_value = buf;
5093 img->sym[1].st_size = buf_size;
813da627 5094
5872bbf2 5095 img->di.cu_low_pc = buf;
45aba097 5096 img->di.cu_high_pc = buf + buf_size;
5872bbf2 5097 img->di.fn_low_pc = buf;
45aba097 5098 img->di.fn_high_pc = buf + buf_size;
813da627 5099
2c90784a
RH
5100 dfh = (DebugFrameHeader *)(img + 1);
5101 memcpy(dfh, debug_frame, debug_frame_size);
5102 dfh->fde.func_start = buf;
5103 dfh->fde.func_len = buf_size;
5104
813da627
RH
5105#ifdef DEBUG_JIT
5106 /* Enable this block to be able to debug the ELF image file creation.
5107 One can use readelf, objdump, or other inspection utilities. */
5108 {
eb6b2edf
BM
5109 g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
5110 FILE *f = fopen(jit, "w+b");
813da627 5111 if (f) {
5872bbf2 5112 if (fwrite(img, img_size, 1, f) != img_size) {
813da627
RH
5113 /* Avoid stupid unused return value warning for fwrite. */
5114 }
5115 fclose(f);
5116 }
5117 }
5118#endif
5119
5120 one_entry.symfile_addr = img;
5121 one_entry.symfile_size = img_size;
5122
5123 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
5124 __jit_debug_descriptor.relevant_entry = &one_entry;
5125 __jit_debug_descriptor.first_entry = &one_entry;
5126 __jit_debug_register_code();
5127}
5128#else
5872bbf2
RH
5129/* No support for the feature. Provide the entry point expected by exec.c,
5130 and implement the internal function we declared earlier. */
813da627 5131
755bf9e5 5132static void tcg_register_jit_int(const void *buf, size_t size,
2c90784a
RH
5133 const void *debug_frame,
5134 size_t debug_frame_size)
813da627
RH
5135{
5136}
5137
755bf9e5 5138void tcg_register_jit(const void *buf, size_t buf_size)
813da627
RH
5139{
5140}
5141#endif /* ELF_HOST_MACHINE */
db432672
RH
5142
5143#if !TCG_TARGET_MAYBE_vec
5144void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
5145{
5146 g_assert_not_reached();
5147}
5148#endif