/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
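
/*
 * Note: these mirror the DWARF .debug_frame CIE/FDE layout that the GDB
 * JIT interface reads; each backend supplies the actual contents when it
 * calls tcg_register_jit() below.
 */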

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

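/*
 * Emit raw units into the code buffer.  When the value being emitted is
 * wider than one tcg_insn_unit, memcpy sidesteps alignment assumptions;
 * compilers fold it into a plain store where the host allows.
 */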
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F
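
/*
 * As an illustration (constraint letters are per-backend, so treat the
 * set as hypothetical): C_O1_I2(r, r, ri) expands via C_PFX3 to the
 * enumerator c_o1_i2_r_r_ri below, to { "r", "r", "ri" } in
 * constraint_sets[], and back to the enumerator when returned from
 * tcg_target_op_def().
 */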

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};


#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
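/*
 * Allocations are bump-pointer carved from TCG_POOL_CHUNK_SIZE chunks;
 * oversized requests get a dedicated "large" pool.  Nothing is freed
 * individually: tcg_pool_reset() releases everything between
 * translations.
 */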
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}

static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t. */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

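        /*
         * typemask packs one 3-bit dh_typecode per value: bits [2:0]
         * hold the return type and argument N occupies the 3 bits
         * starting at 3 * (N + 1), which the arithmetic below decodes.
         */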
        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);
        assert(nargs <= MAX_CALL_IARGS);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */

typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

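/*
 * Advance arg_slot to an even index, for ABIs that require 64-bit (or
 * wider) arguments to begin in an aligned register pair or stack slot.
 */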
static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}

static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structures passed by reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->ref_slot += n;
}

static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        init_call_layout(&all_helpers[i]);
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

#ifdef CONFIG_TCG_INTERPRETER
    init_ffi_layouts();
#endif

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}

static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type. */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

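    /*
     * Types wider than a host register are backed by a run of
     * consecutive TCGTemps, one per host word, distinguished by
     * temp_subindex.
     */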
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}

TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}

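/*
 * Vector constants are interned by their replicated 64-bit image:
 * dup_const() broadcasts the vece-sized element across 64 bits before
 * the lookup in the shared constant table above.
 */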
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}

static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);

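/*
 * Emit a call opcode.  op->args[] is laid out as the nr_out output
 * temps, then the nr_in input temps, then the function pointer and
 * the TCGHelperInfo, hence total_args = nr_out + nr_in + 2.
 */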
1751 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
1752 {
1753 const TCGHelperInfo *info;
1754 TCGv_i64 extend_free[MAX_CALL_IARGS];
1755 int n_extend = 0;
1756 TCGOp *op;
1757 int i, n, pi = 0, total_args;
1758
1759 info = g_hash_table_lookup(helper_table, (gpointer)func);
1760 total_args = info->nr_out + info->nr_in + 2;
1761 op = tcg_op_alloc(INDEX_op_call, total_args);
1762
1763 #ifdef CONFIG_PLUGIN
1764 /* Flag helpers that may affect guest state */
1765 if (tcg_ctx->plugin_insn &&
1766 !(info->flags & TCG_CALL_PLUGIN) &&
1767 !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1768 tcg_ctx->plugin_insn->calls_helpers = true;
1769 }
1770 #endif
1771
1772 TCGOP_CALLO(op) = n = info->nr_out;
1773 switch (n) {
1774 case 0:
1775 tcg_debug_assert(ret == NULL);
1776 break;
1777 case 1:
1778 tcg_debug_assert(ret != NULL);
1779 op->args[pi++] = temp_arg(ret);
1780 break;
1781 case 2:
1782 case 4:
1783 tcg_debug_assert(ret != NULL);
1784 tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
1785 tcg_debug_assert(ret->temp_subindex == 0);
1786 for (i = 0; i < n; ++i) {
1787 op->args[pi++] = temp_arg(ret + i);
1788 }
1789 break;
1790 default:
1791 g_assert_not_reached();
1792 }
1793
1794 TCGOP_CALLI(op) = n = info->nr_in;
1795 for (i = 0; i < n; i++) {
1796 const TCGCallArgumentLoc *loc = &info->in[i];
1797 TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
1798
1799 switch (loc->kind) {
1800 case TCG_CALL_ARG_NORMAL:
1801 case TCG_CALL_ARG_BY_REF:
1802 case TCG_CALL_ARG_BY_REF_N:
1803 op->args[pi++] = temp_arg(ts);
1804 break;
1805
1806 case TCG_CALL_ARG_EXTEND_U:
1807 case TCG_CALL_ARG_EXTEND_S:
1808 {
1809 TCGv_i64 temp = tcg_temp_ebb_new_i64();
1810 TCGv_i32 orig = temp_tcgv_i32(ts);
1811
1812 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
1813 tcg_gen_ext_i32_i64(temp, orig);
1814 } else {
1815 tcg_gen_extu_i32_i64(temp, orig);
1816 }
1817 op->args[pi++] = tcgv_i64_arg(temp);
1818 extend_free[n_extend++] = temp;
1819 }
1820 break;
1821
1822 default:
1823 g_assert_not_reached();
1824 }
1825 }
1826 op->args[pi++] = (uintptr_t)func;
1827 op->args[pi++] = (uintptr_t)info;
1828 tcg_debug_assert(pi == total_args);
1829
1830 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
1831
1832 tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
1833 for (i = 0; i < n_extend; ++i) {
1834 tcg_temp_free_i64(extend_free[i]);
1835 }
1836 }
1837
1838 static void tcg_reg_alloc_start(TCGContext *s)
1839 {
1840 int i, n;
1841
1842 for (i = 0, n = s->nb_temps; i < n; i++) {
1843 TCGTemp *ts = &s->temps[i];
1844 TCGTempVal val = TEMP_VAL_MEM;
1845
1846 switch (ts->kind) {
1847 case TEMP_CONST:
1848 val = TEMP_VAL_CONST;
1849 break;
1850 case TEMP_FIXED:
1851 val = TEMP_VAL_REG;
1852 break;
1853 case TEMP_GLOBAL:
1854 break;
1855 case TEMP_EBB:
1856 val = TEMP_VAL_DEAD;
1857 /* fall through */
1858 case TEMP_TB:
1859 ts->mem_allocated = 0;
1860 break;
1861 default:
1862 g_assert_not_reached();
1863 }
1864 ts->val_type = val;
1865 }
1866
1867 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
1868 }
1869
1870 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1871 TCGTemp *ts)
1872 {
1873 int idx = temp_idx(ts);
1874
1875 switch (ts->kind) {
1876 case TEMP_FIXED:
1877 case TEMP_GLOBAL:
1878 pstrcpy(buf, buf_size, ts->name);
1879 break;
1880 case TEMP_TB:
1881 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1882 break;
1883 case TEMP_EBB:
1884 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1885 break;
1886 case TEMP_CONST:
1887 switch (ts->type) {
1888 case TCG_TYPE_I32:
1889 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1890 break;
1891 #if TCG_TARGET_REG_BITS > 32
1892 case TCG_TYPE_I64:
1893 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1894 break;
1895 #endif
1896 case TCG_TYPE_V64:
1897 case TCG_TYPE_V128:
1898 case TCG_TYPE_V256:
1899 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1900 64 << (ts->type - TCG_TYPE_V64), ts->val);
1901 break;
1902 default:
1903 g_assert_not_reached();
1904 }
1905 break;
1906 }
1907 return buf;
1908 }
1909
1910 static char *tcg_get_arg_str(TCGContext *s, char *buf,
1911 int buf_size, TCGArg arg)
1912 {
1913 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
1914 }
1915
1916 static const char * const cond_name[] =
1917 {
1918 [TCG_COND_NEVER] = "never",
1919 [TCG_COND_ALWAYS] = "always",
1920 [TCG_COND_EQ] = "eq",
1921 [TCG_COND_NE] = "ne",
1922 [TCG_COND_LT] = "lt",
1923 [TCG_COND_GE] = "ge",
1924 [TCG_COND_LE] = "le",
1925 [TCG_COND_GT] = "gt",
1926 [TCG_COND_LTU] = "ltu",
1927 [TCG_COND_GEU] = "geu",
1928 [TCG_COND_LEU] = "leu",
1929 [TCG_COND_GTU] = "gtu"
1930 };
1931
1932 static const char * const ldst_name[] =
1933 {
1934 [MO_UB] = "ub",
1935 [MO_SB] = "sb",
1936 [MO_LEUW] = "leuw",
1937 [MO_LESW] = "lesw",
1938 [MO_LEUL] = "leul",
1939 [MO_LESL] = "lesl",
1940 [MO_LEUQ] = "leq",
1941 [MO_BEUW] = "beuw",
1942 [MO_BESW] = "besw",
1943 [MO_BEUL] = "beul",
1944 [MO_BESL] = "besl",
1945 [MO_BEUQ] = "beq",
1946 };
1947
1948 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1949 #ifdef TARGET_ALIGNED_ONLY
1950 [MO_UNALN >> MO_ASHIFT] = "un+",
1951 [MO_ALIGN >> MO_ASHIFT] = "",
1952 #else
1953 [MO_UNALN >> MO_ASHIFT] = "",
1954 [MO_ALIGN >> MO_ASHIFT] = "al+",
1955 #endif
1956 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1957 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1958 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1959 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1960 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1961 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1962 };
1963
1964 static const char bswap_flag_name[][6] = {
1965 [TCG_BSWAP_IZ] = "iz",
1966 [TCG_BSWAP_OZ] = "oz",
1967 [TCG_BSWAP_OS] = "os",
1968 [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
1969 [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
1970 };
1971
1972 static inline bool tcg_regset_single(TCGRegSet d)
1973 {
1974 return (d & (d - 1)) == 0;
1975 }
1976
1977 static inline TCGReg tcg_regset_first(TCGRegSet d)
1978 {
1979 if (TCG_TARGET_NB_REGS <= 32) {
1980 return ctz32(d);
1981 } else {
1982 return ctz64(d);
1983 }
1984 }
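/*
 * tcg_regset_single relies on the classic power-of-two test: clearing
 * the lowest set bit with d & (d - 1) leaves zero iff at most one bit
 * was set.  tcg_regset_first recovers that register as the number of
 * trailing zeros, e.g. a set holding only register 5 gives ctz(0x20) = 5.
 */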
1985
1986 /* Return only the number of characters output -- no error return. */
1987 #define ne_fprintf(...) \
1988 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
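/*
 * This is a GNU C statement expression: a negative fprintf return is
 * clamped to 0, so the column counters accumulated in tcg_dump_ops
 * below can never move backwards on an I/O error.
 */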
1989
1990 static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
1991 {
1992 char buf[128];
1993 TCGOp *op;
1994
1995 QTAILQ_FOREACH(op, &s->ops, link) {
1996 int i, k, nb_oargs, nb_iargs, nb_cargs;
1997 const TCGOpDef *def;
1998 TCGOpcode c;
1999 int col = 0;
2000
2001 c = op->opc;
2002 def = &tcg_op_defs[c];
2003
2004 if (c == INDEX_op_insn_start) {
2005 nb_oargs = 0;
2006 col += ne_fprintf(f, "\n ----");
2007
2008 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2009 target_ulong a;
2010 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2011 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
2012 #else
2013 a = op->args[i];
2014 #endif
2015 col += ne_fprintf(f, " " TARGET_FMT_lx, a);
2016 }
2017 } else if (c == INDEX_op_call) {
2018 const TCGHelperInfo *info = tcg_call_info(op);
2019 void *func = tcg_call_func(op);
2020
2021 /* variable number of arguments */
2022 nb_oargs = TCGOP_CALLO(op);
2023 nb_iargs = TCGOP_CALLI(op);
2024 nb_cargs = def->nb_cargs;
2025
2026 col += ne_fprintf(f, " %s ", def->name);
2027
2028 /*
2029 * Print the function name from TCGHelperInfo, if available.
2030 * Note that plugins have a template function for the info,
2031 * but the actual function pointer comes from the plugin.
2032 */
2033 if (func == info->func) {
2034 col += ne_fprintf(f, "%s", info->name);
2035 } else {
2036 col += ne_fprintf(f, "plugin(%p)", func);
2037 }
2038
2039 col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
2040 for (i = 0; i < nb_oargs; i++) {
2041 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
2042 op->args[i]));
2043 }
2044 for (i = 0; i < nb_iargs; i++) {
2045 TCGArg arg = op->args[nb_oargs + i];
2046 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
2047 col += ne_fprintf(f, ",%s", t);
2048 }
2049 } else {
2050 col += ne_fprintf(f, " %s ", def->name);
2051
2052 nb_oargs = def->nb_oargs;
2053 nb_iargs = def->nb_iargs;
2054 nb_cargs = def->nb_cargs;
2055
2056 if (def->flags & TCG_OPF_VECTOR) {
2057 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
2058 8 << TCGOP_VECE(op));
2059 }
2060
2061 k = 0;
2062 for (i = 0; i < nb_oargs; i++) {
2063 const char *sep = k ? "," : "";
2064 col += ne_fprintf(f, "%s%s", sep,
2065 tcg_get_arg_str(s, buf, sizeof(buf),
2066 op->args[k++]));
2067 }
2068 for (i = 0; i < nb_iargs; i++) {
2069 const char *sep = k ? "," : "";
2070 col += ne_fprintf(f, "%s%s", sep,
2071 tcg_get_arg_str(s, buf, sizeof(buf),
2072 op->args[k++]));
2073 }
2074 switch (c) {
2075 case INDEX_op_brcond_i32:
2076 case INDEX_op_setcond_i32:
2077 case INDEX_op_movcond_i32:
2078 case INDEX_op_brcond2_i32:
2079 case INDEX_op_setcond2_i32:
2080 case INDEX_op_brcond_i64:
2081 case INDEX_op_setcond_i64:
2082 case INDEX_op_movcond_i64:
2083 case INDEX_op_cmp_vec:
2084 case INDEX_op_cmpsel_vec:
2085 if (op->args[k] < ARRAY_SIZE(cond_name)
2086 && cond_name[op->args[k]]) {
2087 col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
2088 } else {
2089 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
2090 }
2091 i = 1;
2092 break;
2093 case INDEX_op_qemu_ld_i32:
2094 case INDEX_op_qemu_st_i32:
2095 case INDEX_op_qemu_st8_i32:
2096 case INDEX_op_qemu_ld_i64:
2097 case INDEX_op_qemu_st_i64:
2098 {
2099 MemOpIdx oi = op->args[k++];
2100 MemOp op = get_memop(oi);
2101 unsigned ix = get_mmuidx(oi);
2102
2103 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
2104 col += ne_fprintf(f, ",$0x%x,%u", op, ix);
2105 } else {
2106 const char *s_al, *s_op;
2107 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
2108 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
2109 col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
2110 }
2111 i = 1;
2112 }
2113 break;
2114 case INDEX_op_bswap16_i32:
2115 case INDEX_op_bswap16_i64:
2116 case INDEX_op_bswap32_i32:
2117 case INDEX_op_bswap32_i64:
2118 case INDEX_op_bswap64_i64:
2119 {
2120 TCGArg flags = op->args[k];
2121 const char *name = NULL;
2122
2123 if (flags < ARRAY_SIZE(bswap_flag_name)) {
2124 name = bswap_flag_name[flags];
2125 }
2126 if (name) {
2127 col += ne_fprintf(f, ",%s", name);
2128 } else {
2129 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
2130 }
2131 i = k = 1;
2132 }
2133 break;
2134 default:
2135 i = 0;
2136 break;
2137 }
2138 switch (c) {
2139 case INDEX_op_set_label:
2140 case INDEX_op_br:
2141 case INDEX_op_brcond_i32:
2142 case INDEX_op_brcond_i64:
2143 case INDEX_op_brcond2_i32:
2144 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2145 arg_label(op->args[k])->id);
2146 i++, k++;
2147 break;
2148 case INDEX_op_mb:
2149 {
2150 TCGBar membar = op->args[k];
2151 const char *b_op, *m_op;
2152
2153 switch (membar & TCG_BAR_SC) {
2154 case 0:
2155 b_op = "none";
2156 break;
2157 case TCG_BAR_LDAQ:
2158 b_op = "acq";
2159 break;
2160 case TCG_BAR_STRL:
2161 b_op = "rel";
2162 break;
2163 case TCG_BAR_SC:
2164 b_op = "seq";
2165 break;
2166 default:
2167 g_assert_not_reached();
2168 }
2169
2170 switch (membar & TCG_MO_ALL) {
2171 case 0:
2172 m_op = "none";
2173 break;
2174 case TCG_MO_LD_LD:
2175 m_op = "rr";
2176 break;
2177 case TCG_MO_LD_ST:
2178 m_op = "rw";
2179 break;
2180 case TCG_MO_ST_LD:
2181 m_op = "wr";
2182 break;
2183 case TCG_MO_ST_ST:
2184 m_op = "ww";
2185 break;
2186 case TCG_MO_LD_LD | TCG_MO_LD_ST:
2187 m_op = "rr+rw";
2188 break;
2189 case TCG_MO_LD_LD | TCG_MO_ST_LD:
2190 m_op = "rr+wr";
2191 break;
2192 case TCG_MO_LD_LD | TCG_MO_ST_ST:
2193 m_op = "rr+ww";
2194 break;
2195 case TCG_MO_LD_ST | TCG_MO_ST_LD:
2196 m_op = "rw+wr";
2197 break;
2198 case TCG_MO_LD_ST | TCG_MO_ST_ST:
2199 m_op = "rw+ww";
2200 break;
2201 case TCG_MO_ST_LD | TCG_MO_ST_ST:
2202 m_op = "wr+ww";
2203 break;
2204 case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
2205 m_op = "rr+rw+wr";
2206 break;
2207 case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
2208 m_op = "rr+rw+ww";
2209 break;
2210 case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
2211 m_op = "rr+wr+ww";
2212 break;
2213 case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
2214 m_op = "rw+wr+ww";
2215 break;
2216 case TCG_MO_ALL:
2217 m_op = "all";
2218 break;
2219 default:
2220 g_assert_not_reached();
2221 }
2222
2223 col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
2224 i++, k++;
2225 }
2226 break;
2227 default:
2228 break;
2229 }
2230 for (; i < nb_cargs; i++, k++) {
2231 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2232 op->args[k]);
2233 }
2234 }
2235
2236 if (have_prefs || op->life) {
2237 for (; col < 40; ++col) {
2238 putc(' ', f);
2239 }
2240 }
2241
2242 if (op->life) {
2243 unsigned life = op->life;
2244
2245 if (life & (SYNC_ARG * 3)) {
2246 ne_fprintf(f, " sync:");
2247 for (i = 0; i < 2; ++i) {
2248 if (life & (SYNC_ARG << i)) {
2249 ne_fprintf(f, " %d", i);
2250 }
2251 }
2252 }
2253 life /= DEAD_ARG;
2254 if (life) {
2255 ne_fprintf(f, " dead:");
2256 for (i = 0; life; ++i, life >>= 1) {
2257 if (life & 1) {
2258 ne_fprintf(f, " %d", i);
2259 }
2260 }
2261 }
2262 }
2263
2264 if (have_prefs) {
2265 for (i = 0; i < nb_oargs; ++i) {
2266 TCGRegSet set = output_pref(op, i);
2267
2268 if (i == 0) {
2269 ne_fprintf(f, " pref=");
2270 } else {
2271 ne_fprintf(f, ",");
2272 }
2273 if (set == 0) {
2274 ne_fprintf(f, "none");
2275 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
2276 ne_fprintf(f, "all");
2277 #ifdef CONFIG_DEBUG_TCG
2278 } else if (tcg_regset_single(set)) {
2279 TCGReg reg = tcg_regset_first(set);
2280 ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
2281 #endif
2282 } else if (TCG_TARGET_NB_REGS <= 32) {
2283 ne_fprintf(f, "0x%x", (uint32_t)set);
2284 } else {
2285 ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
2286 }
2287 }
2288 }
2289
2290 putc('\n', f);
2291 }
2292 }
2293
2294 /* we give more priority to constraints with fewer registers */
2295 static int get_constraint_priority(const TCGOpDef *def, int k)
2296 {
2297 const TCGArgConstraint *arg_ct = &def->args_ct[k];
2298 int n = ctpop64(arg_ct->regs);
2299
2300 /*
2301 * Sort constraints of a single register first, which includes output
2302 * aliases (which must exactly match the input already allocated).
2303 */
2304 if (n == 1 || arg_ct->oalias) {
2305 return INT_MAX;
2306 }
2307
2308 /*
2309 * Sort register pairs next, first then second immediately after.
2310 * Arbitrarily sort multiple pairs by the index of the first reg;
2311 * there shouldn't be many pairs.
2312 */
2313 switch (arg_ct->pair) {
2314 case 1:
2315 case 3:
2316 return (k + 1) * 2;
2317 case 2:
2318 return (arg_ct->pair_index + 1) * 2 - 1;
2319 }
2320
2321 /* Finally, sort by decreasing register count. */
2322 assert(n > 1);
2323 return -n;
2324 }
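/*
 * Illustration (hypothetical register counts): among inputs constrained
 * to a 4-register class and a 16-register class, the priorities are -4
 * and -16, so the smaller class is allocated first; an argument with a
 * single-register or output-alias constraint returns INT_MAX and always
 * sorts ahead of both.
 */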
2325
2326 /* sort from highest priority to lowest */
2327 static void sort_constraints(TCGOpDef *def, int start, int n)
2328 {
2329 int i, j;
2330 TCGArgConstraint *a = def->args_ct;
2331
2332 for (i = 0; i < n; i++) {
2333 a[start + i].sort_index = start + i;
2334 }
2335 if (n <= 1) {
2336 return;
2337 }
2338 for (i = 0; i < n - 1; i++) {
2339 for (j = i + 1; j < n; j++) {
2340 int p1 = get_constraint_priority(def, a[start + i].sort_index);
2341 int p2 = get_constraint_priority(def, a[start + j].sort_index);
2342 if (p1 < p2) {
2343 int tmp = a[start + i].sort_index;
2344 a[start + i].sort_index = a[start + j].sort_index;
2345 a[start + j].sort_index = tmp;
2346 }
2347 }
2348 }
2349 }
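/*
 * This is a simple O(n^2) exchange sort over sort_index; n is bounded
 * by TCG_MAX_OP_ARGS and the sort runs once per opcode at startup, so
 * nothing faster is needed.
 */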
2350
2351 static void process_op_defs(TCGContext *s)
2352 {
2353 TCGOpcode op;
2354
2355 for (op = 0; op < NB_OPS; op++) {
2356 TCGOpDef *def = &tcg_op_defs[op];
2357 const TCGTargetOpDef *tdefs;
2358 bool saw_alias_pair = false;
2359 int i, o, i2, o2, nb_args;
2360
2361 if (def->flags & TCG_OPF_NOT_PRESENT) {
2362 continue;
2363 }
2364
2365 nb_args = def->nb_iargs + def->nb_oargs;
2366 if (nb_args == 0) {
2367 continue;
2368 }
2369
2370 /*
2371 * Macro magic should make it impossible, but double-check that
2372 * the array index is in range. Since the signedness of an enum
2373 * is implementation-defined, force the result to unsigned.
2374 */
2375 unsigned con_set = tcg_target_op_def(op);
2376 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2377 tdefs = &constraint_sets[con_set];
2378
2379 for (i = 0; i < nb_args; i++) {
2380 const char *ct_str = tdefs->args_ct_str[i];
2381 bool input_p = i >= def->nb_oargs;
2382
2383 /* Incomplete TCGTargetOpDef entry. */
2384 tcg_debug_assert(ct_str != NULL);
2385
2386 switch (*ct_str) {
2387 case '0' ... '9':
2388 o = *ct_str - '0';
2389 tcg_debug_assert(input_p);
2390 tcg_debug_assert(o < def->nb_oargs);
2391 tcg_debug_assert(def->args_ct[o].regs != 0);
2392 tcg_debug_assert(!def->args_ct[o].oalias);
2393 def->args_ct[i] = def->args_ct[o];
2394 /* The output sets oalias. */
2395 def->args_ct[o].oalias = 1;
2396 def->args_ct[o].alias_index = i;
2397 /* The input sets ialias. */
2398 def->args_ct[i].ialias = 1;
2399 def->args_ct[i].alias_index = o;
2400 if (def->args_ct[i].pair) {
2401 saw_alias_pair = true;
2402 }
2403 tcg_debug_assert(ct_str[1] == '\0');
2404 continue;
2405
2406 case '&':
2407 tcg_debug_assert(!input_p);
2408 def->args_ct[i].newreg = true;
2409 ct_str++;
2410 break;
2411
2412 case 'p': /* plus */
2413 /* Allocate to the register after the previous. */
2414 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2415 o = i - 1;
2416 tcg_debug_assert(!def->args_ct[o].pair);
2417 tcg_debug_assert(!def->args_ct[o].ct);
2418 def->args_ct[i] = (TCGArgConstraint){
2419 .pair = 2,
2420 .pair_index = o,
2421 .regs = def->args_ct[o].regs << 1,
2422 };
2423 def->args_ct[o].pair = 1;
2424 def->args_ct[o].pair_index = i;
2425 tcg_debug_assert(ct_str[1] == '\0');
2426 continue;
2427
2428 case 'm': /* minus */
2429 /* Allocate to the register before the previous. */
2430 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2431 o = i - 1;
2432 tcg_debug_assert(!def->args_ct[o].pair);
2433 tcg_debug_assert(!def->args_ct[o].ct);
2434 def->args_ct[i] = (TCGArgConstraint){
2435 .pair = 1,
2436 .pair_index = o,
2437 .regs = def->args_ct[o].regs >> 1,
2438 };
2439 def->args_ct[o].pair = 2;
2440 def->args_ct[o].pair_index = i;
2441 tcg_debug_assert(ct_str[1] == '\0');
2442 continue;
2443 }
2444
2445 do {
2446 switch (*ct_str) {
2447 case 'i':
2448 def->args_ct[i].ct |= TCG_CT_CONST;
2449 break;
2450
2451 /* Include all of the target-specific constraints. */
2452
2453 #undef CONST
2454 #define CONST(CASE, MASK) \
2455 case CASE: def->args_ct[i].ct |= MASK; break;
2456 #define REGS(CASE, MASK) \
2457 case CASE: def->args_ct[i].regs |= MASK; break;
2458
2459 #include "tcg-target-con-str.h"
2460
2461 #undef REGS
2462 #undef CONST
2463 default:
2464 case '0' ... '9':
2465 case '&':
2466 case 'p':
2467 case 'm':
2468 /* Typo in TCGTargetOpDef constraint. */
2469 g_assert_not_reached();
2470 }
2471 } while (*++ct_str != '\0');
2472 }
2473
2474 /* TCGTargetOpDef entry with too much information? */
2475 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
2476
2477 /*
2478 * Fix up output pairs that are aliased with inputs.
2479 * When we created the alias, we copied pair from the output.
2480 * There are three cases:
2481 * (1a) Pairs of inputs alias pairs of outputs.
2482 * (1b) One input aliases the first of a pair of outputs.
2483 * (2) One input aliases the second of a pair of outputs.
2484 *
2485 * Case 1a is handled by making sure that the pair_index'es are
2486 * properly updated so that they appear the same as a pair of inputs.
2487 *
2488 * Case 1b is handled by setting the pair_index of the input to
2489 * itself, simply so it doesn't point to an unrelated argument.
2490 * Since we don't encounter the "second" during the input allocation
2491 * phase, nothing happens with the second half of the input pair.
2492 *
2493 * Case 2 is handled by setting the second input to pair=3, the
2494 * first output to pair=3, and the pair_index'es to match.
2495 */
2496 if (saw_alias_pair) {
2497 for (i = def->nb_oargs; i < nb_args; i++) {
2498 /*
2499 * Since [0-9pm] must be alone in the constraint string,
2500 * the only way they can both be set is if the pair comes
2501 * from the output alias.
2502 */
2503 if (!def->args_ct[i].ialias) {
2504 continue;
2505 }
2506 switch (def->args_ct[i].pair) {
2507 case 0:
2508 break;
2509 case 1:
2510 o = def->args_ct[i].alias_index;
2511 o2 = def->args_ct[o].pair_index;
2512 tcg_debug_assert(def->args_ct[o].pair == 1);
2513 tcg_debug_assert(def->args_ct[o2].pair == 2);
2514 if (def->args_ct[o2].oalias) {
2515 /* Case 1a */
2516 i2 = def->args_ct[o2].alias_index;
2517 tcg_debug_assert(def->args_ct[i2].pair == 2);
2518 def->args_ct[i2].pair_index = i;
2519 def->args_ct[i].pair_index = i2;
2520 } else {
2521 /* Case 1b */
2522 def->args_ct[i].pair_index = i;
2523 }
2524 break;
2525 case 2:
2526 o = def->args_ct[i].alias_index;
2527 o2 = def->args_ct[o].pair_index;
2528 tcg_debug_assert(def->args_ct[o].pair == 2);
2529 tcg_debug_assert(def->args_ct[o2].pair == 1);
2530 if (def->args_ct[o2].oalias) {
2531 /* Case 1a */
2532 i2 = def->args_ct[o2].alias_index;
2533 tcg_debug_assert(def->args_ct[i2].pair == 1);
2534 def->args_ct[i2].pair_index = i;
2535 def->args_ct[i].pair_index = i2;
2536 } else {
2537 /* Case 2 */
2538 def->args_ct[i].pair = 3;
2539 def->args_ct[o2].pair = 3;
2540 def->args_ct[i].pair_index = o2;
2541 def->args_ct[o2].pair_index = i;
2542 }
2543 break;
2544 default:
2545 g_assert_not_reached();
2546 }
2547 }
2548 }
2549
2550 /* sort the constraints (XXX: this is just a heuristic) */
2551 sort_constraints(def, 0, def->nb_oargs);
2552 sort_constraints(def, def->nb_oargs, def->nb_iargs);
2553 }
2554 }
2555
2556 static void remove_label_use(TCGOp *op, int idx)
2557 {
2558 TCGLabel *label = arg_label(op->args[idx]);
2559 TCGLabelUse *use;
2560
2561 QSIMPLEQ_FOREACH(use, &label->branches, next) {
2562 if (use->op == op) {
2563 QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
2564 return;
2565 }
2566 }
2567 g_assert_not_reached();
2568 }
2569
2570 void tcg_op_remove(TCGContext *s, TCGOp *op)
2571 {
2572 switch (op->opc) {
2573 case INDEX_op_br:
2574 remove_label_use(op, 0);
2575 break;
2576 case INDEX_op_brcond_i32:
2577 case INDEX_op_brcond_i64:
2578 remove_label_use(op, 3);
2579 break;
2580 case INDEX_op_brcond2_i32:
2581 remove_label_use(op, 5);
2582 break;
2583 default:
2584 break;
2585 }
2586
2587 QTAILQ_REMOVE(&s->ops, op, link);
2588 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
2589 s->nb_ops--;
2590
2591 #ifdef CONFIG_PROFILER
2592 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
2593 #endif
2594 }
2595
2596 void tcg_remove_ops_after(TCGOp *op)
2597 {
2598 TCGContext *s = tcg_ctx;
2599
2600 while (true) {
2601 TCGOp *last = tcg_last_op();
2602 if (last == op) {
2603 return;
2604 }
2605 tcg_op_remove(s, last);
2606 }
2607 }
2608
2609 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
2610 {
2611 TCGContext *s = tcg_ctx;
2612 TCGOp *op = NULL;
2613
2614 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
2615 QTAILQ_FOREACH(op, &s->free_ops, link) {
2616 if (nargs <= op->nargs) {
2617 QTAILQ_REMOVE(&s->free_ops, op, link);
2618 nargs = op->nargs;
2619 goto found;
2620 }
2621 }
2622 }
2623
2624 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
2625 nargs = MAX(4, nargs);
2626 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
2627
2628 found:
2629 memset(op, 0, offsetof(TCGOp, link));
2630 op->opc = opc;
2631 op->nargs = nargs;
2632
2633 /* Check for bitfield overflow. */
2634 tcg_debug_assert(op->nargs == nargs);
2635
2636 s->nb_ops++;
2637 return op;
2638 }
2639
2640 TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
2641 {
2642 TCGOp *op = tcg_op_alloc(opc, nargs);
2643 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2644 return op;
2645 }
2646
2647 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
2648 TCGOpcode opc, unsigned nargs)
2649 {
2650 TCGOp *new_op = tcg_op_alloc(opc, nargs);
2651 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
2652 return new_op;
2653 }
2654
2655 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
2656 TCGOpcode opc, unsigned nargs)
2657 {
2658 TCGOp *new_op = tcg_op_alloc(opc, nargs);
2659 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
2660 return new_op;
2661 }
2662
2663 static void move_label_uses(TCGLabel *to, TCGLabel *from)
2664 {
2665 TCGLabelUse *u;
2666
2667 QSIMPLEQ_FOREACH(u, &from->branches, next) {
2668 TCGOp *op = u->op;
2669 switch (op->opc) {
2670 case INDEX_op_br:
2671 op->args[0] = label_arg(to);
2672 break;
2673 case INDEX_op_brcond_i32:
2674 case INDEX_op_brcond_i64:
2675 op->args[3] = label_arg(to);
2676 break;
2677 case INDEX_op_brcond2_i32:
2678 op->args[5] = label_arg(to);
2679 break;
2680 default:
2681 g_assert_not_reached();
2682 }
2683 }
2684
2685 QSIMPLEQ_CONCAT(&to->branches, &from->branches);
2686 }
2687
2688 /* Reachability analysis: remove unreachable code. */
2689 static void __attribute__((noinline))
2690 reachable_code_pass(TCGContext *s)
2691 {
2692 TCGOp *op, *op_next, *op_prev;
2693 bool dead = false;
2694
2695 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2696 bool remove = dead;
2697 TCGLabel *label;
2698
2699 switch (op->opc) {
2700 case INDEX_op_set_label:
2701 label = arg_label(op->args[0]);
2702
2703 /*
2704 * Note that the first op in the TB is always a load,
2705 * so there is always something before a label.
2706 */
2707 op_prev = QTAILQ_PREV(op, link);
2708
2709 /*
2710 * If we find two sequential labels, move all branches to
2711 * reference the second label and remove the first label.
2712 * Do this before branch to next optimization, so that the
2713 * middle label is out of the way.
2714 */
2715 if (op_prev->opc == INDEX_op_set_label) {
2716 move_label_uses(label, arg_label(op_prev->args[0]));
2717 tcg_op_remove(s, op_prev);
2718 op_prev = QTAILQ_PREV(op, link);
2719 }
2720
2721 /*
2722 * Optimization can fold conditional branches to unconditional.
2723 * If we find a label which is preceded by an unconditional
2724 * branch to next, remove the branch. We couldn't do this when
2725 * processing the branch because any dead code between the branch
2726 * and label had not yet been removed.
2727 */
2728 if (op_prev->opc == INDEX_op_br &&
2729 label == arg_label(op_prev->args[0])) {
2730 tcg_op_remove(s, op_prev);
2731 /* Fall through means insns become live again. */
2732 dead = false;
2733 }
2734
2735 if (QSIMPLEQ_EMPTY(&label->branches)) {
2736 /*
2737 * While there is an occasional backward branch, virtually
2738 * all branches generated by the translators are forward.
2739 * Which means that generally we will have already removed
2740 * all the references to the label that there will ever be,
2741 * and there is little to be gained by iterating.
2742 */
2743 remove = true;
2744 } else {
2745 /* Once we see a label, insns become live again. */
2746 dead = false;
2747 remove = false;
2748 }
2749 break;
2750
2751 case INDEX_op_br:
2752 case INDEX_op_exit_tb:
2753 case INDEX_op_goto_ptr:
2754 /* Unconditional branches; everything following is dead. */
2755 dead = true;
2756 break;
2757
2758 case INDEX_op_call:
2759 /* Notice noreturn helper calls, raising exceptions. */
2760 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
2761 dead = true;
2762 }
2763 break;
2764
2765 case INDEX_op_insn_start:
2766 /* Never remove -- we need to keep these for unwind. */
2767 remove = false;
2768 break;
2769
2770 default:
2771 break;
2772 }
2773
2774 if (remove) {
2775 tcg_op_remove(s, op);
2776 }
2777 }
2778 }
2779
2780 #define TS_DEAD 1
2781 #define TS_MEM 2
2782
2783 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2784 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2785
2786 /* For liveness_pass_1, the register preferences for a given temp. */
2787 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2788 {
2789 return ts->state_ptr;
2790 }
2791
2792 /* For liveness_pass_1, reset the preferences for a given temp to the
2793 * maximal regset for its type.
2794 */
2795 static inline void la_reset_pref(TCGTemp *ts)
2796 {
2797 *la_temp_pref(ts)
2798 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2799 }
2800
2801 /* liveness analysis: end of function: all temps are dead, and globals
2802 should be in memory. */
2803 static void la_func_end(TCGContext *s, int ng, int nt)
2804 {
2805 int i;
2806
2807 for (i = 0; i < ng; ++i) {
2808 s->temps[i].state = TS_DEAD | TS_MEM;
2809 la_reset_pref(&s->temps[i]);
2810 }
2811 for (i = ng; i < nt; ++i) {
2812 s->temps[i].state = TS_DEAD;
2813 la_reset_pref(&s->temps[i]);
2814 }
2815 }
2816
2817 /* liveness analysis: end of basic block: all temps are dead; globals
2818 and local temps should be in memory. */
2819 static void la_bb_end(TCGContext *s, int ng, int nt)
2820 {
2821 int i;
2822
2823 for (i = 0; i < nt; ++i) {
2824 TCGTemp *ts = &s->temps[i];
2825 int state;
2826
2827 switch (ts->kind) {
2828 case TEMP_FIXED:
2829 case TEMP_GLOBAL:
2830 case TEMP_TB:
2831 state = TS_DEAD | TS_MEM;
2832 break;
2833 case TEMP_EBB:
2834 case TEMP_CONST:
2835 state = TS_DEAD;
2836 break;
2837 default:
2838 g_assert_not_reached();
2839 }
2840 ts->state = state;
2841 la_reset_pref(ts);
2842 }
2843 }
2844
2845 /* liveness analysis: sync globals back to memory. */
2846 static void la_global_sync(TCGContext *s, int ng)
2847 {
2848 int i;
2849
2850 for (i = 0; i < ng; ++i) {
2851 int state = s->temps[i].state;
2852 s->temps[i].state = state | TS_MEM;
2853 if (state == TS_DEAD) {
2854 /* If the global was previously dead, reset prefs. */
2855 la_reset_pref(&s->temps[i]);
2856 }
2857 }
2858 }
2859
2860 /*
2861 * liveness analysis: conditional branch: all temps are dead unless
2862 * explicitly live-across-conditional-branch; globals and local temps
2863 * should be synced.
2864 */
2865 static void la_bb_sync(TCGContext *s, int ng, int nt)
2866 {
2867 la_global_sync(s, ng);
2868
2869 for (int i = ng; i < nt; ++i) {
2870 TCGTemp *ts = &s->temps[i];
2871 int state;
2872
2873 switch (ts->kind) {
2874 case TEMP_TB:
2875 state = ts->state;
2876 ts->state = state | TS_MEM;
2877 if (state != TS_DEAD) {
2878 continue;
2879 }
2880 break;
2881 case TEMP_EBB:
2882 case TEMP_CONST:
2883 continue;
2884 default:
2885 g_assert_not_reached();
2886 }
2887 la_reset_pref(&s->temps[i]);
2888 }
2889 }
2890
2891 /* liveness analysis: sync globals back to memory and kill. */
2892 static void la_global_kill(TCGContext *s, int ng)
2893 {
2894 int i;
2895
2896 for (i = 0; i < ng; i++) {
2897 s->temps[i].state = TS_DEAD | TS_MEM;
2898 la_reset_pref(&s->temps[i]);
2899 }
2900 }
2901
2902 /* liveness analysis: note live globals crossing calls. */
2903 static void la_cross_call(TCGContext *s, int nt)
2904 {
2905 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2906 int i;
2907
2908 for (i = 0; i < nt; i++) {
2909 TCGTemp *ts = &s->temps[i];
2910 if (!(ts->state & TS_DEAD)) {
2911 TCGRegSet *pset = la_temp_pref(ts);
2912 TCGRegSet set = *pset;
2913
2914 set &= mask;
2915 /* If the combination is not possible, restart. */
2916 if (set == 0) {
2917 set = tcg_target_available_regs[ts->type] & mask;
2918 }
2919 *pset = set;
2920 }
2921 }
2922 }
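/*
 * E.g. if a live temp so far preferred only call-clobbered registers,
 * its preference set is widened here to the call-saved subset of all
 * registers available for its type.
 */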
2923
2924 /*
2925 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
2926 * to TEMP_EBB, if possible.
2927 */
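/*
 * In effect, a TEMP_TB whose every use falls within one extended basic
 * block behaves exactly like a TEMP_EBB, and downgrading it spares the
 * register allocator from spilling it around labels.
 */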
2928 static void __attribute__((noinline))
2929 liveness_pass_0(TCGContext *s)
2930 {
2931 void * const multiple_ebb = (void *)(uintptr_t)-1;
2932 int nb_temps = s->nb_temps;
2933 TCGOp *op, *ebb;
2934
2935 for (int i = s->nb_globals; i < nb_temps; ++i) {
2936 s->temps[i].state_ptr = NULL;
2937 }
2938
2939 /*
2940 * Represent each EBB by the op at which it begins. In the case of
2941 * the first EBB, this is the first op, otherwise it is a label.
2942 * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
2943 * within a single EBB, else MULTIPLE_EBB.
2944 */
2945 ebb = QTAILQ_FIRST(&s->ops);
2946 QTAILQ_FOREACH(op, &s->ops, link) {
2947 const TCGOpDef *def;
2948 int nb_oargs, nb_iargs;
2949
2950 switch (op->opc) {
2951 case INDEX_op_set_label:
2952 ebb = op;
2953 continue;
2954 case INDEX_op_discard:
2955 continue;
2956 case INDEX_op_call:
2957 nb_oargs = TCGOP_CALLO(op);
2958 nb_iargs = TCGOP_CALLI(op);
2959 break;
2960 default:
2961 def = &tcg_op_defs[op->opc];
2962 nb_oargs = def->nb_oargs;
2963 nb_iargs = def->nb_iargs;
2964 break;
2965 }
2966
2967 for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
2968 TCGTemp *ts = arg_temp(op->args[i]);
2969
2970 if (ts->kind != TEMP_TB) {
2971 continue;
2972 }
2973 if (ts->state_ptr == NULL) {
2974 ts->state_ptr = ebb;
2975 } else if (ts->state_ptr != ebb) {
2976 ts->state_ptr = multiple_ebb;
2977 }
2978 }
2979 }
2980
2981 /*
2982 * For TEMP_TB that turned out not to be used beyond one EBB,
2983 * reduce the liveness to TEMP_EBB.
2984 */
2985 for (int i = s->nb_globals; i < nb_temps; ++i) {
2986 TCGTemp *ts = &s->temps[i];
2987 if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
2988 ts->kind = TEMP_EBB;
2989 }
2990 }
2991 }
2992
2993 /* Liveness analysis: update the opc_arg_life array to tell whether a
2994 given input argument is dead. Instructions updating dead
2995 temporaries are removed. */
2996 static void __attribute__((noinline))
2997 liveness_pass_1(TCGContext *s)
2998 {
2999 int nb_globals = s->nb_globals;
3000 int nb_temps = s->nb_temps;
3001 TCGOp *op, *op_prev;
3002 TCGRegSet *prefs;
3003 int i;
3004
3005 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
3006 for (i = 0; i < nb_temps; ++i) {
3007 s->temps[i].state_ptr = prefs + i;
3008 }
3009
3010 /* ??? Should be redundant with the exit_tb that ends the TB. */
3011 la_func_end(s, nb_globals, nb_temps);
3012
3013 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
3014 int nb_iargs, nb_oargs;
3015 TCGOpcode opc_new, opc_new2;
3016 bool have_opc_new2;
3017 TCGLifeData arg_life = 0;
3018 TCGTemp *ts;
3019 TCGOpcode opc = op->opc;
3020 const TCGOpDef *def = &tcg_op_defs[opc];
3021
3022 switch (opc) {
3023 case INDEX_op_call:
3024 {
3025 const TCGHelperInfo *info = tcg_call_info(op);
3026 int call_flags = tcg_call_flags(op);
3027
3028 nb_oargs = TCGOP_CALLO(op);
3029 nb_iargs = TCGOP_CALLI(op);
3030
3031 /* pure functions can be removed if their result is unused */
3032 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
3033 for (i = 0; i < nb_oargs; i++) {
3034 ts = arg_temp(op->args[i]);
3035 if (ts->state != TS_DEAD) {
3036 goto do_not_remove_call;
3037 }
3038 }
3039 goto do_remove;
3040 }
3041 do_not_remove_call:
3042
3043 /* Output args are dead. */
3044 for (i = 0; i < nb_oargs; i++) {
3045 ts = arg_temp(op->args[i]);
3046 if (ts->state & TS_DEAD) {
3047 arg_life |= DEAD_ARG << i;
3048 }
3049 if (ts->state & TS_MEM) {
3050 arg_life |= SYNC_ARG << i;
3051 }
3052 ts->state = TS_DEAD;
3053 la_reset_pref(ts);
3054 }
3055
3056 /* Not used -- it will be tcg_target_call_oarg_reg(). */
3057 memset(op->output_pref, 0, sizeof(op->output_pref));
3058
3059 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
3060 TCG_CALL_NO_READ_GLOBALS))) {
3061 la_global_kill(s, nb_globals);
3062 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
3063 la_global_sync(s, nb_globals);
3064 }
3065
3066 /* Record arguments that die in this helper. */
3067 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3068 ts = arg_temp(op->args[i]);
3069 if (ts->state & TS_DEAD) {
3070 arg_life |= DEAD_ARG << i;
3071 }
3072 }
3073
3074 /* For all live registers, remove call-clobbered prefs. */
3075 la_cross_call(s, nb_temps);
3076
3077 /*
3078 * Input arguments are live for preceding opcodes.
3079 *
3080 * For those arguments that die, and will be allocated in
3081 * registers, clear the register set for that arg, to be
3082 * filled in below. For args that will be on the stack,
3083 * reset to any available reg. Process arguments in reverse
3084 * order so that if a temp is used more than once, the stack
3085 * reset to max happens before the register reset to 0.
3086 */
3087 for (i = nb_iargs - 1; i >= 0; i--) {
3088 const TCGCallArgumentLoc *loc = &info->in[i];
3089 ts = arg_temp(op->args[nb_oargs + i]);
3090
3091 if (ts->state & TS_DEAD) {
3092 switch (loc->kind) {
3093 case TCG_CALL_ARG_NORMAL:
3094 case TCG_CALL_ARG_EXTEND_U:
3095 case TCG_CALL_ARG_EXTEND_S:
3096 if (REG_P(loc)) {
3097 *la_temp_pref(ts) = 0;
3098 break;
3099 }
3100 /* fall through */
3101 default:
3102 *la_temp_pref(ts) =
3103 tcg_target_available_regs[ts->type];
3104 break;
3105 }
3106 ts->state &= ~TS_DEAD;
3107 }
3108 }
3109
3110 /*
3111 * For each input argument, add its input register to prefs.
3112 * If a temp is used once, this produces a single set bit;
3113 * if a temp is used multiple times, this produces a set.
3114 */
3115 for (i = 0; i < nb_iargs; i++) {
3116 const TCGCallArgumentLoc *loc = &info->in[i];
3117 ts = arg_temp(op->args[nb_oargs + i]);
3118
3119 switch (loc->kind) {
3120 case TCG_CALL_ARG_NORMAL:
3121 case TCG_CALL_ARG_EXTEND_U:
3122 case TCG_CALL_ARG_EXTEND_S:
3123 if (REG_P(loc)) {
3124 tcg_regset_set_reg(*la_temp_pref(ts),
3125 tcg_target_call_iarg_regs[loc->arg_slot]);
3126 }
3127 break;
3128 default:
3129 break;
3130 }
3131 }
3132 }
3133 break;
3134 case INDEX_op_insn_start:
3135 break;
3136 case INDEX_op_discard:
3137 /* mark the temporary as dead */
3138 ts = arg_temp(op->args[0]);
3139 ts->state = TS_DEAD;
3140 la_reset_pref(ts);
3141 break;
3142
3143 case INDEX_op_add2_i32:
3144 opc_new = INDEX_op_add_i32;
3145 goto do_addsub2;
3146 case INDEX_op_sub2_i32:
3147 opc_new = INDEX_op_sub_i32;
3148 goto do_addsub2;
3149 case INDEX_op_add2_i64:
3150 opc_new = INDEX_op_add_i64;
3151 goto do_addsub2;
3152 case INDEX_op_sub2_i64:
3153 opc_new = INDEX_op_sub_i64;
3154 do_addsub2:
3155 nb_iargs = 4;
3156 nb_oargs = 2;
3157 /* Test if the high part of the operation is dead, but not
3158 the low part. The result can be optimized to a simple
3159 add or sub. This happens often for an x86_64 guest when the
3160 cpu mode is set to 32 bit. */
3161 if (arg_temp(op->args[1])->state == TS_DEAD) {
3162 if (arg_temp(op->args[0])->state == TS_DEAD) {
3163 goto do_remove;
3164 }
3165 /* Replace the opcode and adjust the args in place,
3166 leaving 3 unused args at the end. */
3167 op->opc = opc = opc_new;
3168 op->args[1] = op->args[2];
3169 op->args[2] = op->args[4];
3170 /* Fall through and mark the single-word operation live. */
3171 nb_iargs = 2;
3172 nb_oargs = 1;
3173 }
3174 goto do_not_remove;
3175
3176 case INDEX_op_mulu2_i32:
3177 opc_new = INDEX_op_mul_i32;
3178 opc_new2 = INDEX_op_muluh_i32;
3179 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
3180 goto do_mul2;
3181 case INDEX_op_muls2_i32:
3182 opc_new = INDEX_op_mul_i32;
3183 opc_new2 = INDEX_op_mulsh_i32;
3184 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
3185 goto do_mul2;
3186 case INDEX_op_mulu2_i64:
3187 opc_new = INDEX_op_mul_i64;
3188 opc_new2 = INDEX_op_muluh_i64;
3189 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
3190 goto do_mul2;
3191 case INDEX_op_muls2_i64:
3192 opc_new = INDEX_op_mul_i64;
3193 opc_new2 = INDEX_op_mulsh_i64;
3194 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
3195 goto do_mul2;
3196 do_mul2:
3197 nb_iargs = 2;
3198 nb_oargs = 2;
3199 if (arg_temp(op->args[1])->state == TS_DEAD) {
3200 if (arg_temp(op->args[0])->state == TS_DEAD) {
3201 /* Both parts of the operation are dead. */
3202 goto do_remove;
3203 }
3204 /* The high part of the operation is dead; generate the low. */
3205 op->opc = opc = opc_new;
3206 op->args[1] = op->args[2];
3207 op->args[2] = op->args[3];
3208 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
3209 /* The low part of the operation is dead; generate the high. */
3210 op->opc = opc = opc_new2;
3211 op->args[0] = op->args[1];
3212 op->args[1] = op->args[2];
3213 op->args[2] = op->args[3];
3214 } else {
3215 goto do_not_remove;
3216 }
3217 /* Mark the single-word operation live. */
3218 nb_oargs = 1;
3219 goto do_not_remove;
3220
3221 default:
3222 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
3223 nb_iargs = def->nb_iargs;
3224 nb_oargs = def->nb_oargs;
3225
3226 /* Test if the operation can be removed because all
3227 its outputs are dead. We assume that nb_oargs == 0
3228 implies side effects */
3229 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
3230 for (i = 0; i < nb_oargs; i++) {
3231 if (arg_temp(op->args[i])->state != TS_DEAD) {
3232 goto do_not_remove;
3233 }
3234 }
3235 goto do_remove;
3236 }
3237 goto do_not_remove;
3238
3239 do_remove:
3240 tcg_op_remove(s, op);
3241 break;
3242
3243 do_not_remove:
3244 for (i = 0; i < nb_oargs; i++) {
3245 ts = arg_temp(op->args[i]);
3246
3247 /* Remember the preference of the uses that followed. */
3248 if (i < ARRAY_SIZE(op->output_pref)) {
3249 op->output_pref[i] = *la_temp_pref(ts);
3250 }
3251
3252 /* Output args are dead. */
3253 if (ts->state & TS_DEAD) {
3254 arg_life |= DEAD_ARG << i;
3255 }
3256 if (ts->state & TS_MEM) {
3257 arg_life |= SYNC_ARG << i;
3258 }
3259 ts->state = TS_DEAD;
3260 la_reset_pref(ts);
3261 }
3262
3263 /* If end of basic block, update. */
3264 if (def->flags & TCG_OPF_BB_EXIT) {
3265 la_func_end(s, nb_globals, nb_temps);
3266 } else if (def->flags & TCG_OPF_COND_BRANCH) {
3267 la_bb_sync(s, nb_globals, nb_temps);
3268 } else if (def->flags & TCG_OPF_BB_END) {
3269 la_bb_end(s, nb_globals, nb_temps);
3270 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3271 la_global_sync(s, nb_globals);
3272 if (def->flags & TCG_OPF_CALL_CLOBBER) {
3273 la_cross_call(s, nb_temps);
3274 }
3275 }
3276
3277 /* Record arguments that die in this opcode. */
3278 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3279 ts = arg_temp(op->args[i]);
3280 if (ts->state & TS_DEAD) {
3281 arg_life |= DEAD_ARG << i;
3282 }
3283 }
3284
3285 /* Input arguments are live for preceding opcodes. */
3286 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3287 ts = arg_temp(op->args[i]);
3288 if (ts->state & TS_DEAD) {
3289 /* For operands that were dead, initially allow
3290 all regs for the type. */
3291 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
3292 ts->state &= ~TS_DEAD;
3293 }
3294 }
3295
3296 /* Incorporate constraints for this operand. */
3297 switch (opc) {
3298 case INDEX_op_mov_i32:
3299 case INDEX_op_mov_i64:
3300 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3301 have proper constraints. That said, special-case
3302 moves to propagate preferences backward. */
3303 if (IS_DEAD_ARG(1)) {
3304 *la_temp_pref(arg_temp(op->args[0]))
3305 = *la_temp_pref(arg_temp(op->args[1]));
3306 }
3307 break;
3308
3309 default:
3310 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3311 const TCGArgConstraint *ct = &def->args_ct[i];
3312 TCGRegSet set, *pset;
3313
3314 ts = arg_temp(op->args[i]);
3315 pset = la_temp_pref(ts);
3316 set = *pset;
3317
3318 set &= ct->regs;
3319 if (ct->ialias) {
3320 set &= output_pref(op, ct->alias_index);
3321 }
3322 /* If the combination is not possible, restart. */
3323 if (set == 0) {
3324 set = ct->regs;
3325 }
3326 *pset = set;
3327 }
3328 break;
3329 }
3330 break;
3331 }
3332 op->life = arg_life;
3333 }
3334 }
3335
3336 /* Liveness analysis: Convert indirect regs to direct temporaries. */
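/*
 * Each indirect global is shadowed by a fresh TEMP_EBB temp; explicit
 * ld ops are inserted before uses and st ops after writes, so that the
 * register allocator only ever sees the direct temp.  The return value
 * tells the caller whether the op stream changed and liveness needs to
 * be recomputed.
 */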
3337 static bool __attribute__((noinline))
3338 liveness_pass_2(TCGContext *s)
3339 {
3340 int nb_globals = s->nb_globals;
3341 int nb_temps, i;
3342 bool changes = false;
3343 TCGOp *op, *op_next;
3344
3345 /* Create a temporary for each indirect global. */
3346 for (i = 0; i < nb_globals; ++i) {
3347 TCGTemp *its = &s->temps[i];
3348 if (its->indirect_reg) {
3349 TCGTemp *dts = tcg_temp_alloc(s);
3350 dts->type = its->type;
3351 dts->base_type = its->base_type;
3352 dts->temp_subindex = its->temp_subindex;
3353 dts->kind = TEMP_EBB;
3354 its->state_ptr = dts;
3355 } else {
3356 its->state_ptr = NULL;
3357 }
3358 /* All globals begin dead. */
3359 its->state = TS_DEAD;
3360 }
3361 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3362 TCGTemp *its = &s->temps[i];
3363 its->state_ptr = NULL;
3364 its->state = TS_DEAD;
3365 }
3366
3367 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
3368 TCGOpcode opc = op->opc;
3369 const TCGOpDef *def = &tcg_op_defs[opc];
3370 TCGLifeData arg_life = op->life;
3371 int nb_iargs, nb_oargs, call_flags;
3372 TCGTemp *arg_ts, *dir_ts;
3373
3374 if (opc == INDEX_op_call) {
3375 nb_oargs = TCGOP_CALLO(op);
3376 nb_iargs = TCGOP_CALLI(op);
3377 call_flags = tcg_call_flags(op);
3378 } else {
3379 nb_iargs = def->nb_iargs;
3380 nb_oargs = def->nb_oargs;
3381
3382 /* Set flags similar to those that calls require. */
3383 if (def->flags & TCG_OPF_COND_BRANCH) {
3384 /* Like reading globals: sync_globals */
3385 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3386 } else if (def->flags & TCG_OPF_BB_END) {
3387 /* Like writing globals: save_globals */
3388 call_flags = 0;
3389 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3390 /* Like reading globals: sync_globals */
3391 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3392 } else {
3393 /* No effect on globals. */
3394 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3395 TCG_CALL_NO_WRITE_GLOBALS);
3396 }
3397 }
3398
3399 /* Make sure that input arguments are available. */
3400 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3401 arg_ts = arg_temp(op->args[i]);
3402 dir_ts = arg_ts->state_ptr;
3403 if (dir_ts && arg_ts->state == TS_DEAD) {
3404 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3405 ? INDEX_op_ld_i32
3406 : INDEX_op_ld_i64);
3407 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3408
3409 lop->args[0] = temp_arg(dir_ts);
3410 lop->args[1] = temp_arg(arg_ts->mem_base);
3411 lop->args[2] = arg_ts->mem_offset;
3412
3413 /* Loaded, but synced with memory. */
3414 arg_ts->state = TS_MEM;
3415 }
3416 }
3417
3418 /* Perform input replacement, and mark inputs that became dead.
3419 No action is required except keeping temp_state up to date
3420 so that we reload when needed. */
3421 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3422 arg_ts = arg_temp(op->args[i]);
3423 dir_ts = arg_ts->state_ptr;
3424 if (dir_ts) {
3425 op->args[i] = temp_arg(dir_ts);
3426 changes = true;
3427 if (IS_DEAD_ARG(i)) {
3428 arg_ts->state = TS_DEAD;
3429 }
3430 }
3431 }
3432
3433 /* Liveness analysis should ensure that the following are
3434 all correct, for call sites and basic block end points. */
3435 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
3436 /* Nothing to do */
3437 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
3438 for (i = 0; i < nb_globals; ++i) {
3439 /* Liveness should see that globals are synced back,
3440 that is, either TS_DEAD or TS_MEM. */
3441 arg_ts = &s->temps[i];
3442 tcg_debug_assert(arg_ts->state_ptr == 0
3443 || arg_ts->state != 0);
3444 }
3445 } else {
3446 for (i = 0; i < nb_globals; ++i) {
3447 /* Liveness should see that globals are saved back,
3448 that is, TS_DEAD, waiting to be reloaded. */
3449 arg_ts = &s->temps[i];
3450 tcg_debug_assert(arg_ts->state_ptr == 0
3451 || arg_ts->state == TS_DEAD);
3452 }
3453 }
3454
3455 /* Outputs become available. */
3456 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
3457 arg_ts = arg_temp(op->args[0]);
3458 dir_ts = arg_ts->state_ptr;
3459 if (dir_ts) {
3460 op->args[0] = temp_arg(dir_ts);
3461 changes = true;
3462
3463 /* The output is now live and modified. */
3464 arg_ts->state = 0;
3465
3466 if (NEED_SYNC_ARG(0)) {
3467 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3468 ? INDEX_op_st_i32
3469 : INDEX_op_st_i64);
3470 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
3471 TCGTemp *out_ts = dir_ts;
3472
3473 if (IS_DEAD_ARG(0)) {
3474 out_ts = arg_temp(op->args[1]);
3475 arg_ts->state = TS_DEAD;
3476 tcg_op_remove(s, op);
3477 } else {
3478 arg_ts->state = TS_MEM;
3479 }
3480
3481 sop->args[0] = temp_arg(out_ts);
3482 sop->args[1] = temp_arg(arg_ts->mem_base);
3483 sop->args[2] = arg_ts->mem_offset;
3484 } else {
3485 tcg_debug_assert(!IS_DEAD_ARG(0));
3486 }
3487 }
3488 } else {
3489 for (i = 0; i < nb_oargs; i++) {
3490 arg_ts = arg_temp(op->args[i]);
3491 dir_ts = arg_ts->state_ptr;
3492 if (!dir_ts) {
3493 continue;
3494 }
3495 op->args[i] = temp_arg(dir_ts);
3496 changes = true;
3497
3498 /* The output is now live and modified. */
3499 arg_ts->state = 0;
3500
3501 /* Sync outputs upon their last write. */
3502 if (NEED_SYNC_ARG(i)) {
3503 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
3504 ? INDEX_op_st_i32
3505 : INDEX_op_st_i64);
3506 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
3507
3508 sop->args[0] = temp_arg(dir_ts);
3509 sop->args[1] = temp_arg(arg_ts->mem_base);
3510 sop->args[2] = arg_ts->mem_offset;
3511
3512 arg_ts->state = TS_MEM;
3513 }
3514 /* Drop outputs that are dead. */
3515 if (IS_DEAD_ARG(i)) {
3516 arg_ts->state = TS_DEAD;
3517 }
3518 }
3519 }
3520 }
3521
3522 return changes;
3523 }
3524
3525 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
3526 {
3527 intptr_t off;
3528 int size, align;
3529
3530 /* When allocating an object, look at the full type. */
3531 size = tcg_type_size(ts->base_type);
3532 switch (ts->base_type) {
3533 case TCG_TYPE_I32:
3534 align = 4;
3535 break;
3536 case TCG_TYPE_I64:
3537 case TCG_TYPE_V64:
3538 align = 8;
3539 break;
3540 case TCG_TYPE_I128:
3541 case TCG_TYPE_V128:
3542 case TCG_TYPE_V256:
3543 /*
3544 * Note that we do not require aligned storage for V256,
3545 * and that we provide alignment for I128 to match V128,
3546 * even if that's above what the host ABI requires.
3547 */
3548 align = 16;
3549 break;
3550 default:
3551 g_assert_not_reached();
3552 }
3553
3554 /*
3555 * Assume the stack is sufficiently aligned.
3556 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
3557 * and do not require 16 byte vector alignment. This seems slightly
3558 * easier than fully parameterizing the above switch statement.
3559 */
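/*
 * E.g. on a host with 8-byte stack alignment, a V128 temp is placed
 * with align = MIN(8, 16) = 8 rather than its natural 16.
 */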
3560 align = MIN(TCG_TARGET_STACK_ALIGN, align);
3561 off = ROUND_UP(s->current_frame_offset, align);
3562
3563 /* If we've exhausted the stack frame, restart with a smaller TB. */
3564 if (off + size > s->frame_end) {
3565 tcg_raise_tb_overflow(s);
3566 }
3567 s->current_frame_offset = off + size;
3568 #if defined(__sparc__)
3569 off += TCG_TARGET_STACK_BIAS;
3570 #endif
3571
3572 /* If the object was subdivided, assign memory to all the parts. */
3573 if (ts->base_type != ts->type) {
3574 int part_size = tcg_type_size(ts->type);
3575 int part_count = size / part_size;
3576
3577 /*
3578 * Each part is allocated sequentially in tcg_temp_new_internal.
3579 * Jump back to the first part by subtracting the current index.
3580 */
3581 ts -= ts->temp_subindex;
3582 for (int i = 0; i < part_count; ++i) {
3583 ts[i].mem_offset = off + i * part_size;
3584 ts[i].mem_base = s->frame_temp;
3585 ts[i].mem_allocated = 1;
3586 }
3587 } else {
3588 ts->mem_offset = off;
3589 ts->mem_base = s->frame_temp;
3590 ts->mem_allocated = 1;
3591 }
3592 }
3593
3594 /* Assign @reg to @ts, and update reg_to_temp[]. */
3595 static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
3596 {
3597 if (ts->val_type == TEMP_VAL_REG) {
3598 TCGReg old = ts->reg;
3599 tcg_debug_assert(s->reg_to_temp[old] == ts);
3600 if (old == reg) {
3601 return;
3602 }
3603 s->reg_to_temp[old] = NULL;
3604 }
3605 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3606 s->reg_to_temp[reg] = ts;
3607 ts->val_type = TEMP_VAL_REG;
3608 ts->reg = reg;
3609 }
3610
3611 /* Assign a non-register value type to @ts, and update reg_to_temp[]. */
3612 static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
3613 {
3614 tcg_debug_assert(type != TEMP_VAL_REG);
3615 if (ts->val_type == TEMP_VAL_REG) {
3616 TCGReg reg = ts->reg;
3617 tcg_debug_assert(s->reg_to_temp[reg] == ts);
3618 s->reg_to_temp[reg] = NULL;
3619 }
3620 ts->val_type = type;
3621 }
3622
3623 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
3624
3625 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
3626 mark it free; otherwise mark it dead. */
3627 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
3628 {
3629 TCGTempVal new_type;
3630
3631 switch (ts->kind) {
3632 case TEMP_FIXED:
3633 return;
3634 case TEMP_GLOBAL:
3635 case TEMP_TB:
3636 new_type = TEMP_VAL_MEM;
3637 break;
3638 case TEMP_EBB:
3639 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
3640 break;
3641 case TEMP_CONST:
3642 new_type = TEMP_VAL_CONST;
3643 break;
3644 default:
3645 g_assert_not_reached();
3646 }
3647 set_temp_val_nonreg(s, ts, new_type);
3648 }
3649
3650 /* Mark a temporary as dead. */
3651 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3652 {
3653 temp_free_or_dead(s, ts, 1);
3654 }
3655
3656 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3657 register needs to be allocated to store a constant. If 'free_or_dead'
3658 is non-zero, subsequently release the temporary; if it is positive, the
3659 temp is dead; if it is negative, the temp is free. */
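/*
 * For instance, a final argument of -1 syncs and frees the temp, +1
 * syncs and marks it dead, and 0 syncs while leaving the temp live.
 */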
3660 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3661 TCGRegSet preferred_regs, int free_or_dead)
3662 {
3663 if (!temp_readonly(ts) && !ts->mem_coherent) {
3664 if (!ts->mem_allocated) {
3665 temp_allocate_frame(s, ts);
3666 }
3667 switch (ts->val_type) {
3668 case TEMP_VAL_CONST:
3669 /* If we're going to free the temp immediately, then we won't
3670 require it later in a register, so attempt to store the
3671 constant to memory directly. */
3672 if (free_or_dead
3673 && tcg_out_sti(s, ts->type, ts->val,
3674 ts->mem_base->reg, ts->mem_offset)) {
3675 break;
3676 }
3677 temp_load(s, ts, tcg_target_available_regs[ts->type],
3678 allocated_regs, preferred_regs);
3679 /* fallthrough */
3680
3681 case TEMP_VAL_REG:
3682 tcg_out_st(s, ts->type, ts->reg,
3683 ts->mem_base->reg, ts->mem_offset);
3684 break;
3685
3686 case TEMP_VAL_MEM:
3687 break;
3688
3689 case TEMP_VAL_DEAD:
3690 default:
3691 g_assert_not_reached();
3692 }
3693 ts->mem_coherent = 1;
3694 }
3695 if (free_or_dead) {
3696 temp_free_or_dead(s, ts, free_or_dead);
3697 }
3698 }
3699
3700 /* free register 'reg' by spilling the corresponding temporary if necessary */
3701 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
3702 {
3703 TCGTemp *ts = s->reg_to_temp[reg];
3704 if (ts != NULL) {
3705 temp_sync(s, ts, allocated_regs, 0, -1);
3706 }
3707 }
3708
3709 /**
3710 * tcg_reg_alloc:
3711 * @required_regs: Set of registers in which we must allocate.
3712 * @allocated_regs: Set of registers which must be avoided.
3713 * @preferred_regs: Set of registers we should prefer.
3714 * @rev: True if we search the registers in "indirect" order.
3715 *
3716 * The allocated register must be in @required_regs & ~@allocated_regs,
3717 * but if we can put it in @preferred_regs we may save a move later.
3718 */
3719 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3720 TCGRegSet allocated_regs,
3721 TCGRegSet preferred_regs, bool rev)
3722 {
3723 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3724 TCGRegSet reg_ct[2];
3725 const int *order;
3726
3727 reg_ct[1] = required_regs & ~allocated_regs;
3728 tcg_debug_assert(reg_ct[1] != 0);
3729 reg_ct[0] = reg_ct[1] & preferred_regs;
3730
3731 /* Skip the preferred_regs option if it cannot be satisfied,
3732 or if the preference made no difference. */
3733 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3734
3735 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3736
3737 /* Try free registers, preferences first. */
3738 for (j = f; j < 2; j++) {
3739 TCGRegSet set = reg_ct[j];
3740
3741 if (tcg_regset_single(set)) {
3742 /* One register in the set. */
3743 TCGReg reg = tcg_regset_first(set);
3744 if (s->reg_to_temp[reg] == NULL) {
3745 return reg;
3746 }
3747 } else {
3748 for (i = 0; i < n; i++) {
3749 TCGReg reg = order[i];
3750 if (s->reg_to_temp[reg] == NULL &&
3751 tcg_regset_test_reg(set, reg)) {
3752 return reg;
3753 }
3754 }
3755 }
3756 }
3757
3758 /* We must spill something. */
3759 for (j = f; j < 2; j++) {
3760 TCGRegSet set = reg_ct[j];
3761
3762 if (tcg_regset_single(set)) {
3763 /* One register in the set. */
3764 TCGReg reg = tcg_regset_first(set);
3765 tcg_reg_free(s, reg, allocated_regs);
3766 return reg;
3767 } else {
3768 for (i = 0; i < n; i++) {
3769 TCGReg reg = order[i];
3770 if (tcg_regset_test_reg(set, reg)) {
3771 tcg_reg_free(s, reg, allocated_regs);
3772 return reg;
3773 }
3774 }
3775 }
3776 }
3777
3778 g_assert_not_reached();
3779 }
3780
3781 static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
3782 TCGRegSet allocated_regs,
3783 TCGRegSet preferred_regs, bool rev)
3784 {
3785 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3786 TCGRegSet reg_ct[2];
3787 const int *order;
3788
3789 /* Only consider register I when neither I nor I+1 is in allocated_regs. */
3790 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
3791 tcg_debug_assert(reg_ct[1] != 0);
3792 reg_ct[0] = reg_ct[1] & preferred_regs;
3793
3794 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3795
3796 /*
3797 * Skip the preferred_regs option if it cannot be satisfied,
3798 * or if the preference made no difference.
3799 */
3800 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3801
3802 /*
3803 * Minimize the number of flushes by looking for 2 free registers first,
3804 * then a single flush, then two flushes.
3805 */
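/*
 * With fmin == 2 only adjacent pairs that are both free are accepted;
 * fmin == 1 tolerates spilling one of the two; fmin == 0 spills both.
 */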
3806 for (fmin = 2; fmin >= 0; fmin--) {
3807 for (j = k; j < 2; j++) {
3808 TCGRegSet set = reg_ct[j];
3809
3810 for (i = 0; i < n; i++) {
3811 TCGReg reg = order[i];
3812
3813 if (tcg_regset_test_reg(set, reg)) {
3814 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
3815 if (f >= fmin) {
3816 tcg_reg_free(s, reg, allocated_regs);
3817 tcg_reg_free(s, reg + 1, allocated_regs);
3818 return reg;
3819 }
3820 }
3821 }
3822 }
3823 }
3824 g_assert_not_reached();
3825 }
3826
3827 /* Make sure the temporary is in a register. If needed, allocate the register
3828 from DESIRED while avoiding ALLOCATED. */
3829 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
3830 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
3831 {
3832 TCGReg reg;
3833
3834 switch (ts->val_type) {
3835 case TEMP_VAL_REG:
3836 return;
3837 case TEMP_VAL_CONST:
3838 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3839 preferred_regs, ts->indirect_base);
3840 if (ts->type <= TCG_TYPE_I64) {
3841 tcg_out_movi(s, ts->type, reg, ts->val);
3842 } else {
3843 uint64_t val = ts->val;
3844 MemOp vece = MO_64;
3845
3846 /*
3847 * Find the minimal vector element that matches the constant.
3848 * The targets will, in general, have to do this search anyway;
3849 * do it generically here.
3850 */
3851 if (val == dup_const(MO_8, val)) {
3852 vece = MO_8;
3853 } else if (val == dup_const(MO_16, val)) {
3854 vece = MO_16;
3855 } else if (val == dup_const(MO_32, val)) {
3856 vece = MO_32;
3857 }
3858
3859 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
3860 }
3861 ts->mem_coherent = 0;
3862 break;
3863 case TEMP_VAL_MEM:
3864 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3865 preferred_regs, ts->indirect_base);
3866 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3867 ts->mem_coherent = 1;
3868 break;
3869 case TEMP_VAL_DEAD:
3870 default:
3871 g_assert_not_reached();
3872 }
3873 set_temp_val_reg(s, ts, reg);
3874 }
3875
3876 /* Save a temporary to memory. 'allocated_regs' is used in case a
3877 temporary register needs to be allocated to store a constant. */
3878 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
3879 {
3880 /* The liveness analysis already ensures that globals are back
3881 in memory. Keep a tcg_debug_assert for safety. */
3882 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
3883 }
3884
3885 /* save globals to their canonical location and assume they can be
3886 modified by the following code. 'allocated_regs' is used in case a
3887 temporary register needs to be allocated to store a constant. */
3888 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
3889 {
3890 int i, n;
3891
3892 for (i = 0, n = s->nb_globals; i < n; i++) {
3893 temp_save(s, &s->temps[i], allocated_regs);
3894 }
3895 }
3896
3897 /* sync globals to their canonical location and assume they can be
3898 read by the following code. 'allocated_regs' is used in case a
3899 temporary register needs to be allocated to store a constant. */
3900 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3901 {
3902 int i, n;
3903
3904 for (i = 0, n = s->nb_globals; i < n; i++) {
3905 TCGTemp *ts = &s->temps[i];
3906 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
3907 || ts->kind == TEMP_FIXED
3908 || ts->mem_coherent);
3909 }
3910 }
3911
3912 /* at the end of a basic block, we assume all temporaries are dead and
3913 all globals are stored at their canonical location. */
3914 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
3915 {
3916 int i;
3917
3918 for (i = s->nb_globals; i < s->nb_temps; i++) {
3919 TCGTemp *ts = &s->temps[i];
3920
3921 switch (ts->kind) {
3922 case TEMP_TB:
3923 temp_save(s, ts, allocated_regs);
3924 break;
3925 case TEMP_EBB:
3926 /* The liveness analysis already ensures that temps are dead.
3927 Keep a tcg_debug_assert for safety. */
3928 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3929 break;
3930 case TEMP_CONST:
3931 /* Similarly, we should have freed any allocated register. */
3932 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3933 break;
3934 default:
3935 g_assert_not_reached();
3936 }
3937 }
3938
3939 save_globals(s, allocated_regs);
3940 }
3941
3942 /*
3943 * At a conditional branch, we assume all temporaries are dead unless
3944 * explicitly live-across-conditional-branch; all globals and local
3945 * temps are synced to their location.
3946 */
3947 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3948 {
3949 sync_globals(s, allocated_regs);
3950
3951 for (int i = s->nb_globals; i < s->nb_temps; i++) {
3952 TCGTemp *ts = &s->temps[i];
3953 /*
3954 * The liveness analysis already ensures that temps are dead.
3955 * Keep tcg_debug_asserts for safety.
3956 */
3957 switch (ts->kind) {
3958 case TEMP_TB:
3959 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
3960 break;
3961 case TEMP_EBB:
3962 case TEMP_CONST:
3963 break;
3964 default:
3965 g_assert_not_reached();
3966 }
3967 }
3968 }
3969
3970 /*
3971 * Specialized code generation for INDEX_op_mov_* with a constant.
3972 */
3973 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
3974 tcg_target_ulong val, TCGLifeData arg_life,
3975 TCGRegSet preferred_regs)
3976 {
3977 /* ENV should not be modified. */
3978 tcg_debug_assert(!temp_readonly(ots));
3979
3980 /* The movi is not explicitly generated here. */
3981 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
3982 ots->val = val;
3983 ots->mem_coherent = 0;
3984 if (NEED_SYNC_ARG(0)) {
3985 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
3986 } else if (IS_DEAD_ARG(0)) {
3987 temp_dead(s, ots);
3988 }
3989 }
3990
3991 /*
3992 * Specialized code generation for INDEX_op_mov_*.
3993 */
3994 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
3995 {
3996 const TCGLifeData arg_life = op->life;
3997 TCGRegSet allocated_regs, preferred_regs;
3998 TCGTemp *ts, *ots;
3999 TCGType otype, itype;
4000 TCGReg oreg, ireg;
4001
4002 allocated_regs = s->reserved_regs;
4003 preferred_regs = output_pref(op, 0);
4004 ots = arg_temp(op->args[0]);
4005 ts = arg_temp(op->args[1]);
4006
4007 /* ENV should not be modified. */
4008 tcg_debug_assert(!temp_readonly(ots));
4009
4010 /* Note that otype != itype for no-op truncation. */
4011 otype = ots->type;
4012 itype = ts->type;
4013
4014 if (ts->val_type == TEMP_VAL_CONST) {
4015 /* propagate constant or generate sti */
4016 tcg_target_ulong val = ts->val;
4017 if (IS_DEAD_ARG(1)) {
4018 temp_dead(s, ts);
4019 }
4020 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
4021 return;
4022 }
4023
4024 /* If the source value is in memory, we're going to be forced
4025 to have it in a register in order to perform the copy. Copy
4026 the SOURCE value into its own register first, so that we
4027 don't have to reload SOURCE the next time it is used. */
4028 if (ts->val_type == TEMP_VAL_MEM) {
4029 temp_load(s, ts, tcg_target_available_regs[itype],
4030 allocated_regs, preferred_regs);
4031 }
4032 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
4033 ireg = ts->reg;
4034
4035 if (IS_DEAD_ARG(0)) {
4036 /* mov to a non-saved dead register makes no sense (even with
4037 liveness analysis disabled). */
4038 tcg_debug_assert(NEED_SYNC_ARG(0));
4039 if (!ots->mem_allocated) {
4040 temp_allocate_frame(s, ots);
4041 }
4042 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
4043 if (IS_DEAD_ARG(1)) {
4044 temp_dead(s, ts);
4045 }
4046 temp_dead(s, ots);
4047 return;
4048 }
4049
4050 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
4051 /*
4052 * The mov can be suppressed. Kill input first, so that it
4053 * is unlinked from reg_to_temp, then set the output to the
4054 * reg that we saved from the input.
4055 */
4056 temp_dead(s, ts);
4057 oreg = ireg;
4058 } else {
4059 if (ots->val_type == TEMP_VAL_REG) {
4060 oreg = ots->reg;
4061 } else {
4062 /* Make sure to not spill the input register during allocation. */
4063 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
4064 allocated_regs | ((TCGRegSet)1 << ireg),
4065 preferred_regs, ots->indirect_base);
4066 }
4067 if (!tcg_out_mov(s, otype, oreg, ireg)) {
4068 /*
4069 * Cross register class move not supported.
4070 * Store the source register into the destination slot
4071 * and leave the destination temp as TEMP_VAL_MEM.
4072 */
4073 assert(!temp_readonly(ots));
4074 if (!ts->mem_allocated) {
4075 temp_allocate_frame(s, ots);
4076 }
4077 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
4078 set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
4079 ots->mem_coherent = 1;
4080 return;
4081 }
4082 }
4083 set_temp_val_reg(s, ots, oreg);
4084 ots->mem_coherent = 0;
4085
4086 if (NEED_SYNC_ARG(0)) {
4087 temp_sync(s, ots, allocated_regs, 0, 0);
4088 }
4089 }
4090
4091 /*
4092 * Specialized code generation for INDEX_op_dup_vec.
4093 */
4094 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
4095 {
4096 const TCGLifeData arg_life = op->life;
4097 TCGRegSet dup_out_regs, dup_in_regs;
4098 TCGTemp *its, *ots;
4099 TCGType itype, vtype;
4100 unsigned vece;
4101 int lowpart_ofs;
4102 bool ok;
4103
4104 ots = arg_temp(op->args[0]);
4105 its = arg_temp(op->args[1]);
4106
4107 /* ENV should not be modified. */
4108 tcg_debug_assert(!temp_readonly(ots));
4109
4110 itype = its->type;
4111 vece = TCGOP_VECE(op);
4112 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4113
4114 if (its->val_type == TEMP_VAL_CONST) {
4115 /* Propagate constant via movi -> dupi. */
4116 tcg_target_ulong val = its->val;
4117 if (IS_DEAD_ARG(1)) {
4118 temp_dead(s, its);
4119 }
4120 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
4121 return;
4122 }
4123
4124 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
4125 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
4126
4127 /* Allocate the output register now. */
4128 if (ots->val_type != TEMP_VAL_REG) {
4129 TCGRegSet allocated_regs = s->reserved_regs;
4130 TCGReg oreg;
4131
4132 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
4133 /* Make sure to not spill the input register. */
4134 tcg_regset_set_reg(allocated_regs, its->reg);
4135 }
4136 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
4137 output_pref(op, 0), ots->indirect_base);
4138 set_temp_val_reg(s, ots, oreg);
4139 }
4140
4141 switch (its->val_type) {
4142 case TEMP_VAL_REG:
4143 /*
4144 * The dup constraints must be broad, covering all possible VECE.
4145 * However, tcg_out_dup_vec() gets to see the VECE and we allow it
4146 * to fail, indicating that extra moves are required for that case.
4147 */
4148 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
4149 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
4150 goto done;
4151 }
4152 /* Try again from memory or a vector input register. */
4153 }
4154 if (!its->mem_coherent) {
4155 /*
4156 * The input register is not synced, and so an extra store
4157 * would be required to use memory. Attempt an integer-vector
4158 * register move first. We do not have a TCGRegSet for this.
4159 */
4160 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
4161 break;
4162 }
4163 /* Sync the temp back to its slot and load from there. */
4164 temp_sync(s, its, s->reserved_regs, 0, 0);
4165 }
4166 /* fall through */
4167
4168 case TEMP_VAL_MEM:
4169 lowpart_ofs = 0;
4170 if (HOST_BIG_ENDIAN) {
4171 lowpart_ofs = tcg_type_size(itype) - (1 << vece);
4172 }
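/* A worked example of the offset computed above, assuming an 8-byte
   slot (itype == TCG_TYPE_I64) and a 16-bit element (vece == MO_16):
   a big-endian host keeps the least significant element at the end
   of the slot, so lowpart_ofs = 8 - 2 = 6; a little-endian host
   uses offset 0. */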
4173 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
4174 its->mem_offset + lowpart_ofs)) {
4175 goto done;
4176 }
4177 /* Load the input into the destination vector register. */
4178 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
4179 break;
4180
4181 default:
4182 g_assert_not_reached();
4183 }
4184
4185 /* We now have a vector input register, so dup must succeed. */
4186 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
4187 tcg_debug_assert(ok);
4188
4189 done:
4190 ots->mem_coherent = 0;
4191 if (IS_DEAD_ARG(1)) {
4192 temp_dead(s, its);
4193 }
4194 if (NEED_SYNC_ARG(0)) {
4195 temp_sync(s, ots, s->reserved_regs, 0, 0);
4196 }
4197 if (IS_DEAD_ARG(0)) {
4198 temp_dead(s, ots);
4199 }
4200 }
4201
4202 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
4203 {
4204 const TCGLifeData arg_life = op->life;
4205 const TCGOpDef * const def = &tcg_op_defs[op->opc];
4206 TCGRegSet i_allocated_regs;
4207 TCGRegSet o_allocated_regs;
4208 int i, k, nb_iargs, nb_oargs;
4209 TCGReg reg;
4210 TCGArg arg;
4211 const TCGArgConstraint *arg_ct;
4212 TCGTemp *ts;
4213 TCGArg new_args[TCG_MAX_OP_ARGS];
4214 int const_args[TCG_MAX_OP_ARGS];
4215
4216 nb_oargs = def->nb_oargs;
4217 nb_iargs = def->nb_iargs;
4218
4219 /* copy constants */
4220 memcpy(new_args + nb_oargs + nb_iargs,
4221 op->args + nb_oargs + nb_iargs,
4222 sizeof(TCGArg) * def->nb_cargs);
4223
4224 i_allocated_regs = s->reserved_regs;
4225 o_allocated_regs = s->reserved_regs;
4226
4227 /* satisfy input constraints */
4228 for (k = 0; k < nb_iargs; k++) {
4229 TCGRegSet i_preferred_regs, i_required_regs;
4230 bool allocate_new_reg, copyto_new_reg;
4231 TCGTemp *ts2;
4232 int i1, i2;
4233
4234 i = def->args_ct[nb_oargs + k].sort_index;
4235 arg = op->args[i];
4236 arg_ct = &def->args_ct[i];
4237 ts = arg_temp(arg);
4238
4239 if (ts->val_type == TEMP_VAL_CONST
4240 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
4241 /* constant is OK for instruction */
4242 const_args[i] = 1;
4243 new_args[i] = ts->val;
4244 continue;
4245 }
4246
4247 reg = ts->reg;
4248 i_preferred_regs = 0;
4249 i_required_regs = arg_ct->regs;
4250 allocate_new_reg = false;
4251 copyto_new_reg = false;
4252
4253 switch (arg_ct->pair) {
4254 case 0: /* not paired */
4255 if (arg_ct->ialias) {
4256 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4257
4258 /*
4259 * If the input is readonly, then it cannot also be an
4260 * output and aliased to itself. If the input is not
4261 * dead after the instruction, we must allocate a new
4262 * register and move it.
4263 */
4264 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
4265 allocate_new_reg = true;
4266 } else if (ts->val_type == TEMP_VAL_REG) {
4267 /*
4268 * Check if the current register has already been
4269 * allocated for another input.
4270 */
4271 allocate_new_reg =
4272 tcg_regset_test_reg(i_allocated_regs, reg);
4273 }
4274 }
4275 if (!allocate_new_reg) {
4276 temp_load(s, ts, i_required_regs, i_allocated_regs,
4277 i_preferred_regs);
4278 reg = ts->reg;
4279 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
4280 }
4281 if (allocate_new_reg) {
4282 /*
4283 * Allocate a new register matching the constraint
4284 * and move the temporary register into it.
4285 */
4286 temp_load(s, ts, tcg_target_available_regs[ts->type],
4287 i_allocated_regs, 0);
4288 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
4289 i_preferred_regs, ts->indirect_base);
4290 copyto_new_reg = true;
4291 }
4292 break;
4293
4294 case 1:
4295 /* First of an input pair; if i1 == i2, the second is an output. */
4296 i1 = i;
4297 i2 = arg_ct->pair_index;
4298 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
4299
4300 /*
4301 * It is easier to default to allocating a new pair
4302 * and to identify a few cases where it's not required.
4303 */
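/* Concretely (hypothetical 32-bit host): a 64-bit value arrives as
   two 32-bit temps that must sit in consecutive host registers.
   The existing pair (reg, reg + 1) can be kept only when reg
   satisfies the constraint, neither register is already claimed,
   and reg + 1 is free or already holds the second temp. */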
4304 if (arg_ct->ialias) {
4305 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4306 if (IS_DEAD_ARG(i1) &&
4307 IS_DEAD_ARG(i2) &&
4308 !temp_readonly(ts) &&
4309 ts->val_type == TEMP_VAL_REG &&
4310 ts->reg < TCG_TARGET_NB_REGS - 1 &&
4311 tcg_regset_test_reg(i_required_regs, reg) &&
4312 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4313 !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
4314 (ts2
4315 ? ts2->val_type == TEMP_VAL_REG &&
4316 ts2->reg == reg + 1 &&
4317 !temp_readonly(ts2)
4318 : s->reg_to_temp[reg + 1] == NULL)) {
4319 break;
4320 }
4321 } else {
4322 /* Without aliasing, the pair must also be an input. */
4323 tcg_debug_assert(ts2);
4324 if (ts->val_type == TEMP_VAL_REG &&
4325 ts2->val_type == TEMP_VAL_REG &&
4326 ts2->reg == reg + 1 &&
4327 tcg_regset_test_reg(i_required_regs, reg)) {
4328 break;
4329 }
4330 }
4331 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4332 0, ts->indirect_base);
4333 goto do_pair;
4334
4335 case 2: /* pair second */
4336 reg = new_args[arg_ct->pair_index] + 1;
4337 goto do_pair;
4338
4339 case 3: /* ialias with second output, no first input */
4340 tcg_debug_assert(arg_ct->ialias);
4341 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4342
4343 if (IS_DEAD_ARG(i) &&
4344 !temp_readonly(ts) &&
4345 ts->val_type == TEMP_VAL_REG &&
4346 reg > 0 &&
4347 s->reg_to_temp[reg - 1] == NULL &&
4348 tcg_regset_test_reg(i_required_regs, reg) &&
4349 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4350 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4351 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4352 break;
4353 }
4354 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4355 i_allocated_regs, 0,
4356 ts->indirect_base);
4357 tcg_regset_set_reg(i_allocated_regs, reg);
4358 reg += 1;
4359 goto do_pair;
4360
4361 do_pair:
4362 /*
4363 * If an aliased input is not dead after the instruction,
4364 * we must allocate a new register and move it.
4365 */
4366 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4367 TCGRegSet t_allocated_regs = i_allocated_regs;
4368
4369 /*
4370 * Because of the alias, and the continued life, make sure
4371 * that the temp is somewhere *other* than the reg pair,
4372 * and we get a copy in reg.
4373 */
4374 tcg_regset_set_reg(t_allocated_regs, reg);
4375 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4376 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4377 /* If ts was already in reg, copy it somewhere else. */
4378 TCGReg nr;
4379 bool ok;
4380
4381 tcg_debug_assert(ts->kind != TEMP_FIXED);
4382 nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4383 t_allocated_regs, 0, ts->indirect_base);
4384 ok = tcg_out_mov(s, ts->type, nr, reg);
4385 tcg_debug_assert(ok);
4386
4387 set_temp_val_reg(s, ts, nr);
4388 } else {
4389 temp_load(s, ts, tcg_target_available_regs[ts->type],
4390 t_allocated_regs, 0);
4391 copyto_new_reg = true;
4392 }
4393 } else {
4394 /* Preferably allocate to reg, otherwise copy. */
4395 i_required_regs = (TCGRegSet)1 << reg;
4396 temp_load(s, ts, i_required_regs, i_allocated_regs,
4397 i_preferred_regs);
4398 copyto_new_reg = ts->reg != reg;
4399 }
4400 break;
4401
4402 default:
4403 g_assert_not_reached();
4404 }
4405
4406 if (copyto_new_reg) {
4407 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4408 /*
4409 * Cross register class move not supported. Sync the
4410 * temp back to its slot and load from there.
4411 */
4412 temp_sync(s, ts, i_allocated_regs, 0, 0);
4413 tcg_out_ld(s, ts->type, reg,
4414 ts->mem_base->reg, ts->mem_offset);
4415 }
4416 }
4417 new_args[i] = reg;
4418 const_args[i] = 0;
4419 tcg_regset_set_reg(i_allocated_regs, reg);
4420 }
4421
4422 /* mark dead temporaries and free the associated registers */
4423 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4424 if (IS_DEAD_ARG(i)) {
4425 temp_dead(s, arg_temp(op->args[i]));
4426 }
4427 }
4428
4429 if (def->flags & TCG_OPF_COND_BRANCH) {
4430 tcg_reg_alloc_cbranch(s, i_allocated_regs);
4431 } else if (def->flags & TCG_OPF_BB_END) {
4432 tcg_reg_alloc_bb_end(s, i_allocated_regs);
4433 } else {
4434 if (def->flags & TCG_OPF_CALL_CLOBBER) {
4435 /* XXX: permit generic clobber register list? */
4436 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4437 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
4438 tcg_reg_free(s, i, i_allocated_regs);
4439 }
4440 }
4441 }
4442 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
4443 /* sync globals if the op has side effects and might trigger
4444 an exception. */
4445 sync_globals(s, i_allocated_regs);
4446 }
4447
4448 /* satisfy the output constraints */
4449 for (k = 0; k < nb_oargs; k++) {
4450 i = def->args_ct[k].sort_index;
4451 arg = op->args[i];
4452 arg_ct = &def->args_ct[i];
4453 ts = arg_temp(arg);
4454
4455 /* ENV should not be modified. */
4456 tcg_debug_assert(!temp_readonly(ts));
4457
4458 switch (arg_ct->pair) {
4459 case 0: /* not paired */
4460 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
4461 reg = new_args[arg_ct->alias_index];
4462 } else if (arg_ct->newreg) {
4463 reg = tcg_reg_alloc(s, arg_ct->regs,
4464 i_allocated_regs | o_allocated_regs,
4465 output_pref(op, k), ts->indirect_base);
4466 } else {
4467 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
4468 output_pref(op, k), ts->indirect_base);
4469 }
4470 break;
4471
4472 case 1: /* first of pair */
4473 tcg_debug_assert(!arg_ct->newreg);
4474 if (arg_ct->oalias) {
4475 reg = new_args[arg_ct->alias_index];
4476 break;
4477 }
4478 reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
4479 output_pref(op, k), ts->indirect_base);
4480 break;
4481
4482 case 2: /* second of pair */
4483 tcg_debug_assert(!arg_ct->newreg);
4484 if (arg_ct->oalias) {
4485 reg = new_args[arg_ct->alias_index];
4486 } else {
4487 reg = new_args[arg_ct->pair_index] + 1;
4488 }
4489 break;
4490
4491 case 3: /* first of pair, aliasing with a second input */
4492 tcg_debug_assert(!arg_ct->newreg);
4493 reg = new_args[arg_ct->pair_index] - 1;
4494 break;
4495
4496 default:
4497 g_assert_not_reached();
4498 }
4499 tcg_regset_set_reg(o_allocated_regs, reg);
4500 set_temp_val_reg(s, ts, reg);
4501 ts->mem_coherent = 0;
4502 new_args[i] = reg;
4503 }
4504 }
4505
4506 /* emit instruction */
4507 switch (op->opc) {
4508 case INDEX_op_ext8s_i32:
4509 tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
4510 break;
4511 case INDEX_op_ext8s_i64:
4512 tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
4513 break;
4514 case INDEX_op_ext8u_i32:
4515 case INDEX_op_ext8u_i64:
4516 tcg_out_ext8u(s, new_args[0], new_args[1]);
4517 break;
4518 case INDEX_op_ext16s_i32:
4519 tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
4520 break;
4521 case INDEX_op_ext16s_i64:
4522 tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
4523 break;
4524 case INDEX_op_ext16u_i32:
4525 case INDEX_op_ext16u_i64:
4526 tcg_out_ext16u(s, new_args[0], new_args[1]);
4527 break;
4528 case INDEX_op_ext32s_i64:
4529 tcg_out_ext32s(s, new_args[0], new_args[1]);
4530 break;
4531 case INDEX_op_ext32u_i64:
4532 tcg_out_ext32u(s, new_args[0], new_args[1]);
4533 break;
4534 case INDEX_op_ext_i32_i64:
4535 tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
4536 break;
4537 case INDEX_op_extu_i32_i64:
4538 tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
4539 break;
4540 default:
4541 if (def->flags & TCG_OPF_VECTOR) {
4542 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
4543 new_args, const_args);
4544 } else {
4545 tcg_out_op(s, op->opc, new_args, const_args);
4546 }
4547 break;
4548 }
4549
4550 /* move the outputs in the correct register if needed */
4551 for (i = 0; i < nb_oargs; i++) {
4552 ts = arg_temp(op->args[i]);
4553
4554 /* ENV should not be modified. */
4555 tcg_debug_assert(!temp_readonly(ts));
4556
4557 if (NEED_SYNC_ARG(i)) {
4558 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
4559 } else if (IS_DEAD_ARG(i)) {
4560 temp_dead(s, ts);
4561 }
4562 }
4563 }
4564
4565 static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
4566 {
4567 const TCGLifeData arg_life = op->life;
4568 TCGTemp *ots, *itsl, *itsh;
4569 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4570
4571 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
4572 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
4573 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
4574
4575 ots = arg_temp(op->args[0]);
4576 itsl = arg_temp(op->args[1]);
4577 itsh = arg_temp(op->args[2]);
4578
4579 /* ENV should not be modified. */
4580 tcg_debug_assert(!temp_readonly(ots));
4581
4582 /* Allocate the output register now. */
4583 if (ots->val_type != TEMP_VAL_REG) {
4584 TCGRegSet allocated_regs = s->reserved_regs;
4585 TCGRegSet dup_out_regs =
4586 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
4587 TCGReg oreg;
4588
4589 /* Make sure to not spill the input registers. */
4590 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
4591 tcg_regset_set_reg(allocated_regs, itsl->reg);
4592 }
4593 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
4594 tcg_regset_set_reg(allocated_regs, itsh->reg);
4595 }
4596
4597 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
4598 output_pref(op, 0), ots->indirect_base);
4599 set_temp_val_reg(s, ots, oreg);
4600 }
4601
4602 /* Promote dup2 of immediates to dupi_vec. */
4603 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
4604 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
4605 MemOp vece = MO_64;
4606
4607 if (val == dup_const(MO_8, val)) {
4608 vece = MO_8;
4609 } else if (val == dup_const(MO_16, val)) {
4610 vece = MO_16;
4611 } else if (val == dup_const(MO_32, val)) {
4612 vece = MO_32;
4613 }
4614
4615 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
4616 goto done;
4617 }
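/* A worked example of the reduction above: val == 0x7f7f7f7f7f7f7f7f
   replicates one byte, so vece becomes MO_8; val == 0x1234123412341234
   fails the MO_8 test but equals dup_const(MO_16, val), so vece
   becomes MO_16 and the dupi can use the narrower element. */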
4618
4619 /* If the two inputs form one 64-bit value, try dupm_vec. */
4620 if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
4621 itsh->temp_subindex == !HOST_BIG_ENDIAN &&
4622 itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
4623 TCGTemp *its = itsl - HOST_BIG_ENDIAN;
4624
4625 temp_sync(s, its + 0, s->reserved_regs, 0, 0);
4626 temp_sync(s, its + 1, s->reserved_regs, 0, 0);
4627
4628 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
4629 its->mem_base->reg, its->mem_offset)) {
4630 goto done;
4631 }
4632 }
4633
4634 /* Fall back to generic expansion. */
4635 return false;
4636
4637 done:
4638 ots->mem_coherent = 0;
4639 if (IS_DEAD_ARG(1)) {
4640 temp_dead(s, itsl);
4641 }
4642 if (IS_DEAD_ARG(2)) {
4643 temp_dead(s, itsh);
4644 }
4645 if (NEED_SYNC_ARG(0)) {
4646 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
4647 } else if (IS_DEAD_ARG(0)) {
4648 temp_dead(s, ots);
4649 }
4650 return true;
4651 }
4652
4653 static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
4654 TCGRegSet allocated_regs)
4655 {
4656 if (ts->val_type == TEMP_VAL_REG) {
4657 if (ts->reg != reg) {
4658 tcg_reg_free(s, reg, allocated_regs);
4659 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4660 /*
4661 * Cross register class move not supported. Sync the
4662 * temp back to its slot and load from there.
4663 */
4664 temp_sync(s, ts, allocated_regs, 0, 0);
4665 tcg_out_ld(s, ts->type, reg,
4666 ts->mem_base->reg, ts->mem_offset);
4667 }
4668 }
4669 } else {
4670 TCGRegSet arg_set = 0;
4671
4672 tcg_reg_free(s, reg, allocated_regs);
4673 tcg_regset_set_reg(arg_set, reg);
4674 temp_load(s, ts, arg_set, allocated_regs, 0);
4675 }
4676 }
4677
4678 static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
4679 TCGRegSet allocated_regs)
4680 {
4681 /*
4682 * When the destination is on the stack, load up the temp and store.
4683 * If there are many call-saved registers, the temp might live to
4684 * see another use; otherwise it'll be discarded.
4685 */
4686 temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
4687 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
4688 TCG_TARGET_CALL_STACK_OFFSET +
4689 stk_slot * sizeof(tcg_target_long));
4690 }
4691
4692 static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
4693 TCGTemp *ts, TCGRegSet *allocated_regs)
4694 {
4695 if (REG_P(l)) {
4696 TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
4697 load_arg_reg(s, reg, ts, *allocated_regs);
4698 tcg_regset_set_reg(*allocated_regs, reg);
4699 } else {
4700 load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
4701 ts, *allocated_regs);
4702 }
4703 }
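/* For illustration, with hypothetical numbers: if
   tcg_target_call_iarg_regs has 6 entries, then arg_slot 3 is passed
   in the fourth argument register, while arg_slot 8 falls through to
   load_arg_stk with stack slot 8 - 6 = 2. */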
4704
4705 static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
4706 intptr_t ref_off, TCGRegSet *allocated_regs)
4707 {
4708 TCGReg reg;
4709 int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
4710
4711 if (stk_slot < 0) {
4712 reg = tcg_target_call_iarg_regs[arg_slot];
4713 tcg_reg_free(s, reg, *allocated_regs);
4714 tcg_out_addi_ptr(s, reg, ref_base, ref_off);
4715 tcg_regset_set_reg(*allocated_regs, reg);
4716 } else {
4717 reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
4718 *allocated_regs, 0, false);
4719 tcg_out_addi_ptr(s, reg, ref_base, ref_off);
4720 tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
4721 TCG_TARGET_CALL_STACK_OFFSET
4722 + stk_slot * sizeof(tcg_target_long));
4723 }
4724 }
4725
4726 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
4727 {
4728 const int nb_oargs = TCGOP_CALLO(op);
4729 const int nb_iargs = TCGOP_CALLI(op);
4730 const TCGLifeData arg_life = op->life;
4731 const TCGHelperInfo *info = tcg_call_info(op);
4732 TCGRegSet allocated_regs = s->reserved_regs;
4733 int i;
4734
4735 /*
4736 * Move inputs into place in reverse order,
4737 * so that we place stacked arguments first.
4738 */
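/* Storing stack arguments first keeps every argument register free
   for use as a scratch by temp_load; each register argument is
   pinned in allocated_regs only after it has been placed. */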
4739 for (i = nb_iargs - 1; i >= 0; --i) {
4740 const TCGCallArgumentLoc *loc = &info->in[i];
4741 TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
4742
4743 switch (loc->kind) {
4744 case TCG_CALL_ARG_NORMAL:
4745 case TCG_CALL_ARG_EXTEND_U:
4746 case TCG_CALL_ARG_EXTEND_S:
4747 load_arg_normal(s, loc, ts, &allocated_regs);
4748 break;
4749 case TCG_CALL_ARG_BY_REF:
4750 load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
4751 load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
4752 TCG_TARGET_CALL_STACK_OFFSET
4753 + loc->ref_slot * sizeof(tcg_target_long),
4754 &allocated_regs);
4755 break;
4756 case TCG_CALL_ARG_BY_REF_N:
4757 load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
4758 break;
4759 default:
4760 g_assert_not_reached();
4761 }
4762 }
4763
4764 /* Mark dead temporaries and free the associated registers. */
4765 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
4766 if (IS_DEAD_ARG(i)) {
4767 temp_dead(s, arg_temp(op->args[i]));
4768 }
4769 }
4770
4771 /* Clobber call registers. */
4772 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
4773 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
4774 tcg_reg_free(s, i, allocated_regs);
4775 }
4776 }
4777
4778 /*
4779 * Save globals if they might be written by the helper,
4780 * sync them if they might be read.
4781 */
4782 if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
4783 /* Nothing to do */
4784 } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
4785 sync_globals(s, allocated_regs);
4786 } else {
4787 save_globals(s, allocated_regs);
4788 }
4789
4790 /*
4791 * If the ABI passes a pointer to the returned struct as the first
4792 * argument, load that now. Pass a pointer to the output home slot.
4793 */
4794 if (info->out_kind == TCG_CALL_RET_BY_REF) {
4795 TCGTemp *ts = arg_temp(op->args[0]);
4796
4797 if (!ts->mem_allocated) {
4798 temp_allocate_frame(s, ts);
4799 }
4800 load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
4801 }
4802
4803 tcg_out_call(s, tcg_call_func(op), info);
4804
4805 /* Assign output registers and emit moves if needed. */
4806 switch (info->out_kind) {
4807 case TCG_CALL_RET_NORMAL:
4808 for (i = 0; i < nb_oargs; i++) {
4809 TCGTemp *ts = arg_temp(op->args[i]);
4810 TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);
4811
4812 /* ENV should not be modified. */
4813 tcg_debug_assert(!temp_readonly(ts));
4814
4815 set_temp_val_reg(s, ts, reg);
4816 ts->mem_coherent = 0;
4817 }
4818 break;
4819
4820 case TCG_CALL_RET_BY_VEC:
4821 {
4822 TCGTemp *ts = arg_temp(op->args[0]);
4823
4824 tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
4825 tcg_debug_assert(ts->temp_subindex == 0);
4826 if (!ts->mem_allocated) {
4827 temp_allocate_frame(s, ts);
4828 }
4829 tcg_out_st(s, TCG_TYPE_V128,
4830 tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
4831 ts->mem_base->reg, ts->mem_offset);
4832 }
4833 /* fall through to mark all parts in memory */
4834
4835 case TCG_CALL_RET_BY_REF:
4836 /* The callee has performed a write through the reference. */
4837 for (i = 0; i < nb_oargs; i++) {
4838 TCGTemp *ts = arg_temp(op->args[i]);
4839 ts->val_type = TEMP_VAL_MEM;
4840 }
4841 break;
4842
4843 default:
4844 g_assert_not_reached();
4845 }
4846
4847 /* Flush or discard output registers as needed. */
4848 for (i = 0; i < nb_oargs; i++) {
4849 TCGTemp *ts = arg_temp(op->args[i]);
4850 if (NEED_SYNC_ARG(i)) {
4851 temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
4852 } else if (IS_DEAD_ARG(i)) {
4853 temp_dead(s, ts);
4854 }
4855 }
4856 }
4857
4858 #ifdef CONFIG_PROFILER
4859
4860 /* avoid copy/paste errors */
4861 #define PROF_ADD(to, from, field) \
4862 do { \
4863 (to)->field += qatomic_read(&((from)->field)); \
4864 } while (0)
4865
4866 #define PROF_MAX(to, from, field) \
4867 do { \
4868 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
4869 if (val__ > (to)->field) { \
4870 (to)->field = val__; \
4871 } \
4872 } while (0)
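/* For example, PROF_ADD(prof, orig, tb_count) expands (inside the
   do/while wrapper) to
       prof->tb_count += qatomic_read(&orig->tb_count);
   so each counter name is spelled exactly once per use site. */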
4873
4874 /* Pass in a zeroed @prof. */
4875 static inline
4876 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
4877 {
4878 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
4879 unsigned int i;
4880
4881 for (i = 0; i < n_ctxs; i++) {
4882 TCGContext *s = qatomic_read(&tcg_ctxs[i]);
4883 const TCGProfile *orig = &s->prof;
4884
4885 if (counters) {
4886 PROF_ADD(prof, orig, cpu_exec_time);
4887 PROF_ADD(prof, orig, tb_count1);
4888 PROF_ADD(prof, orig, tb_count);
4889 PROF_ADD(prof, orig, op_count);
4890 PROF_MAX(prof, orig, op_count_max);
4891 PROF_ADD(prof, orig, temp_count);
4892 PROF_MAX(prof, orig, temp_count_max);
4893 PROF_ADD(prof, orig, del_op_count);
4894 PROF_ADD(prof, orig, code_in_len);
4895 PROF_ADD(prof, orig, code_out_len);
4896 PROF_ADD(prof, orig, search_out_len);
4897 PROF_ADD(prof, orig, interm_time);
4898 PROF_ADD(prof, orig, code_time);
4899 PROF_ADD(prof, orig, la_time);
4900 PROF_ADD(prof, orig, opt_time);
4901 PROF_ADD(prof, orig, restore_count);
4902 PROF_ADD(prof, orig, restore_time);
4903 }
4904 if (table) {
4905 int i;
4906
4907 for (i = 0; i < NB_OPS; i++) {
4908 PROF_ADD(prof, orig, table_op_count[i]);
4909 }
4910 }
4911 }
4912 }
4913
4914 #undef PROF_ADD
4915 #undef PROF_MAX
4916
4917 static void tcg_profile_snapshot_counters(TCGProfile *prof)
4918 {
4919 tcg_profile_snapshot(prof, true, false);
4920 }
4921
4922 static void tcg_profile_snapshot_table(TCGProfile *prof)
4923 {
4924 tcg_profile_snapshot(prof, false, true);
4925 }
4926
4927 void tcg_dump_op_count(GString *buf)
4928 {
4929 TCGProfile prof = {};
4930 int i;
4931
4932 tcg_profile_snapshot_table(&prof);
4933 for (i = 0; i < NB_OPS; i++) {
4934 g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
4935 prof.table_op_count[i]);
4936 }
4937 }
4938
4939 int64_t tcg_cpu_exec_time(void)
4940 {
4941 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
4942 unsigned int i;
4943 int64_t ret = 0;
4944
4945 for (i = 0; i < n_ctxs; i++) {
4946 const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
4947 const TCGProfile *prof = &s->prof;
4948
4949 ret += qatomic_read(&prof->cpu_exec_time);
4950 }
4951 return ret;
4952 }
4953 #else
4954 void tcg_dump_op_count(GString *buf)
4955 {
4956 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
4957 }
4958
4959 int64_t tcg_cpu_exec_time(void)
4960 {
4961 error_report("%s: TCG profiler not compiled", __func__);
4962 exit(EXIT_FAILURE);
4963 }
4964 #endif
4965
4966
4967 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
4968 {
4969 #ifdef CONFIG_PROFILER
4970 TCGProfile *prof = &s->prof;
4971 #endif
4972 int i, num_insns;
4973 TCGOp *op;
4974
4975 #ifdef CONFIG_PROFILER
4976 {
4977 int n = 0;
4978
4979 QTAILQ_FOREACH(op, &s->ops, link) {
4980 n++;
4981 }
4982 qatomic_set(&prof->op_count, prof->op_count + n);
4983 if (n > prof->op_count_max) {
4984 qatomic_set(&prof->op_count_max, n);
4985 }
4986
4987 n = s->nb_temps;
4988 qatomic_set(&prof->temp_count, prof->temp_count + n);
4989 if (n > prof->temp_count_max) {
4990 qatomic_set(&prof->temp_count_max, n);
4991 }
4992 }
4993 #endif
4994
4995 #ifdef DEBUG_DISAS
4996 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
4997 && qemu_log_in_addr_range(pc_start))) {
4998 FILE *logfile = qemu_log_trylock();
4999 if (logfile) {
5000 fprintf(logfile, "OP:\n");
5001 tcg_dump_ops(s, logfile, false);
5002 fprintf(logfile, "\n");
5003 qemu_log_unlock(logfile);
5004 }
5005 }
5006 #endif
5007
5008 #ifdef CONFIG_DEBUG_TCG
5009 /* Ensure all labels referenced have been emitted. */
5010 {
5011 TCGLabel *l;
5012 bool error = false;
5013
5014 QSIMPLEQ_FOREACH(l, &s->labels, next) {
5015 if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
5016 qemu_log_mask(CPU_LOG_TB_OP,
5017 "$L%d referenced but not present.\n", l->id);
5018 error = true;
5019 }
5020 }
5021 assert(!error);
5022 }
5023 #endif
5024
5025 #ifdef CONFIG_PROFILER
5026 qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
5027 #endif
5028
5029 #ifdef USE_TCG_OPTIMIZATIONS
5030 tcg_optimize(s);
5031 #endif
5032
5033 #ifdef CONFIG_PROFILER
5034 qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
5035 qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
5036 #endif
5037
5038 reachable_code_pass(s);
5039 liveness_pass_0(s);
5040 liveness_pass_1(s);
5041
5042 if (s->nb_indirects > 0) {
5043 #ifdef DEBUG_DISAS
5044 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
5045 && qemu_log_in_addr_range(pc_start))) {
5046 FILE *logfile = qemu_log_trylock();
5047 if (logfile) {
5048 fprintf(logfile, "OP before indirect lowering:\n");
5049 tcg_dump_ops(s, logfile, false);
5050 fprintf(logfile, "\n");
5051 qemu_log_unlock(logfile);
5052 }
5053 }
5054 #endif
5055 /* Replace indirect temps with direct temps. */
5056 if (liveness_pass_2(s)) {
5057 /* If changes were made, re-run liveness. */
5058 liveness_pass_1(s);
5059 }
5060 }
5061
5062 #ifdef CONFIG_PROFILER
5063 qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
5064 #endif
5065
5066 #ifdef DEBUG_DISAS
5067 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
5068 && qemu_log_in_addr_range(pc_start))) {
5069 FILE *logfile = qemu_log_trylock();
5070 if (logfile) {
5071 fprintf(logfile, "OP after optimization and liveness analysis:\n");
5072 tcg_dump_ops(s, logfile, true);
5073 fprintf(logfile, "\n");
5074 qemu_log_unlock(logfile);
5075 }
5076 }
5077 #endif
5078
5079 /* Initialize goto_tb jump offsets. */
5080 tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
5081 tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
5082 tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
5083 tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
5084
5085 tcg_reg_alloc_start(s);
5086
5087 /*
5088 * Reset the buffer pointers when restarting after overflow.
5089 * TODO: Move this into translate-all.c with the rest of the
5090 * buffer management. Having only this done here is confusing.
5091 */
5092 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
5093 s->code_ptr = s->code_buf;
5094
5095 #ifdef TCG_TARGET_NEED_LDST_LABELS
5096 QSIMPLEQ_INIT(&s->ldst_labels);
5097 #endif
5098 #ifdef TCG_TARGET_NEED_POOL_LABELS
5099 s->pool_labels = NULL;
5100 #endif
5101
5102 num_insns = -1;
5103 QTAILQ_FOREACH(op, &s->ops, link) {
5104 TCGOpcode opc = op->opc;
5105
5106 #ifdef CONFIG_PROFILER
5107 qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
5108 #endif
5109
5110 switch (opc) {
5111 case INDEX_op_mov_i32:
5112 case INDEX_op_mov_i64:
5113 case INDEX_op_mov_vec:
5114 tcg_reg_alloc_mov(s, op);
5115 break;
5116 case INDEX_op_dup_vec:
5117 tcg_reg_alloc_dup(s, op);
5118 break;
5119 case INDEX_op_insn_start:
5120 if (num_insns >= 0) {
5121 size_t off = tcg_current_code_size(s);
5122 s->gen_insn_end_off[num_insns] = off;
5123 /* Assert that we do not overflow our stored offset. */
5124 assert(s->gen_insn_end_off[num_insns] == off);
5125 }
5126 num_insns++;
5127 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
5128 target_ulong a;
5129 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
5130 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
5131 #else
5132 a = op->args[i];
5133 #endif
5134 s->gen_insn_data[num_insns][i] = a;
5135 }
5136 break;
5137 case INDEX_op_discard:
5138 temp_dead(s, arg_temp(op->args[0]));
5139 break;
5140 case INDEX_op_set_label:
5141 tcg_reg_alloc_bb_end(s, s->reserved_regs);
5142 tcg_out_label(s, arg_label(op->args[0]));
5143 break;
5144 case INDEX_op_call:
5145 tcg_reg_alloc_call(s, op);
5146 break;
5147 case INDEX_op_exit_tb:
5148 tcg_out_exit_tb(s, op->args[0]);
5149 break;
5150 case INDEX_op_goto_tb:
5151 tcg_out_goto_tb(s, op->args[0]);
5152 break;
5153 case INDEX_op_dup2_vec:
5154 if (tcg_reg_alloc_dup2(s, op)) {
5155 break;
5156 }
5157 /* fall through */
5158 default:
5159 /* Sanity check that we've not introduced any unhandled opcodes. */
5160 tcg_debug_assert(tcg_op_supported(opc));
5161 /* Note: it would be much faster to have specialized
5162 register allocator functions for some common
5163 argument patterns. */
5164 tcg_reg_alloc_op(s, op);
5165 break;
5166 }
5167 /* Test for (pending) buffer overflow. The assumption is that any
5168 one operation beginning below the high water mark cannot overrun
5169 the buffer completely. Thus we can test for overflow after
5170 generating code without having to check during generation. */
5171 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
5172 return -1;
5173 }
5174 /* Test for TB overflow, as seen by gen_insn_end_off. */
5175 if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
5176 return -2;
5177 }
5178 }
5179 tcg_debug_assert(num_insns >= 0);
5180 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
5181
5182 /* Generate TB finalization at the end of block */
5183 #ifdef TCG_TARGET_NEED_LDST_LABELS
5184 i = tcg_out_ldst_finalize(s);
5185 if (i < 0) {
5186 return i;
5187 }
5188 #endif
5189 #ifdef TCG_TARGET_NEED_POOL_LABELS
5190 i = tcg_out_pool_finalize(s);
5191 if (i < 0) {
5192 return i;
5193 }
5194 #endif
5195 if (!tcg_resolve_relocs(s)) {
5196 return -2;
5197 }
5198
5199 #ifndef CONFIG_TCG_INTERPRETER
5200 /* flush instruction cache */
5201 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
5202 (uintptr_t)s->code_buf,
5203 tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
5204 #endif
5205
5206 return tcg_current_code_size(s);
5207 }
5208
5209 #ifdef CONFIG_PROFILER
5210 void tcg_dump_info(GString *buf)
5211 {
5212 TCGProfile prof = {};
5213 const TCGProfile *s;
5214 int64_t tb_count;
5215 int64_t tb_div_count;
5216 int64_t tot;
5217
5218 tcg_profile_snapshot_counters(&prof);
5219 s = &prof;
5220 tb_count = s->tb_count;
5221 tb_div_count = tb_count ? tb_count : 1;
5222 tot = s->interm_time + s->code_time;
5223
5224 g_string_append_printf(buf, "JIT cycles %" PRId64
5225 " (%0.3f s at 2.4 GHz)\n",
5226 tot, tot / 2.4e9);
5227 g_string_append_printf(buf, "translated TBs %" PRId64
5228 " (aborted=%" PRId64 " %0.1f%%)\n",
5229 tb_count, s->tb_count1 - tb_count,
5230 (double)(s->tb_count1 - s->tb_count)
5231 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
5232 g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
5233 (double)s->op_count / tb_div_count, s->op_count_max);
5234 g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
5235 (double)s->del_op_count / tb_div_count);
5236 g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
5237 (double)s->temp_count / tb_div_count,
5238 s->temp_count_max);
5239 g_string_append_printf(buf, "avg host code/TB %0.1f\n",
5240 (double)s->code_out_len / tb_div_count);
5241 g_string_append_printf(buf, "avg search data/TB %0.1f\n",
5242 (double)s->search_out_len / tb_div_count);
5243
5244 g_string_append_printf(buf, "cycles/op %0.1f\n",
5245 s->op_count ? (double)tot / s->op_count : 0);
5246 g_string_append_printf(buf, "cycles/in byte %0.1f\n",
5247 s->code_in_len ? (double)tot / s->code_in_len : 0);
5248 g_string_append_printf(buf, "cycles/out byte %0.1f\n",
5249 s->code_out_len ? (double)tot / s->code_out_len : 0);
5250 g_string_append_printf(buf, "cycles/search byte %0.1f\n",
5251 s->search_out_len ?
5252 (double)tot / s->search_out_len : 0);
5253 if (tot == 0) {
5254 tot = 1;
5255 }
5256 g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
5257 (double)s->interm_time / tot * 100.0);
5258 g_string_append_printf(buf, " gen_code time %0.1f%%\n",
5259 (double)s->code_time / tot * 100.0);
5260 g_string_append_printf(buf, "optim./code time %0.1f%%\n",
5261 (double)s->opt_time / (s->code_time ?
5262 s->code_time : 1)
5263 * 100.0);
5264 g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
5265 (double)s->la_time / (s->code_time ?
5266 s->code_time : 1) * 100.0);
5267 g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
5268 s->restore_count);
5269 g_string_append_printf(buf, " avg cycles %0.1f\n",
5270 s->restore_count ?
5271 (double)s->restore_time / s->restore_count : 0);
5272 }
5273 #else
5274 void tcg_dump_info(GString *buf)
5275 {
5276 g_string_append_printf(buf, "[TCG profiler not compiled]\n");
5277 }
5278 #endif
5279
5280 #ifdef ELF_HOST_MACHINE
5281 /* In order to use this feature, the backend needs to do three things:
5282
5283 (1) Define ELF_HOST_MACHINE to indicate both what value to
5284 put into the ELF image and to indicate support for the feature.
5285
5286 (2) Define tcg_register_jit. This should create a buffer containing
5287 the contents of a .debug_frame section that describes the post-
5288 prologue unwind info for the tcg machine.
5289
5290 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
5291 */
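/* A minimal sketch of steps (2) and (3) for a hypothetical backend's
   tcg-target.c.inc. The return_column value is a host-specific
   assumption, and a real backend also appends host-specific CFA and
   register-save rules after the FDE header; this only shows the
   shape of the call:

       static const DebugFrameHeader debug_frame = {
           .cie.len = sizeof(DebugFrameCIE) - 4,  // length excludes .len itself
           .cie.id = -1,                          // 0xffffffff: .debug_frame CIE
           .cie.version = 1,
           .cie.code_align = 1,
           .cie.data_align = 0x78,                // sleb128(-8)
           .cie.return_column = 30,               // hypothetical host register
           .fde.len = sizeof(DebugFrameFDEHeader) - 4,
       };

       void tcg_register_jit(const void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/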
5292
5293 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
5294 typedef enum {
5295 JIT_NOACTION = 0,
5296 JIT_REGISTER_FN,
5297 JIT_UNREGISTER_FN
5298 } jit_actions_t;
5299
5300 struct jit_code_entry {
5301 struct jit_code_entry *next_entry;
5302 struct jit_code_entry *prev_entry;
5303 const void *symfile_addr;
5304 uint64_t symfile_size;
5305 };
5306
5307 struct jit_descriptor {
5308 uint32_t version;
5309 uint32_t action_flag;
5310 struct jit_code_entry *relevant_entry;
5311 struct jit_code_entry *first_entry;
5312 };
5313
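/* GDB plants a breakpoint in this deliberately empty function; the
   noinline attribute and the asm("") keep it from being elided, so
   each call notifies the debugger that the descriptor changed. */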
5314 void __jit_debug_register_code(void) __attribute__((noinline));
5315 void __jit_debug_register_code(void)
5316 {
5317 asm("");
5318 }
5319
5320 /* Must statically initialize the version, because GDB may check
5321 the version before we can set it. */
5322 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
5323
5324 /* End GDB interface. */
5325
5326 static int find_string(const char *strtab, const char *str)
5327 {
5328 const char *p = strtab + 1;
5329
5330 while (1) {
5331 if (strcmp(p, str) == 0) {
5332 return p - strtab;
5333 }
5334 p += strlen(p) + 1;
5335 }
5336 }
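/* E.g. with the .str table defined below, find_string(img->str,
   ".text") returns 1 and find_string(img->str, ".debug_info")
   returns 7. The target string must be present: the loop has no
   end-of-table check. */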
5337
5338 static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
5339 const void *debug_frame,
5340 size_t debug_frame_size)
5341 {
5342 struct __attribute__((packed)) DebugInfo {
5343 uint32_t len;
5344 uint16_t version;
5345 uint32_t abbrev;
5346 uint8_t ptr_size;
5347 uint8_t cu_die;
5348 uint16_t cu_lang;
5349 uintptr_t cu_low_pc;
5350 uintptr_t cu_high_pc;
5351 uint8_t fn_die;
5352 char fn_name[16];
5353 uintptr_t fn_low_pc;
5354 uintptr_t fn_high_pc;
5355 uint8_t cu_eoc;
5356 };
5357
5358 struct ElfImage {
5359 ElfW(Ehdr) ehdr;
5360 ElfW(Phdr) phdr;
5361 ElfW(Shdr) shdr[7];
5362 ElfW(Sym) sym[2];
5363 struct DebugInfo di;
5364 uint8_t da[24];
5365 char str[80];
5366 };
5367
5368 struct ElfImage *img;
5369
5370 static const struct ElfImage img_template = {
5371 .ehdr = {
5372 .e_ident[EI_MAG0] = ELFMAG0,
5373 .e_ident[EI_MAG1] = ELFMAG1,
5374 .e_ident[EI_MAG2] = ELFMAG2,
5375 .e_ident[EI_MAG3] = ELFMAG3,
5376 .e_ident[EI_CLASS] = ELF_CLASS,
5377 .e_ident[EI_DATA] = ELF_DATA,
5378 .e_ident[EI_VERSION] = EV_CURRENT,
5379 .e_type = ET_EXEC,
5380 .e_machine = ELF_HOST_MACHINE,
5381 .e_version = EV_CURRENT,
5382 .e_phoff = offsetof(struct ElfImage, phdr),
5383 .e_shoff = offsetof(struct ElfImage, shdr),
5384 .e_ehsize = sizeof(ElfW(Ehdr)),
5385 .e_phentsize = sizeof(ElfW(Phdr)),
5386 .e_phnum = 1,
5387 .e_shentsize = sizeof(ElfW(Shdr)),
5388 .e_shnum = ARRAY_SIZE(img->shdr),
5389 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
5390 #ifdef ELF_HOST_FLAGS
5391 .e_flags = ELF_HOST_FLAGS,
5392 #endif
5393 #ifdef ELF_OSABI
5394 .e_ident[EI_OSABI] = ELF_OSABI,
5395 #endif
5396 },
5397 .phdr = {
5398 .p_type = PT_LOAD,
5399 .p_flags = PF_X,
5400 },
5401 .shdr = {
5402 [0] = { .sh_type = SHT_NULL },
5403 /* Trick: The contents of code_gen_buffer are not present in
5404 this fake ELF file; that got allocated elsewhere. Therefore
5405 we mark .text as SHT_NOBITS (similar to .bss) so that readers
5406 will not look for contents. We can record any address. */
5407 [1] = { /* .text */
5408 .sh_type = SHT_NOBITS,
5409 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
5410 },
5411 [2] = { /* .debug_info */
5412 .sh_type = SHT_PROGBITS,
5413 .sh_offset = offsetof(struct ElfImage, di),
5414 .sh_size = sizeof(struct DebugInfo),
5415 },
5416 [3] = { /* .debug_abbrev */
5417 .sh_type = SHT_PROGBITS,
5418 .sh_offset = offsetof(struct ElfImage, da),
5419 .sh_size = sizeof(img->da),
5420 },
5421 [4] = { /* .debug_frame */
5422 .sh_type = SHT_PROGBITS,
5423 .sh_offset = sizeof(struct ElfImage),
5424 },
5425 [5] = { /* .symtab */
5426 .sh_type = SHT_SYMTAB,
5427 .sh_offset = offsetof(struct ElfImage, sym),
5428 .sh_size = sizeof(img->sym),
5429 .sh_info = 1,
5430 .sh_link = ARRAY_SIZE(img->shdr) - 1,
5431 .sh_entsize = sizeof(ElfW(Sym)),
5432 },
5433 [6] = { /* .strtab */
5434 .sh_type = SHT_STRTAB,
5435 .sh_offset = offsetof(struct ElfImage, str),
5436 .sh_size = sizeof(img->str),
5437 }
5438 },
5439 .sym = {
5440 [1] = { /* code_gen_buffer */
5441 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
5442 .st_shndx = 1,
5443 }
5444 },
5445 .di = {
5446 .len = sizeof(struct DebugInfo) - 4,
5447 .version = 2,
5448 .ptr_size = sizeof(void *),
5449 .cu_die = 1,
5450 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
5451 .fn_die = 2,
5452 .fn_name = "code_gen_buffer"
5453 },
5454 .da = {
5455 1, /* abbrev number (the cu) */
5456 0x11, 1, /* DW_TAG_compile_unit, has children */
5457 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
5458 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5459 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5460 0, 0, /* end of abbrev */
5461 2, /* abbrev number (the fn) */
5462 0x2e, 0, /* DW_TAG_subprogram, no children */
5463 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
5464 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
5465 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
5466 0, 0, /* end of abbrev */
5467 0 /* no more abbrev */
5468 },
5469 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
5470 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
5471 };
5472
5473 /* We only need a single jit entry; statically allocate it. */
5474 static struct jit_code_entry one_entry;
5475
5476 uintptr_t buf = (uintptr_t)buf_ptr;
5477 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
5478 DebugFrameHeader *dfh;
5479
5480 img = g_malloc(img_size);
5481 *img = img_template;
5482
5483 img->phdr.p_vaddr = buf;
5484 img->phdr.p_paddr = buf;
5485 img->phdr.p_memsz = buf_size;
5486
5487 img->shdr[1].sh_name = find_string(img->str, ".text");
5488 img->shdr[1].sh_addr = buf;
5489 img->shdr[1].sh_size = buf_size;
5490
5491 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
5492 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
5493
5494 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
5495 img->shdr[4].sh_size = debug_frame_size;
5496
5497 img->shdr[5].sh_name = find_string(img->str, ".symtab");
5498 img->shdr[6].sh_name = find_string(img->str, ".strtab");
5499
5500 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
5501 img->sym[1].st_value = buf;
5502 img->sym[1].st_size = buf_size;
5503
5504 img->di.cu_low_pc = buf;
5505 img->di.cu_high_pc = buf + buf_size;
5506 img->di.fn_low_pc = buf;
5507 img->di.fn_high_pc = buf + buf_size;
5508
5509 dfh = (DebugFrameHeader *)(img + 1);
5510 memcpy(dfh, debug_frame, debug_frame_size);
5511 dfh->fde.func_start = buf;
5512 dfh->fde.func_len = buf_size;
5513
5514 #ifdef DEBUG_JIT
5515 /* Enable this block to debug the ELF image file creation.
5516 One can use readelf, objdump, or other inspection utilities. */
5517 {
5518 g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
5519 FILE *f = fopen(jit, "w+b");
5520 if (f) {
5521 if (fwrite(img, img_size, 1, f) != 1) {
5522 /* Avoid stupid unused return value warning for fwrite. */
5523 }
5524 fclose(f);
5525 }
5526 }
5527 #endif
5528
5529 one_entry.symfile_addr = img;
5530 one_entry.symfile_size = img_size;
5531
5532 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
5533 __jit_debug_descriptor.relevant_entry = &one_entry;
5534 __jit_debug_descriptor.first_entry = &one_entry;
5535 __jit_debug_register_code();
5536 }
5537 #else
5538 /* No support for the feature. Provide the entry point expected by exec.c,
5539 and implement the internal function we declared earlier. */
5540
5541 static void tcg_register_jit_int(const void *buf, size_t size,
5542 const void *debug_frame,
5543 size_t debug_frame_size)
5544 {
5545 }
5546
5547 void tcg_register_jit(const void *buf, size_t buf_size)
5548 {
5549 }
5550 #endif /* ELF_HOST_MACHINE */
5551
5552 #if !TCG_TARGET_MAYBE_vec
5553 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
5554 {
5555 g_assert_not_reached();
5556 }
5557 #endif