/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "exec/memop.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "qemu/int128.h"
#include "tcg/tcg-cond.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
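
/*
 * Worked example: with the defaults above, a 64-bit host
 * (MAX_OPC_PARAM_PER_ARG == 1) gets MAX_OPC_PARAM == 4 + 1 * (6 + 1) == 11,
 * and a 32-bit host (MAX_OPC_PARAM_PER_ARG == 2) gets 4 + 2 * 7 == 18.
 */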

#define CPU_TEMP_BUF_NLONGS 128
#define TCG_STATIC_FRAME_SIZE  (CPU_TEMP_BUF_NLONGS * sizeof(long))

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_extract2_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#if !defined(TCG_TARGET_HAS_v64) \
    && !defined(TCG_TARGET_HAS_v128) \
    && !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec            0
#define TCG_TARGET_HAS_abs_vec          0
#define TCG_TARGET_HAS_neg_vec          0
#define TCG_TARGET_HAS_not_vec          0
#define TCG_TARGET_HAS_andc_vec         0
#define TCG_TARGET_HAS_orc_vec          0
#define TCG_TARGET_HAS_roti_vec         0
#define TCG_TARGET_HAS_rots_vec         0
#define TCG_TARGET_HAS_rotv_vec         0
#define TCG_TARGET_HAS_shi_vec          0
#define TCG_TARGET_HAS_shs_vec          0
#define TCG_TARGET_HAS_shv_vec          0
#define TCG_TARGET_HAS_mul_vec          0
#define TCG_TARGET_HAS_sat_vec          0
#define TCG_TARGET_HAS_minmax_vec       0
#define TCG_TARGET_HAS_bitsel_vec       0
#define TCG_TARGET_HAS_cmpsel_vec       0
#else
#define TCG_TARGET_MAYBE_vec            1
#endif
#ifndef TCG_TARGET_HAS_v64
#define TCG_TARGET_HAS_v64              0
#endif
#ifndef TCG_TARGET_HAS_v128
#define TCG_TARGET_HAS_v128             0
#endif
#ifndef TCG_TARGET_HAS_v256
#define TCG_TARGET_HAS_v256             0
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#error "unimplemented code units size"
#endif

#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif

typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;
    intptr_t addend;
    int type;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;
    unsigned has_value : 1;
    unsigned id : 14;
    unsigned refs : 16;
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

typedef struct TCGPool {
    struct TCGPool *next;
    intptr_t size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/**
 * get_alignment_bits
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
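
/*
 * Example values (using the MemOp constants from exec/memop.h, included
 * above): MO_64 | MO_ALIGN yields 3 (natural 8-byte alignment),
 * MO_ALIGN_16 yields 4 (an explicit 16-byte requirement), and MO_UNALN
 * yields 0.  The returned value is always log2 of the alignment in bytes.
 */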

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv_vec : a host vector type; the exact size is not exposed
                 to the CPU front-end code.
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
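
/*
 * Typical front-end usage (sketch; the tcg_gen_* emitters themselves are
 * declared in tcg-op.h, not here):
 *
 *     TCGv_i32 t = tcg_temp_new_i32();
 *     tcg_gen_movi_i32(t, 0);
 *     ...
 *     tcg_temp_free_i32(t);
 *
 * Passing "t" to a function that expects a TCGv_i64 is a compile error,
 * which is exactly what the distinct pointer types are for.
 */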

/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
/* Helper is QEMU_NORETURN.  */
#define TCG_CALL_NO_RETURN          0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef enum TCGTempKind {
    /* Temp is dead at the end of all basic blocks. */
    TEMP_NORMAL,
    /* Temp is saved across basic blocks but dead at the end of TBs. */
    TEMP_LOCAL,
    /* Temp is saved across both basic blocks and translation blocks. */
    TEMP_GLOBAL,
    /* Temp is in a fixed register. */
    TEMP_FIXED,
    /* Temp is a fixed constant. */
    TEMP_CONST,
} TCGTempKind;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_allocated:1;

    int64_t val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;
#ifdef CONFIG_PLUGIN
    QSIMPLEQ_ENTRY(TCGOp) plugin_link;
#endif

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;

#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

#define TCGOP_VECL(X)     (X)->param1
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t del_op_count;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    size_t tb_phys_invalidate_count;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;

    /* list to quickly access the injected ops */
    QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
#endif

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];

    /* Exit to translator on overflow. */
    sigjmp_buf jmp_trans;
};

static inline bool temp_readonly(TCGTemp *ts)
{
    return ts->kind >= TEMP_FIXED;
}

extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env cpu_env;

bool in_code_gen_buffer(const void *p);

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGArg tcgv_vec_arg(TCGv_vec v)
{
    return temp_arg(tcgv_vec_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}
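
/*
 * Sketch of the invariant provided by the two accessor families above:
 * temp_tcgv_* and tcgv_*_temp are inverses of one another.  The function
 * name below is invented for illustration only.
 */
static inline bool tcg_example_tcgv_i32_roundtrip(TCGv_i32 v)
{
    return temp_tcgv_i32(tcgv_i32_temp(v)) == v;
}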

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
    return op->args[arg];
}

static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    return tcg_get_insn_param(op, arg);
#else
    return tcg_get_insn_param(op, arg * 2) |
           ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
#endif
}

static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}

/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* This is not a hard limit, it merely stops translation when
     * we have produced "enough" opcodes.  We want to limit TB size
     * such that a RISC host can reasonably use a 16-bit signed
     * branch within the TB.  We also need to be mindful of the
     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
     * and TCGContext.gen_insn_end_off[].
     */
    return tcg_ctx->nb_ops >= 4000;
}

/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tb_destroy(TranslationBlock *tb);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
size_t tcg_tb_phys_invalidate_count(void);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
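
/*
 * Usage sketch: allocations made with tcg_malloc() are not freed
 * individually; they live until the next tcg_pool_reset(), e.g.
 *
 *     void *p = tcg_malloc(len);
 *     ...use p while translating the current TB...
 */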

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);

static inline void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

static inline void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

static inline void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    return temp_tcgv_i32(t);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    return temp_tcgv_i64(t);
}

static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
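
/*
 * Usage sketch for the two hooks above:
 *
 *     tcg_clear_temp_count();
 *     ...front-end code that should allocate and free its temporaries...
 *     if (tcg_check_temp_count()) {
 *         // a temporary was leaked by the code in between
 *     }
 */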

int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(void);
void tcg_dump_op_count(void);

#define TCG_CT_CONST  1 /* any constant of register size */

typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available, all used.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially update globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
    /* Instruction is a conditional branch.  */
    TCG_OPF_COND_BRANCH  = 0x80,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

/**
 * tcg_remove_ops_after:
 * @op: target operation
 *
 * Discard any opcodes emitted since @op.  Expected usage is to save
 * a starting point with tcg_last_op(), speculatively emit opcodes,
 * then decide whether or not to keep those opcodes after the fact.
 */
void tcg_remove_ops_after(TCGOp *op);
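
/*
 * Usage sketch, following the expected pattern described above
 * ("keep_them" stands in for whatever decision the front end makes):
 *
 *     TCGOp *start = tcg_last_op();
 *     ...speculatively emit opcodes...
 *     if (!keep_them) {
 *         tcg_remove_ops_after(start);
 *     }
 */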

void tcg_optimize(TCGContext *s);

/* Allocate a new temporary and initialize it with a constant. */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);

/*
 * Locate or create a read-only temporary that is a constant.
 * This kind of temporary need not be freed, but for convenience
 * will be silently ignored by tcg_temp_free_*.
 */
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);

static inline TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

static inline TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */
static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */
static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */
static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */
static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}

/**
 * tcg_tbrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a difference, from the beginning of the current TB code
 * to the destination address.
 */
static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */
static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the MemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline MemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3
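
/*
 * Decoding sketch for the value returned by tcg_qemu_tb_exec(), per the
 * description above ("ret" is the returned uintptr_t):
 *
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     switch (ret & TB_EXIT_MASK) {
 *     case TB_EXIT_IDX0:
 *     case TB_EXIT_IDX1:
 *         // left via goto_tb; CPU state is in sync, maybe link the TBs
 *         break;
 *     case TB_EXIT_REQUESTED:
 *         // exit_request was set; synchronize CPU state from last_tb
 *         break;
 *     }
 */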

#ifdef CONFIG_TCG_INTERPRETER
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

void tcg_register_jit(const void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))
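
/*
 * Worked example: with a constant VECE the macro folds at compile time,
 * e.g. dup_const(MO_8, 0xab) == 0xababababababababull and
 * dup_const(MO_32, 0x1234) == 0x0000123400001234ull.
 */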

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
    TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu     \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */

/*
 * These aren't really "proper" helpers because TCG cannot manage Int128.
 * However, use the same format as the others, for use by the backends.
 *
 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
 * the ld/st functions are only defined if HAVE_ATOMIC128,
 * as defined by <qemu/atomic128.h>.
 */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);