/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-mo.h"
#include "tcg-target.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
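
/* Illustrative arithmetic only, derived from the definitions above:
 * with MAX_OPC_PARAM_ARGS = 5 + 1 = 6, MAX_OPC_PARAM works out to
 * 4 + 2 * 6 = 16 TCGArg slots per op on a 32-bit host, and
 * 4 + 1 * 6 = 10 slots on a 64-bit host.  */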
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif

typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;

typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address which is aligned
     * to a size more than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN supposes the alignment size is the size of a memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes).
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
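
/* Illustrative sketch (not part of the API): a front end describing a
 * target-endian, 32-bit unsigned load that must be naturally aligned
 * would combine the flags above as
 *
 *     TCGMemOp memop = MO_TEUL | MO_ALIGN;
 *
 * while MO_LEQ | MO_ALIGN_4 describes a little-endian 64-bit access
 * that only needs 4-byte alignment.
 */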

/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
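
/* Worked examples (illustrative only): get_alignment_bits(MO_UNALN) is 0;
 * get_alignment_bits(MO_ALIGN | MO_32) is 2, i.e. a natural 4-byte
 * alignment for a 32-bit access; get_alignment_bits(MO_ALIGN_16) is 4,
 * i.e. an explicit 16-byte alignment regardless of the access size.
 * The result is always log2 of the required alignment in bytes.
 */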

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README.  Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures.  They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on.  Only the internals of
   TCG need to care about the actual contents of the types, and they always
   box and unbox via the MAKE_TCGV_* and GET_TCGV_* functions.
   Converting to and from intptr_t rather than int reduces the number
   of sign-extension instructions that get implied on 64-bit hosts.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif

static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}

static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}

static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}

#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif

#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))

/* Dummy definition to avoid compiler warnings.  */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)

#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
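
/* Boxing sketch (illustrative, not part of the API): the "pointers" above
 * are really just temp indices in disguise, so the conversions round-trip:
 *
 *     TCGv_i32 t = MAKE_TCGV_I32(5);      // box temp index 5
 *     intptr_t idx = GET_TCGV_I32(t);     // idx == 5 again
 *
 * Passing t to a function expecting TCGv_i64 is a compile-time type error,
 * which is the whole point of the distinct dummy struct types.
 */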

/* call flags */
/* Helper does not read globals (either directly or through an exception).  It
   implies TCG_CALL_NO_WRITE_GLOBALS.  */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used.  */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* used to align parameters */
#define TCG_CALL_DUMMY_TCGV     MAKE_TCGV_I32(-1)
#define TCG_CALL_DUMMY_ARG      ((TCGArg)(-1))

/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
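
/* Worked examples (illustrative only), following the bit layout above:
 *
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE    (flip bit 0)
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT    (LT(a,b) == GT(b,a))
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU   (bit 1 becomes bit 2)
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT    (drop the equality)
 *
 * EQ and NE carry no signedness, so tcg_swap_cond and tcg_unsigned_cond
 * leave them unchanged.
 */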

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead
       at the end of translation blocks.  If false, the temp is
       dead at the end of basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* The number of out and in parameters for a call.  */
    unsigned calli  : 4;        /* 12 */
    unsigned callo  : 2;        /* 14 */
    unsigned        : 2;        /* 16 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev   : 16;       /* 32 */
    unsigned next   : 16;       /* 48 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 64 */

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];
} TCGOp;

/* Make sure that we don't expand the structure without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    /* profiling info */
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t temp_count;
    int temp_count_max;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
#endif

    int gen_next_op_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_prologue;
    void *code_gen_epilogue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    TBContext tb_ctx;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */
    TCGv_env tcg_env;                   /* *_exec  */

    /* These structures are private to tcg-target.inc.c.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    struct TCGLabelQemuLdst *ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers.  */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};

extern TCGContext tcg_ctx;
extern bool parallel_cpus;

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx.temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx.nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return temp_idx(ts);
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return a == TCG_CALL_DUMMY_ARG ? NULL : &tcg_ctx.temps[a];
}
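
/* Round-trip sketch (illustrative only): a TCGTemp is identified by its
 * index in tcg_ctx.temps, so for any live temp ts,
 *
 *     arg_temp(temp_arg(ts)) == ts
 *
 * and the dummy argument used to pad call parameters maps back to NULL:
 *
 *     arg_temp(TCG_CALL_DUMMY_ARG) == NULL
 */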

static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    tcg_ctx.gen_op_buf[op_idx].args[arg] = v;
}

/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}

/* pool based memory allocation */

/* tb_lock must be held for tcg_malloc_internal.  */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

/* Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(&tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
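
/* Usage sketch (illustrative only): pool allocations are bump-pointer
 * allocations that live until the next tcg_pool_reset(), so there is
 * no matching free:
 *
 *     TCGArgConstraint *ct = tcg_malloc(n * sizeof(TCGArgConstraint));
 *     // ... use ct; it is reclaimed wholesale when the pool is reset.
 */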

void tcg_context_init(TCGContext *s);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
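
/* Lifecycle sketch (illustrative only): a front end typically allocates a
 * scratch temp, uses it with the tcg_gen_* helpers, and frees it before
 * the end of translation so the slot can be recycled:
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     // ... emit ops that read/write tmp ...
 *     tcg_temp_free_i32(tmp);
 *
 * Use tcg_temp_local_new_i32() instead when the value must survive a
 * branch to a label within the TB.
 */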

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
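
/* Round-trip sketch (illustrative only): unlike temps, labels are encoded
 * by pointer value rather than by index, so for any label l,
 *
 *     arg_label(label_arg(l)) == l
 *
 * which is what lets a branch op carry its target label inside args[].
 */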

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
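
/* Round-trip sketch (illustrative only): the mmu index lives in the low
 * four bits and the memop in the rest, so the encoding is reversible:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *     get_memop(oi)  == MO_TEUL;
 *     get_mmuidx(oi) == 1;
 */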

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits.  The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1).  That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>".  The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it.  In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled).  The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_REQUESTED 3
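
/* Decoding sketch (illustrative only), mirroring how a caller would split
 * the return value into TB pointer and exit reason per the comment above:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;  // IDX0, IDX1, 2, or REQUESTED
 */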

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif

void tcg_register_jit(void *buf, size_t buf_size);

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

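/* Expansion sketch (illustrative only): for instance,
 * GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) declares
 *
 *     uint32_t helper_atomic_fetch_addl_le_mmu
 *         (CPUArchState *env, target_ulong addr, uint32_t val,
 *          TCGMemOpIdx oi, uintptr_t retaddr);
 *
 * i.e. a little-endian 32-bit atomic fetch-and-add helper.
 */
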
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */

#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really "proper" helpers because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends.  */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */