/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster. */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)0)
#endif
#include "qemu-common.h"
#include "tcg/tcg.h"           /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"
#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
#if TCG_TARGET_REG_BITS == 32
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#endif
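
/*
 * A note on the two variants above: TCG helpers take at most
 * MAX_OPC_PARAM_IARGS (6) input arguments, but on a 32-bit host a 64-bit
 * argument is passed as a low/high pair of tcg_target_ulong words, so up
 * to 12 words may be needed there; on a 64-bit host, one word per
 * argument is enough.
 */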
__thread uintptr_t tci_tb_ptr;
static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    return regs[index];
}
static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    tci_assert(index != TCG_AREG0);
    tci_assert(index != TCG_REG_CALL_STACK);
    regs[index] = value;
}
#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    tci_write_reg(regs, low_index, value);
    tci_write_reg(regs, high_index, value >> 32);
}
#endif
#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
#endif
/* Read constant byte from bytecode. */
static uint8_t tci_read_b(const uint8_t **tb_ptr)
{
    return *(tb_ptr[0]++);
}
/* Read register number from bytecode. */
static TCGReg tci_read_r(const uint8_t **tb_ptr)
{
    uint8_t regno = tci_read_b(tb_ptr);
    tci_assert(regno < TCG_TARGET_NB_REGS);
    return regno;
}
/* Read constant (native size) from bytecode. */
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
{
    tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
/* Read unsigned constant (32 bit) from bytecode. */
static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value = *(const uint32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
/* Read signed constant (32 bit) from bytecode. */
static int32_t tci_read_s32(const uint8_t **tb_ptr)
{
    int32_t value = *(const int32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
#if TCG_TARGET_REG_BITS == 64
/* Read constant (64 bit) from bytecode. */
static uint64_t tci_read_i64(const uint8_t **tb_ptr)
{
    uint64_t value = *(const uint64_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
#endif
/* Read indexed register (native size) from bytecode. */
static tcg_target_ulong
tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}
#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint32_t low = tci_read_rval(regs, tb_ptr);
    return tci_uint64(tci_read_rval(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    return tci_read_rval(regs, tb_ptr);
}
#endif
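
/*
 * On a 32-bit host a 64-bit operand is thus encoded low word first:
 * tci_read_r64() reads the low register, then the high register, and
 * reassembles them with tci_uint64().
 */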
/* Read indexed register(s) with target address from bytecode. */
static target_ulong
tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    target_ulong taddr = tci_read_rval(regs, tb_ptr);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    /* A 64-bit guest address arrives as two 32-bit register operands. */
    taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
#endif
    return taddr;
}
static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
    tcg_target_ulong label = tci_read_i(tb_ptr);
    tci_assert(label != 0);
    return label;
}
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   r = register
 *   s = signed ldst offset
 */
static void tci_args_rr(const uint8_t **tb_ptr,
                        TCGReg *r0, TCGReg *r1)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
}
static void tci_args_rrr(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
}
static void tci_args_rrs(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *i2 = tci_read_s32(tb_ptr);
}
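
/*
 * Decoding example: a two-register-plus-offset op such as ld_i32 calls
 * tci_args_rrs(), which consumes two one-byte register numbers followed
 * by a 32-bit signed offset from the bytecode stream.
 */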
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    int32_t i0 = u0, i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:  return u0 == u1;
    case TCG_COND_NE:  return u0 != u1;
    case TCG_COND_LT:  return i0 <  i1;
    case TCG_COND_GE:  return i0 >= i1;
    case TCG_COND_LE:  return i0 <= i1;
    case TCG_COND_GT:  return i0 >  i1;
    case TCG_COND_LTU: return u0 <  u1;
    case TCG_COND_GEU: return u0 >= u1;
    case TCG_COND_LEU: return u0 <= u1;
    case TCG_COND_GTU: return u0 >  u1;
    default:
        g_assert_not_reached();
    }
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    int64_t i0 = u0, i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:  return u0 == u1;
    case TCG_COND_NE:  return u0 != u1;
    case TCG_COND_LT:  return i0 <  i1;
    case TCG_COND_GE:  return i0 >= i1;
    case TCG_COND_LE:  return i0 <= i1;
    case TCG_COND_GT:  return i0 >  i1;
    case TCG_COND_LTU: return u0 <  u1;
    case TCG_COND_GEU: return u0 >= u1;
    case TCG_COND_LEU: return u0 <= u1;
    case TCG_COND_GTU: return u0 >  u1;
    default:
        g_assert_not_reached();
    }
}
#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
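
/*
 * Each of the macros above performs a guest memory access through the
 * CPU's softmmu TLB.  The final argument, (uintptr_t)tb_ptr, is the
 * "retaddr" used to attribute a possible fault back to this translation
 * block when the access unwinds.
 */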
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
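
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *   case INDEX_op_add_i64: case INDEX_op_add_i32:
 * so a single interpreter arm below handles both operand widths.
 */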
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint8_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);
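
    /*
     * Note that tcg_temps doubles as the TCG call stack:
     * TCG_REG_CALL_STACK points at the end of the buffer, so the
     * temporaries below it serve as the interpreter's stack frame.
     */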
    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        const uint8_t *old_code_ptr = tb_ptr;
#endif
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;
        TCGReg r0, r1, r2;

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_i(&tb_ptr);
            tci_tb_ptr = (uintptr_t)tb_ptr;
            /* Arguments are passed in R0-R11 (R0-R5 on a 64-bit host);
               the result returns in R0, or the R0/R1 pair on 32 bit. */
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5),
                                          tci_read_reg(regs, TCG_REG_R6),
                                          tci_read_reg(regs, TCG_REG_R7),
                                          tci_read_reg(regs, TCG_REG_R8),
                                          tci_read_reg(regs, TCG_REG_R9),
                                          tci_read_reg(regs, TCG_REG_R10),
                                          tci_read_reg(regs, TCG_REG_R11));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
            tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            label = tci_read_label(&tb_ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = (uint8_t *)label;
            continue;
        case INDEX_op_setcond_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;
            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;
            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
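
        /*
         * Worked example for the deposit mask above: with bit position
         * tmp16 = 8 and field length tmp8 = 4, tmp32 becomes
         * ((1 << 4) - 1) << 8 = 0xf00, so bits [11:8] of t1 are replaced
         * by the low 4 bits of t2.
         */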
        case INDEX_op_brcond_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 += tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 -= tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp64 = (uint32_t)tci_read_rval(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
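
        /*
         * The 32-bit-only cases above implement double-word arithmetic:
         * operands are assembled with tci_read_r64() and results split
         * back into a register pair with tci_write_reg64(); mulu2, for
         * instance, produces the full 64-bit product of two 32-bit values.
         */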
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_tci_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;
            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
        case INDEX_op_goto_tb:
            /* Jump address is aligned */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = qatomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
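
        /*
         * The 32-bit displacement read above lives in the bytecode
         * itself and is rewritten at runtime when this TB is chained to
         * its successor; qatomic_read() tolerates another thread
         * patching it concurrently.
         */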
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp32);
            break;
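
        /*
         * get_memop(oi) extracts the MemOp from the combined
         * operation-and-mmu-index value; masking with MO_BSWAP | MO_SSIZE
         * leaves exactly the size, signedness and endianness bits that
         * the dispatch above switches on.
         */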
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(regs, t1, tmp64 >> 32);
            }
            break;
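
        /*
         * On a 32-bit host the 64-bit result above is split across a
         * register pair (t0 low, t1 high), mirroring how tci_read_r64()
         * assembles operands.
         */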
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}