    }
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+    /*
+     * The branch displacement must be aligned for atomic patching;
+     * see if we need to add an extra nop before the branch.
+     */
+    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+        tcg_out16(s, NOP);
+    }
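+    /* BRCL: 2-byte opcode/mask plus a 32-bit displacement in halfwords. */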
+    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+    set_jmp_insn_offset(s, which);
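+    /*
+     * The displacement field begins at the offset just recorded; skip
+     * over it so it can be patched atomically when the TB is linked.
+     */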
+    s->code_ptr += 2;
+    set_jmp_reset_offset(s, which);
+}
+
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
    TCGArg a0, a1, a2;
    switch (opc) {
-    case INDEX_op_goto_tb:
-        a0 = args[0];
-        /*
-         * branch displacement must be aligned for atomic patching;
-         * see if we need to add extra nop before branch
-         */
-        if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
-            tcg_out16(s, NOP);
-        }
-        tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-        set_jmp_insn_offset(s, a0);
-        s->code_ptr += 2;
-        set_jmp_reset_offset(s, a0);
-        break;
-
    case INDEX_op_goto_ptr:
        a0 = args[0];
        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
+    case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
    default:
        tcg_abort();
    }