/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
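/*
 * Illustrative example of the scheme above (a sketch, not emitted verbatim;
 * exact opcodes and offsets depend on the guest/host configuration). An
 * empty instruction "udata" callback is delimited by plugin_cb_start and
 * plugin_cb_end marker ops:
 *
 *   plugin_cb_start  FROM_INSN, CB_UDATA, 0
 *   mov_i64   tmp0, $0x0                      <- userp placeholder
 *   ld_i32    tmp1, env, $cpu_index_offset    <- load cpu_index
 *   call      plugin_vcpu_udata_cb_no_rwg, tmp1, tmp0
 *   plugin_cb_end
 *
 * If two plugins subscribe (plugin_a_cb/plugin_b_cb and their userp values
 * are hypothetical names), plugin_gen_inject() replaces the region with two
 * filled-in copies, loading cpu_index only once:
 *
 *   mov_i64   tmp0, $plugin_a_userp
 *   ld_i32    tmp1, env, $cpu_index_offset
 *   call      plugin_a_cb, tmp1, tmp0
 *   mov_i64   tmp0, $plugin_b_userp
 *   call      plugin_b_cb, tmp1, tmp0
 */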
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#include "exec/helper-proto-common.h"

#define HELPER_H  "accel/tcg/plugin-helpers.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif
/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_UDATA_R,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};
/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }
static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb_no_wg(void)
{
    gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
}

static void gen_empty_udata_cb_no_rwg(void)
{
    gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
}
/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_ptr cpu_index_as_ptr = tcg_temp_ebb_new_ptr();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    /* second operand will be replaced by immediate value */
    tcg_gen_mul_i32(cpu_index, cpu_index, cpu_index);
    tcg_gen_ext_i32_ptr(cpu_index_as_ptr, cpu_index);

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_add_ptr(ptr, ptr, cpu_index_as_ptr);
    tcg_gen_ld_i64(val, ptr, 0);
    /* second operand will be replaced by immediate value */
    tcg_gen_add_i64(val, val, val);

    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(cpu_index_as_ptr);
    tcg_temp_free_i32(cpu_index);
}
static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
    TCGv_ptr udata = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_i32(meminfo, info);
    tcg_gen_movi_ptr(udata, 0);
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}
/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_movi_ptr(ptr, 0);
    tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}
static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}
static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
    gen_empty_mem_cb(addr, info);
    tcg_gen_plugin_cb_end();

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
    gen_empty_inline_cb();
    tcg_gen_plugin_cb_end();
}
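/*
 * So each instrumented memory access carries two adjacent placeholder
 * regions, one per callback flavour, which are filled in or removed
 * independently of each other:
 *
 *   plugin_cb_start  FROM_MEM, CB_MEM, rw
 *   ... empty mem-callback ops ...
 *   plugin_cb_end
 *   plugin_cb_start  FROM_MEM, CB_INLINE, rw
 *   ... empty inline ops ...
 *   plugin_cb_end
 */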
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}
static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_ld_i32(TCGOp **begin_op, TCGOp *op)
{
    return copy_op(begin_op, op, INDEX_op_ld_i32);
}

static TCGOp *copy_ext_i32_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        op = copy_op(begin_op, op, INDEX_op_ext_i32_i64);
    }
    return op;
}

static TCGOp *copy_add_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        op = copy_op(begin_op, op, INDEX_op_add_i32);
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_ld_i32(begin_op, op);
        op = copy_ld_i32(begin_op, op);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
{
    op = copy_op(begin_op, op, INDEX_op_mul_i32);
    op->args[2] = tcgv_i32_arg(tcg_constant_i32(v));
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}
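/*
 * A TCG call op lays out args[] as: output args, then input args, then the
 * target function pointer, so func_idx above is host- and guest-independent.
 * For the udata stub (no outputs, two inputs) this looks like:
 *
 *   args[0] = cpu_index argument
 *   args[1] = udata argument
 *   args[2] = (uintptr_t)helper    <- overwritten with func
 */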
/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* call */
    op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);

    return op;
}
static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    char *ptr = cb->userp;
    size_t elem_size = 0;
    size_t offset = 0;

    op = copy_ld_i32(&begin_op, op);
    op = copy_mul_i32(&begin_op, op, elem_size);
    op = copy_ext_i32_ptr(&begin_op, op);
    op = copy_const_ptr(&begin_op, op, ptr + offset);
    op = copy_add_ptr(&begin_op, op);
    op = copy_ld_i64(&begin_op, op);
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);
    op = copy_st_i64(&begin_op, op);
    return op;
}
static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
    }

    return op;
}
typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}
static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}
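/*
 * Worked example: with two registered callbacks, the first inject() call
 * copies the template after end_op with cb_idx == -1, so the cpu_index
 * load is copied too; the second sees cb_idx >= 0 and skips the load,
 * appending only the remaining ops (see append_udata_cb()). Finally
 * rm_ops_range() deletes the original begin_op..end_op template, leaving
 * just the two filled-in copies in the op stream.
 */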
static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}
/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}
/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of it so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
                                     struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    ptb->mem_helper = true;

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}
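/*
 * Runtime view of the above, as a sketch (helper_foo stands in for
 * whatever helper the target emits for the instruction):
 *
 *   cpu->plugin_mem_cbs = arr;    enable, emitted before the insn
 *   helper_foo(env, ...);         memory accesses funnel through
 *                                 qemu_plugin_vcpu_mem_cb(), which walks arr
 *   cpu->plugin_mem_cbs = NULL;   disable, emitted after the insn
 */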
static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    /*
     * We could emit the clearing unconditionally and be done. However, this can
     * be wasteful if for instance plugins don't track memory accesses, or if
     * most TBs don't use helpers. Instead, emit the clearing iff the TB calls
     * helpers that might access guest memory.
     *
     * Note: we do not reset plugin_tb->mem_helper here; a TB might have several
     * exit points, and we want to emit the clearing from all of them.
     */
    if (!tcg_ctx->plugin_tb->mem_helper) {
        return;
    }
    tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}
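/*
 * When the gate is taken, the clearing is a single store of a constant
 * NULL at plugin_mem_cbs's env-relative offset, e.g. on a 64-bit host
 * (offset shown symbolically):
 *
 *   st_i64  $0x0, env, $(offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env))
 */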
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
                                    TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(ptb, insn, begin_op);
}

static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}
/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_UDATA_R:
                    plugin_gen_tb_udata_r(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_UDATA_R:
                    plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;
        ptb->mem_helper = false;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}
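/*
 * Page-crossing example (illustrative addresses, 4k pages): for a TB with
 * pc_first == 0x4000ffc, vaddr2 becomes TARGET_PAGE_ALIGN(0x4000ffc) ==
 * 0x4001000; an instruction at vaddr == 0x4001004 then resolves to
 * haddr == haddr2 + 0x4.
 */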
void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}
/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here and make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}
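/*
 * For reference, qemu_plugin_tb_trans_cb() ends up invoking plugin handlers
 * along these lines (a sketch; my_exec_cb is a hypothetical plugin function,
 * the qemu_plugin_* calls are the public plugin API):
 *
 *   static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t i, n = qemu_plugin_tb_n_insns(tb);
 *
 *       for (i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           qemu_plugin_register_vcpu_insn_exec_cb(insn, my_exec_cb,
 *                                                  QEMU_PLUGIN_CB_NO_REGS,
 *                                                  NULL);
 *       }
 *   }
 *
 * The requests recorded by such handlers are what plugin_gen_inject()
 * then splices into the op stream.
 */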