/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */
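/*
 * As a rough sketch (not literal TCG output), the ops emitted for one
 * guest instruction initially look like:
 *
 *     plugin_cb PLUGIN_GEN_FROM_INSN
 *     ... ops for the translated instruction ...
 *     plugin_mem_cb $addr, $meminfo      (one per guest memory access)
 *     plugin_cb PLUGIN_GEN_AFTER_INSN
 *
 * plugin_gen_inject() below then walks the op stream once, expanding
 * each marker into calls to the callbacks that plugins registered for
 * that event, or simply deleting the marker when nothing was registered.
 */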
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

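/*
 * Injection points, passed as the argument of the "plugin_cb" marker
 * opcode and consumed by plugin_gen_inject() below.
 */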
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/*
 * Called before finishing a TB with exit_tb, goto_tb or goto_ptr.
 * Emits a marker so that CPUState.neg.plugin_mem_cbs can be cleared
 * before leaving the TB; see gen_enable_mem_helper() below.
 */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

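/*
 * Emit code that makes the instruction's memory callback descriptors
 * visible to helpers: copy them into a stable array and store that
 * array's address into CPUState.neg.plugin_mem_cbs.
 */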
static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of them so that they can
     * be freed later on, and (2) point CPUState.neg.plugin_mem_cbs to the
     * descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    memcpy(arr->data, insn->mem_cbs->data,
           len * sizeof(struct qemu_plugin_dyn_cb));
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

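/* Emit code that clears CPUState.neg.plugin_mem_cbs. */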
static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

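/* Emit a call to a regular callback: cb(cpu_index, userdata). */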
static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

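/*
 * Emit an inline add: load the current vcpu's u64 scoreboard entry,
 * add the immediate, and store it back, without calling out to a helper.
 */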
static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
{
    GArray *arr = cb->inline_insn.entry.score->data;
    size_t offset = cb->inline_insn.entry.offset;
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);

    tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
    tcg_gen_ld_i64(val, ptr, offset);
    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
    tcg_gen_st_i64(val, ptr, offset);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

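/* Emit a call to a memory callback: cb(cpu_index, meminfo, addr, userdata). */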
static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call4(cb->regular.f.vcpu_mem, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

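/*
 * Expand one callback descriptor into generated code: either a regular
 * call or an inline operation.
 */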
static void inject_cb(struct qemu_plugin_dyn_cb *cb)
{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(cb);
        break;
    case PLUGIN_CB_INLINE:
        gen_inline_cb(cb);
        break;
    default:
        g_assert_not_reached();
    }
}

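/*
 * As above, but only for callbacks whose read/write mask matches the
 * access, and dispatching PLUGIN_CB_MEM_REGULAR to gen_mem_cb().
 */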
static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    if (cb->rw & rw) {
        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            gen_mem_cb(cb, meminfo, addr);
            break;
        default:
            inject_cb(cb);
            break;
        }
    }
}

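/*
 * Single pass over the op stream: for every "plugin_cb" and
 * "plugin_mem_cb" marker, emit the requested callbacks immediately
 * before the marker and then delete the marker itself.
 */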
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new.
     */
    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

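/*
 * Called by the translator before translating a TB. Returns true (and
 * emits the PLUGIN_GEN_FROM_TB marker) when at least one plugin has
 * subscribed to TB translation events.
 */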
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

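/*
 * Called by the translator before each instruction: (re)initialise the
 * per-instruction descriptor and emit the PLUGIN_GEN_FROM_INSN marker.
 */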
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

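/*
 * Called by the translator after each instruction: record its length
 * and emit the PLUGIN_GEN_AFTER_INSN marker.
 */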
void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we don't do any
 * clean-up here; instead, state is reset in plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}