/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
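/*
 * As an illustration (a sketch, not an exact op listing; the precise
 * opcodes depend on the guest and host configuration), the empty
 * instruction callback emitted by gen_empty_udata_cb() below expands to
 * roughly:
 *
 *   plugin_cb_start  PLUGIN_GEN_FROM_INSN, PLUGIN_GEN_CB_UDATA, 0
 *   ld_i32           tmp0, env, #offset-of-cpu_index
 *   mov              tmp1, $0x0               <- NULL userdata placeholder
 *   call             plugin_vcpu_udata_cb, tmp0, tmp1
 *   plugin_cb_end
 *
 * plugin_gen_inject() later copies this template once per registered
 * callback, patching the placeholder and the callee pointer, and then
 * removes the template's ops.
 */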
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: enum qemu_plugin_mem_rw of the access, for mem callbacks; 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

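/*
 * Emit the template for a memory callback: load cpu_index from env, widen
 * the guest virtual address to 64 bits, and call the (empty) mem helper
 * with a NULL userdata placeholder for plugin_gen_inject() to patch later.
 */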
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
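/*
 * After append_inline_cb() patches the pointer and the immediate, the
 * filled-in sequence above is equivalent to a plain counter update:
 *
 *   *(uint64_t *)ptr += imm;
 */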

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    tcg_gen_plugin_cb_start(from, type, wr);
}

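/*
 * Bracket an empty callback between plugin_cb_start/plugin_cb_end marker
 * ops, so that plugin_gen_inject() can later locate the whole sequence and
 * either fill it in or remove it.
 */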
static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
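/*
 * For reference, these empty callbacks get filled in when a plugin
 * subscribes from its qemu_plugin_vcpu_tb_trans callback, along the lines
 * of (see include/qemu/qemu-plugin.h for the public API):
 *
 *   qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
 *                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                    QEMU_PLUGIN_MEM_RW, udata);
 */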

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

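/* Unlink ops [begin, end] from the list and return the op following end. */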
static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

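/*
 * Advance *begin_op to the next op in the template sequence and insert a
 * copy of it after @op in the output stream, carrying the args over.
 */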
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    TCGOp *old_op = QTAILQ_NEXT(*begin_op, link);
    unsigned nargs = old_op->nargs;

    *begin_op = old_op;
    op = tcg_op_insert_after(tcg_ctx, op, old_op->opc, nargs);
    memcpy(op->args, old_op->args, sizeof(op->args[0]) * nargs);

    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

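/*
 * Copy template ops up to and including the call, then patch the callee
 * pointer: as described in the file-header comment, the helper pointer
 * lives in args[] right after the output and input call arguments, i.e.
 * at index TCGOP_CALLO() + TCGOP_CALLI(). The index is returned through
 * *cb_idx, which also tells the append_*_cb() callers that the first
 * callback has already been emitted.
 */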
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, int *cb_idx)
{
    TCGOp *old_op;
    int func_idx;

    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    old_op = *begin_op;
    TCGOP_CALLI(op) = TCGOP_CALLI(old_op);
    TCGOP_CALLO(op) = TCGOP_CALLO(old_op);
    tcg_debug_assert(op->life == 0);

    func_idx = TCGOP_CALLO(op) + TCGOP_CALLI(op);
    *cb_idx = func_idx;
    op->args[func_idx] = (uintptr_t)func;

    return op;
}

/*
 * The append/replace helpers below are sensitive to the exact pattern of
 * TCGOps generated by the tcg_gen_FOO calls that emitted the empty
 * callbacks. Any mismatch asserts very quickly in a debug build, since we
 * check that the ops we are replacing are the expected ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    if (*cb_idx == -1) {
        op = copy_op(&begin_op, op, INDEX_op_ld_i32);
    } else {
        begin_op = QTAILQ_NEXT(begin_op, link);
        tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

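/*
 * plugin_cb_start args[2] carries the qemu_plugin_mem_rw value of the
 * access (see gen_mem_wrapped()); only inject callbacks whose rw mask
 * overlaps with it.
 */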
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & w);
}

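/*
 * Inject the registered callbacks for a single empty-callback template:
 * copy the template once per callback that passes the ok() filter, then
 * remove the template's ops.
 */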
static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of them so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors,
 * so that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}

static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;
        case INDEX_op_plugin_cb_start:
        {
            enum plugin_gen_from from = op->args[0];
            enum plugin_gen_cb type = op->args[1];

            switch (from) {
            case PLUGIN_GEN_FROM_TB:
            {
                g_assert(insn_idx == -1);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_tb_udata(plugin_tb, op);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_tb_inline(plugin_tb, op);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_UDATA:
                    plugin_gen_insn_udata(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_insn_inline(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_ENABLE_MEM_HELPER:
                    plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            case PLUGIN_GEN_FROM_MEM:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_CB_MEM:
                    plugin_gen_mem_regular(plugin_tb, op, insn_idx);
                    break;
                case PLUGIN_GEN_CB_INLINE:
                    plugin_gen_mem_inline(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }

                break;
            }
            case PLUGIN_GEN_AFTER_INSN:
            {
                g_assert(insn_idx >= 0);

                switch (type) {
                case PLUGIN_GEN_DISABLE_MEM_HELPER:
                    plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }
            default:
                g_assert_not_reached();
            }
            break;
        }
        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
    pr_ops();
}

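/*
 * Called at the start of TB translation. Returns true, and emits the
 * TB-level empty callbacks, iff some plugin subscribes to TB translation
 * events on this vCPU.
 */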
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
                         bool mem_only)
{
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
        int i;

        /* reset callbacks */
        for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
            if (ptb->cbs[i]) {
                g_array_set_size(ptb->cbs[i], 0);
            }
        }
        ptb->n = 0;

        ret = true;

        ptb->vaddr = db->pc_first;
        ptb->vaddr2 = -1;
        ptb->haddr1 = db->host_addr[0];
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }

    tcg_ctx->plugin_insn = NULL;

    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
    tcg_ctx->plugin_insn = pinsn;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (ptb->haddr1 == NULL) {
        pinsn->haddr = NULL;
    } else if (is_same_page(db, db->pc_next)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        if (ptb->vaddr2 == -1) {
            ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
            get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
        }
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation, for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here; instead, everything is reset in
 * plugin_gen_tb_start().
 */
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);
}