/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
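
/*
 * For orientation, a sketch of the plugin side (see the actual API in
 * include/qemu/qemu-plugin.h; the callback names insn_exec and mem_access
 * below are made up): a plugin's TB-translation hook walks the translated
 * instructions and subscribes to the events it cares about, e.g.:
 *
 *   static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t i;
 *
 *       for (i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *           qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
 *                                                  QEMU_PLUGIN_CB_NO_REGS,
 *                                                  NULL);
 *           qemu_plugin_register_vcpu_mem_cb(insn, mem_access,
 *                                            QEMU_PLUGIN_CB_NO_REGS,
 *                                            QEMU_PLUGIN_MEM_RW, NULL);
 *       }
 *   }
 *
 * Each such registration becomes a qemu_plugin_dyn_cb descriptor, and the
 * injection pass below turns those descriptors into real TCG ops.
 */
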
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for a mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for direct
 * calls into the plugins' callbacks once plugins have subscribed to them.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }
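
/*
 * The switch-out happens in copy_call() below: since each stub's address
 * is a unique pointer, we can locate it in the call op's args[] and
 * overwrite it with the plugin-supplied function.
 */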

static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}
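
/*
 * On a 64-bit host the op sequence emitted above is roughly:
 *
 *   mov_i32  tmp_info, $info        <- tcg_const_i32()
 *   mov_i64  tmp_udata, $0          <- tcg_const_ptr(NULL)
 *   ld_i32   tmp_cpu_index, env, $off
 *   extu_i32_i64 or mov_i64 ...     <- tcg_gen_extu_tl_i64()
 *   call     plugin_vcpu_mem_cb
 *
 * This exact shape is what the copy_* helpers further down pattern-match
 * when the empty callback is filled in, so changing the generation here
 * requires updating them too (the debug asserts catch mismatches).
 */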

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
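
/*
 * When this template is filled in (see append_inline_cb below), the NULL
 * pointer becomes cb->userp, i.e. the address of the plugin's 64-bit
 * counter, and 0xdeadface becomes the immediate the plugin asked to add,
 * so the net effect of the injected ops is: *counter += imm.
 */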

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}
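
/*
 * That is, for ENABLE the injected code ends up storing a pointer to the
 * instruction's array of memory-callback descriptors into
 * CPUState.plugin_mem_cbs (see inject_mem_enable_helper below), while for
 * DISABLE the NULL store is kept as-is, clearing the field.
 */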

static void gen_plugin_cb_start(enum plugin_gen_from from,
                                enum plugin_gen_cb type, unsigned wr)
{
    TCGOp *op;

    tcg_gen_plugin_cb_start(from, type, wr);
    op = tcg_last_op();
    QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}
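
/*
 * The resulting stream of empty callbacks for one instruction is thus,
 * in order:
 *
 *   plugin_cb_start FROM_INSN  ENABLE_MEM_HELPER  ... plugin_cb_end
 *   plugin_cb_start FROM_INSN  CB_UDATA           ... plugin_cb_end
 *   plugin_cb_start FROM_INSN  CB_INLINE          ... plugin_cb_end
 *   <translated guest instruction, with FROM_MEM callbacks per access>
 *   plugin_cb_start AFTER_INSN DISABLE_MEM_HELPER ... plugin_cb_end
 */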

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    int wr = !!(info & TRACE_MEM_ST);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
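
/*
 * Called (via the qemu_ld/st expanders in tcg/tcg-op.c) whenever a guest
 * memory access is translated. Note that two templates are emitted per
 * access, one for a regular callback and one for an inline op, so that
 * either kind of subscription can be filled in independently.
 */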

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}
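
/*
 * Note that tcg_constant_i32/_i64 return interned constant temps, so
 * overwriting args[1] with a new constant argument is all that is needed;
 * no extra op has to be emitted to materialize the pointer value.
 */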

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1];

    return op;
}

/*
 * When appending/replacing ops here we are sensitive to the exact pattern
 * of TCGOps that the tcg_gen_FOO calls produced when the empty callbacks
 * were generated. Any mismatch asserts very quickly in a debug build,
 * since we check that each op we replace is the expected one.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}
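
/*
 * args[2] is the 'wr' flag set in gen_plugin_cb_start(): 0 for a read,
 * 1 for a write. w + 1 therefore yields QEMU_PLUGIN_MEM_R (1) or
 * QEMU_PLUGIN_MEM_W (2), which is tested against the read/write mask
 * the plugin registered the callback with.
 */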

static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
                           inject_fn inject, op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}
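
/*
 * In other words: the empty ops between plugin_cb_start and plugin_cb_end
 * serve as a template. One filled-in copy per registered callback is
 * appended after plugin_cb_end, and the template itself is then removed.
 */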

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of the copy so that it can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors,
 * so that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

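/*
 * This is the open-coded equivalent of the DISABLE_MEM_HELPER injection
 * above: it emits a store of NULL into CPUState.plugin_mem_cbs directly,
 * without going through the template machinery, and only when the current
 * instruction actually enabled the mem helpers.
 */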
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
                             int insn_idx)
{
    enum plugin_gen_from from = begin_op->args[0];
    enum plugin_gen_cb type = begin_op->args[1];

    switch (from) {
    case PLUGIN_GEN_FROM_TB:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_tb_udata(ptb, begin_op);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_tb_inline(ptb, begin_op);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_INSN:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_insn_udata(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_insn_inline(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_ENABLE_MEM_HELPER:
            plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_MEM:
        switch (type) {
        case PLUGIN_GEN_CB_MEM:
            plugin_gen_mem_regular(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_mem_inline(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_AFTER_INSN:
        switch (type) {
        case PLUGIN_GEN_DISABLE_MEM_HELPER:
            plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}

static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx;

    pr_ops();
    insn_idx = -1;
    QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
        enum plugin_gen_from from = op->args[0];
        enum plugin_gen_cb type = op->args[1];

        tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
        /* ENABLE_MEM_HELPER is the first callback of an instruction */
        if (from == PLUGIN_GEN_FROM_INSN &&
            type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
            insn_idx++;
        }
        plugin_inject_cb(plugin_tb, op, insn_idx);
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        ret = true;

        QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }
    return ret;
}

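/*
 * Besides emitting the per-insn empty callbacks, this records each
 * instruction's host address. For instructions on the TB's first page,
 * haddr = haddr1 + (vaddr - ptb->vaddr); once the TB crosses a page,
 * vaddr2/haddr2 take over, e.g. an insn at guest pc vaddr2 + 4 maps to
 * host address haddr2 + 4.
 */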
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb);
    tcg_ctx->plugin_insn = pinsn;
    pinsn->vaddr = db->pc_next;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    int i;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* clean up */
    for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
        if (ptb->cbs[i]) {
            g_array_set_size(ptb->cbs[i], 0);
        }
    }
    ptb->n = 0;
    tcg_ctx->plugin_insn = NULL;
}