/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the
 * translated code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"
#include "qemu/compiler.h"

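/*
 * One callback a plugin has registered for a given event; these live on
 * the per-event RCU lists in the global plugin state below.
 */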
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

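/*
 * Map a plugin id back to its context. Must be called with plugin.lock
 * held; aborts if the id is unknown.
 */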
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

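/*
 * Runs in the target vCPU's context (scheduled via async_run_on_cpu):
 * update the vCPU's event mask and flush its jump cache so that stale
 * translations are not reused.
 */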
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

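/*
 * Push the global event mask to a single vCPU; called for each entry in
 * plugin.cpu_ht with plugin.lock held. Realized vCPUs are updated
 * asynchronously in their own context, unrealized ones directly.
 */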
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

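/*
 * Drop a context's callback for @ev. If that was the last callback for the
 * event, clear the event bit and propagate the new mask to all vCPUs.
 * Called with plugin.lock held.
 */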
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

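/*
 * Dispatch helpers: walk the RCU list of callbacks registered for an event
 * and invoke each plugin's handler with the matching signature.
 */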
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

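/*
 * Install, replace, or (when @func is NULL) remove a context's callback for
 * @ev, taking plugin.lock. Requests from a plugin that is currently being
 * uninstalled are ignored.
 */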
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

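/*
 * vCPU lifetime hooks: track the vCPU in plugin.cpu_ht, make sure it has an
 * up-to-date event mask, and fire the plugins' VCPU_INIT / VCPU_EXIT
 * callbacks.
 */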
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

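/* Invoke @cb once for every vCPU currently tracked, under plugin.lock */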
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

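/*
 * Record an inline operation (e.g. add an immediate to the 64-bit counter
 * at @ptr) in the dynamic callback array, allocating the array on first use.
 */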
void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

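/*
 * The registration helpers below back the corresponding qemu-plugin.h API.
 * A plugin would typically call them from its install entry point, roughly
 * like this (illustrative sketch only, not part of this file):
 *
 *   static void vcpu_idle(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       g_autofree gchar *msg = g_strdup_printf("vcpu %u idle\n", vcpu_index);
 *       qemu_plugin_outs(msg);
 *   }
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                              const qemu_info_t *info,
 *                                              int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);
 *       return 0;
 *   }
 */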
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

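/*
 * Execute an inline operation directly (outside generated code); in this
 * file the only supported op is adding an immediate to a 64-bit counter.
 */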
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

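/*
 * Dispatch the per-instruction dynamic memory callbacks (regular or inline)
 * for a single guest memory access, honouring each callback's read/write
 * mask.
 */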
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

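/*
 * Constructor: set up the global plugin state (lock, id/cpu hash tables,
 * per-event callback lists, dynamic callback QHT) before any plugin can be
 * loaded, and arrange for the atexit callbacks to run on normal exit.
 */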
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}