/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Coroutine to dispatch the requests received from I/O thread */
Coroutine *qmp_dispatcher_co;

/* Set to true when the dispatcher coroutine should terminate */
bool qmp_dispatcher_co_shutdown;

/*
 * qmp_dispatcher_co_busy is used for synchronisation between the
 * monitor thread and the main thread to ensure that the dispatcher
 * coroutine never gets scheduled a second time when it's already
 * scheduled (scheduling the same coroutine twice is forbidden).
 *
 * It is true if the coroutine is active and processing requests.
 * Additional requests may then be pushed onto mon->qmp_requests,
 * and @qmp_dispatcher_co_shutdown may be set without further ado.
 * @qmp_dispatcher_co must not be woken up in this case.
 *
 * If false, you also have to set @qmp_dispatcher_co_busy to true and
 * wake up @qmp_dispatcher_co after pushing the new requests.
 *
 * The coroutine will automatically change this variable back to false
 * before it yields. Nobody else may set the variable to false.
 *
 * Access must be atomic for thread safety.
 */
bool qmp_dispatcher_co_busy;

/*
 * Protects mon_list, monitor_qapi_event_state, coroutine_mon,
 * monitor_destroyed.
 */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;
static GHashTable *coroutine_mon; /* Maps Coroutine* to Monitor* */

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

Monitor *monitor_cur(void)
{
    Monitor *mon;

    qemu_mutex_lock(&monitor_lock);
    mon = g_hash_table_lookup(coroutine_mon, qemu_coroutine_self());
    qemu_mutex_unlock(&monitor_lock);

    return mon;
}

/**
 * Sets a new current monitor and returns the old one.
 *
 * If a non-NULL monitor is set for a coroutine, another call
 * resetting it to NULL is required before the coroutine terminates,
 * otherwise a stale entry would remain in the hash table.
 */
Monitor *monitor_set_cur(Coroutine *co, Monitor *mon)
{
    Monitor *old_monitor = monitor_cur();

    qemu_mutex_lock(&monitor_lock);
    if (mon) {
        g_hash_table_replace(coroutine_mon, co, mon);
    } else {
        g_hash_table_remove(coroutine_mon, co);
    }
    qemu_mutex_unlock(&monitor_lock);

    return old_monitor;
}

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    Monitor *cur_mon = monitor_cur();

    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

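/*
 * Watch callback registered by monitor_flush_locked(): the chardev
 * became writable again, so retry flushing the output buffer.
 * Returning FALSE removes the watch after one invocation.
 */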
static gboolean monitor_unblocked(void *do_not_use, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = mon->outbuf->str;
    len = mon->outbuf->len;

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            g_string_truncate(mon->outbuf, 0);
            return;
        }
        if (rc > 0) {
            /* partial write */
            g_string_erase(mon->outbuf, 0, rc);
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            g_string_append_c(mon->outbuf, '\r');
        }
        g_string_append_c(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

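/*
 * Print to @mon with vprintf semantics.  Returns the number of
 * characters printed, or -1 if @mon is NULL or a QMP monitor.
 */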
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

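/* Print @c as a single-quoted character literal, escaping it as needed. */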
void monitor_printc(Monitor *mon, int c)
{
    monitor_printf(mon, "'");
    switch(c) {
    case '\'':
        monitor_printf(mon, "\\'");
        break;
    case '\\':
        monitor_printf(mon, "\\\\");
        break;
    case '\n':
        monitor_printf(mon, "\\n");
        break;
    case '\r':
        monitor_printf(mon, "\\r");
        break;
    default:
        if (c >= 32 && c <= 126) {
            monitor_printf(mon, "%c", c);
        } else {
            monitor_printf(mon, "\\x%02x", c);
        }
        break;
    }
    monitor_printf(mon, "'");
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    Monitor *cur_mon = monitor_cur();

    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}

int error_printf_unless_qmp(const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = error_vprintf_unless_qmp(fmt, ap);
    va_end(ap);
    return ret;
}

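/* Per-event rate limits; events without an entry below are not throttled. */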
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
    [QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE] = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    QEMU_LOCK_GUARD(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }
}

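/*
 * Emit @event to all QMP monitors, subject to the rate limits in
 * monitor_qapi_event_conf.  Re-entrant calls (events raised while an
 * event is being emitted) are queued in thread-local storage and
 * drained by the outermost call.
 */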
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    QEMU_LOCK_GUARD(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }
}

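/*
 * Throttling state is keyed on the event type plus a discriminating
 * data member for some events: "id" for VSERPORT_CHANGE, "node-name"
 * for QUORUM_REPORT_BAD, "qom-path" for MEMORY_DEVICE_SIZE_CHANGE.
 * This keeps unrelated sources from sharing one rate limit.
 */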
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "qom-path"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "qom-path"),
                       qdict_get_str(evb->data, "qom-path"));
    }

    return TRUE;
}

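/*
 * Suspend input on @mon.  Calls nest; input is accepted again only
 * after a matching number of monitor_resume() calls.
 * Returns 0 on success, -ENOTTY for non-interactive HMP monitors,
 * which cannot be suspended.
 */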
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    qatomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

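/* Undo one monitor_suspend(); input resumes once the count drops to zero. */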
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

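/* Chardev can-read handler: accept input only while @mon is not suspended. */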
int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !qatomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = g_string_new(NULL);
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    g_string_free(mon->outbuf, true);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The dispatcher needs to stop before destroying the monitor and
     * the I/O thread.
     *
     * We need to poll both qemu_aio_context and iohandler_ctx to make
     * sure that the dispatcher coroutine keeps making progress and
     * eventually terminates. qemu_aio_context is automatically
     * polled by calling AIO_WAIT_WHILE on it, but we must poll
     * iohandler_ctx manually.
     *
     * Letting the iothread continue while shutting down the dispatcher
     * means that new requests may still be coming in. This is okay,
     * we'll just leave them in the queue without sending a response
     * and monitor_data_destroy() will free them.
     */
    qmp_dispatcher_co_shutdown = true;
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_mb_read(&qmp_dispatcher_co_busy)));

    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);
    coroutine_mon = g_hash_table_new(NULL, NULL);

    /*
     * The dispatcher coroutine must run in the main loop thread, since
     * we have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_co = qemu_coroutine_create(monitor_qmp_dispatcher_co, NULL);
    qatomic_mb_set(&qmp_dispatcher_co_busy, true);
    aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
}

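/*
 * Create a QMP or HMP monitor on the chardev named by @opts->chardev.
 * When @opts does not specify a mode, default to HMP if @allow_hmp is
 * true, to QMP otherwise.  Returns 0 on success, -1 on error with
 * @errp set.
 */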
int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp)
{
    ERRP_GUARD();
    Chardev *chr;

    chr = qemu_chr_find(opts->chardev);
    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return -1;
    }

    if (!opts->has_mode) {
        opts->mode = allow_hmp ? MONITOR_MODE_READLINE : MONITOR_MODE_CONTROL;
    }

    switch (opts->mode) {
    case MONITOR_MODE_CONTROL:
        monitor_init_qmp(chr, opts->pretty, errp);
        break;
    case MONITOR_MODE_READLINE:
        if (!allow_hmp) {
            error_setg(errp, "Only QMP is supported");
            return -1;
        }
        if (opts->pretty) {
            error_setg(errp, "'pretty' is not compatible with HMP monitors");
            return -1;
        }
        monitor_init_hmp(chr, true, errp);
        break;
    default:
        g_assert_not_reached();
    }

    return *errp ? -1 : 0;
}

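/* Convert a -mon QemuOpts group to MonitorOptions and create the monitor. */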
int monitor_init_opts(QemuOpts *opts, Error **errp)
{
    Visitor *v;
    MonitorOptions *options;
    int ret;

    v = opts_visitor_new(opts);
    visit_type_MonitorOptions(v, NULL, &options, errp);
    visit_free(v);
    if (!options) {
        return -1;
    }

    ret = monitor_init(options, true, errp);
    qapi_free_MonitorOptions(options);
    return ret;
}

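/* Recognized -mon options; "chardev" is the implied first option. */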
QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};