/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed. */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

__thread Monitor *cur_mon;

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}
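
/*
 * Hypothetical usage sketch (not part of this file): an HMP command
 * handler reports human-readable results through monitor_printf().
 * On a QMP monitor, or with no monitor at all, monitor_vprintf()
 * returns -1 and prints nothing.  The handler name and the "name"
 * argument below are illustrative only:
 *
 *     static void hmp_do_something(Monitor *mon, const QDict *qdict)
 *     {
 *         const char *name = qdict_get_try_str(qdict, "name");
 *
 *         monitor_printf(mon, "doing something with '%s'\n",
 *                        name ? name : "(none)");
 *     }
 */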

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}


static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
};
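
/*
 * To throttle another guest-triggerable event, add an entry to the
 * table above; events without an entry are emitted unthrottled.  A
 * hypothetical sketch (QAPI_EVENT_FOO is a placeholder, not a real
 * event):
 *
 *     [QAPI_EVENT_FOO] = { 1000 * SCALE_MS },
 */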

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object.  Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send.  Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns.  Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock.  Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}
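
/*
 * Callers normally don't invoke qapi_event_emit() directly; the
 * generated qapi_event_send_FOO() helpers build the event QDict and
 * route it here.  A minimal direct-call sketch (assuming a QDict
 * shaped like a generated event, with POWERDOWN used only as an
 * illustration):
 *
 *     QDict *qdict = qdict_new();
 *
 *     qdict_put_str(qdict, "event", "POWERDOWN");
 *     qapi_event_emit(QAPI_EVENT_POWERDOWN, qdict);
 *     qobject_unref(qdict);
 */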

/*
 * This function runs evconf->rate ns after sending a throttled
 * event.
 * If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}

static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}
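
/*
 * qapi_event_throttle_hash() and qapi_event_throttle_equal() key the
 * monitor_qapi_event_state hash table on the event type plus the "id"
 * (VSERPORT_CHANGE) or "node-name" (QUORUM_REPORT_BAD) member of the
 * event data, so those events are throttled per device or per node
 * rather than per event type alone.
 */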

int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    atomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect.  It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}
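
/*
 * Hedged usage sketch: code that must temporarily stop a monitor from
 * reading further input (e.g. while an asynchronous operation
 * completes) pairs the two calls above.  Suspension nests via
 * suspend_cnt, so every successful suspend needs a matching resume:
 *
 *     if (monitor_suspend(mon) < 0) {
 *         return;            // non-interactive HMP monitor
 *     }
 *     ... do work ...
 *     monitor_resume(mon);
 */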

int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !atomic_mb_read(&mon->suspend_cnt);
}

void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * We need to explicitly stop the I/O thread (but not destroy it),
     * clean up the monitor resources, then destroy the I/O thread since
     * we need to unregister from chardev below in
     * monitor_data_destroy(), and chardev is not thread-safe yet
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs need to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context.  It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
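
/*
 * qemu_mon_opts backs the -mon command line option.  One possible
 * invocation (the chardev id "mon0" and socket path are illustrative,
 * and assume a matching -chardev definition):
 *
 *     -chardev socket,id=mon0,path=/tmp/qmp.sock,server,nowait \
 *     -mon chardev=mon0,mode=control,pretty=on
 */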