/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"

BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs,
                                        bool flat,
                                        Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0, *backing;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bdrv_is_read_only(bs);
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    backing = bdrv_cow_bs(bs);
    if (backing) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(backing->filename);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

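        /*
         * For the optional throttling fields below, a limit of 0 means
         * "not configured", so the raw max/burst value doubles as the
         * QAPI has_* presence flag.
         */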
        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

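    /*
     * Collect ImageInfo for this node and, unless a flat result was
     * requested, walk down the backing/filter chain so that each level
     * gets its own nested backing-image entry.
     */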
    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        /* stop gathering data for flat output */
        if (flat) {
            break;
        }

        if (bs0->drv && bdrv_filter_or_cow_child(bs0)) {
            /*
             * Put any filtered child here (for backwards compatibility to when
             * we put bs0->backing here, which might be any filtered child).
             */
            info->backing_file_depth++;
            bs0 = bdrv_filter_or_cow_bs(bs0);
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        if (blk) {
            bs0 = bdrv_skip_implicit_filters(bs0);
        }
    }

    return info;
}

/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots. Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *head = NULL, **tail = &head;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
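        /*
         * vm_clock_nsec stores the guest clock as a single nanosecond
         * count; split it into whole seconds and the remainder. An icount
         * of -1 means no instruction count was recorded for this snapshot.
         */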
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;
        info->icount = sn_tab[i].icount;
        info->has_icount = sn_tab[i].icount != -1ULL;

        QAPI_LIST_APPEND(tail, info);
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}

/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*p_info)->has_backing_image will be set to false and
 * (*p_info)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename = g_strdup(bs->filename);
    info->format = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size = size;
    info->actual_size = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bs->encrypted) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    info->has_format_specific = info->format_specific != NULL;

    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;

        info->backing_filename = g_strdup(backing_filename);
        info->has_backing_filename = true;
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's
         * the same as backing_filename. That they are the same is useful
         * information. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
            info->has_full_backing_filename = true;
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
            info->has_backing_filename_format = true;
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}

/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    bs = bdrv_skip_implicit_filters(bs);

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, false, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

err:
    qapi_free_BlockInfo(info);
}

static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **tail = &out_list;

    for (i = 0; i < size; i++) {
        QAPI_LIST_APPEND(tail, list[i]);
    }

    return out_list;
}

static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
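    /*
     * hist->nbins bins are separated by nbins - 1 interior boundaries, so
     * the boundaries list is one element shorter than the bins list.
     */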
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}

static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

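    /*
     * Each configured stats interval gets its own BlockDeviceTimedStats
     * entry with min/max/average latencies and average queue depths over
     * that interval.
     */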
    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);

        QAPI_LIST_PREPEND(ds->timed_stats, dev_stats);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}

static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BdrvChild *parent_child;
    BlockDriverState *filter_or_cow_bs;
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    if (blk_level) {
        bs = bdrv_skip_implicit_filters(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);
    if (s->driver_specific) {
        s->has_driver_specific = true;
    }

    parent_child = bdrv_primary_child(bs);
    if (!parent_child ||
        !(parent_child->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED)))
    {
        BdrvChild *c;

        /*
         * Look for a unique data-storing child. We do not need to look for
         * filtered children, as there would be only one and it would have been
         * the primary child.
         */
        parent_child = NULL;
        QLIST_FOREACH(c, &bs->children, next) {
            if (c->role & BDRV_CHILD_DATA) {
                if (parent_child) {
                    /*
                     * There are multiple data-storing children and we cannot
                     * choose between them.
                     */
                    parent_child = NULL;
                    break;
                }
                parent_child = c;
            }
        }
    }
    if (parent_child) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(parent_child->bs, blk_level);
    }

    filter_or_cow_bs = bdrv_filter_or_cow_bs(bs);
    if (blk_level && filter_or_cow_bs) {
        /*
         * Put any filtered or COW child here (for backwards
         * compatibility to when we put bs0->backing here, which might
         * be either)
         */
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(filter_or_cow_bs, blk_level);
    }

    return s;
}

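/*
 * Implements the QMP query-block command: one BlockInfo entry is returned
 * per BlockBackend that is either named or attached to a qdev device.
 */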
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

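/*
 * Implements the QMP query-blockstats command. With query-nodes=true it
 * reports one entry per named block node; otherwise it reports one entry
 * per named or device-attached BlockBackend.
 */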
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **tail = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* query_nodes is only meaningful if the optional argument was given */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false));
            aio_context_release(ctx);
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            QAPI_LIST_APPEND(tail, s);
        }
    }

    return head;
}

void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char clock_buf[128];
    char icount_buf[128] = {0};
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-17s%8s%20s%13s%11s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK", "ICOUNT");
    } else {
        g_autoptr(GDateTime) date = g_date_time_new_from_unix_local(sn->date_sec);
        g_autofree char *date_buf = g_date_time_format(date, "%Y-%m-%d %H:%M:%S");

        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        if (sn->icount != -1ULL) {
            snprintf(icount_buf, sizeof(icount_buf),
                     "%"PRId64, sn->icount);
        }
        qemu_printf("%-9s %-16s %8s%20s%13s%11s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf,
                    icount_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}

void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
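    /*
     * Convert the QAPI struct into a QDict via the output visitor; the
     * driver-specific members end up under the "data" key, which is then
     * printed in human-readable form.
     */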
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}

void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
                .icount = elem->value->has_icount ?
                          elem->value->icount : -1ULL,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}