/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"

BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs, Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bs->read_only;
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;
    info->encryption_key_missing = false;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    if (bs->backing_file[0]) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(bs->backing_file);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        if (bs0->drv && bs0->backing) {
            info->backing_file_depth++;
            bs0 = bs0->backing->bs;
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        while (blk && bs0->drv && bs0->implicit) {
            bs0 = backing_bs(bs0);
            assert(bs0);
        }
    }

    return info;
}

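/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * might use bdrv_block_device_info() for a node-level query (blk == NULL,
 * so no BlockBackend-specific data such as throttling is filled in).
 * The node name "my-node" and the helper name are placeholders.
 */
static G_GNUC_UNUSED void example_dump_device_info(Error **errp)
{
    BlockDriverState *bs = bdrv_lookup_bs(NULL, "my-node", errp);
    BlockDeviceInfo *info;

    if (!bs) {
        return;
    }

    info = bdrv_block_device_info(NULL, bs, errp);
    if (!info) {
        return;
    }

    qemu_printf("%s: driver %s, backing chain depth %" PRId64 "\n",
                info->file, info->drv, info->backing_file_depth);
    qapi_free_BlockDeviceInfo(info);
}
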
/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots.  Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *info_list, *cur_item = NULL, *head = NULL;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;

        info_list = g_new0(SnapshotInfoList, 1);
        info_list->value = info;

        /* XXX: waiting for the qapi to support qemu-queue.h types */
        if (!cur_item) {
            head = cur_item = info_list;
        } else {
            cur_item->next = info_list;
            cur_item = info_list;
        }

    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}

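/*
 * Illustrative sketch, not part of the upstream file: consuming the list
 * built by bdrv_query_snapshot_info_list() under the contract documented
 * above (0 with *p_list possibly left NULL on success, -errno on error).
 * The helper name is a placeholder.
 */
static G_GNUC_UNUSED void example_print_snapshot_names(BlockDriverState *bs,
                                                       Error **errp)
{
    SnapshotInfoList *list = NULL, *elem;

    if (bdrv_query_snapshot_info_list(bs, &list, errp) < 0) {
        return;
    }

    for (elem = list; elem; elem = elem->next) {
        qemu_printf("snapshot: %s\n", elem->value->name);
    }
    qapi_free_SnapshotInfoList(list);
}
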
/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*p_info)->has_backing_image will be set to false and
 * (*p_info)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename = g_strdup(bs->filename);
    info->format = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size = size;
    info->actual_size = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bdrv_is_encrypted(bs)) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    info->has_format_specific = info->format_specific != NULL;

    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;
        info->backing_filename = g_strdup(backing_filename);
        info->has_backing_filename = true;
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's
         * the same as backing_filename.  That they are the same is useful
         * info. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
            info->has_full_backing_filename = true;
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
            info->has_backing_filename_format = true;
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}

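/*
 * Illustrative sketch, not part of the upstream file: querying the "flat"
 * image information described above and printing the virtual size.  The
 * helper name is a placeholder.
 */
static G_GNUC_UNUSED void example_print_virtual_size(BlockDriverState *bs,
                                                     Error **errp)
{
    ImageInfo *info = NULL;

    bdrv_query_image_info(bs, &info, errp);
    if (!info) {
        return; /* *p_info is only set on success */
    }

    qemu_printf("%s: virtual size %" PRId64 " bytes\n",
                info->filename, info->virtual_size);
    qapi_free_ImageInfo(info);
}
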
/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    while (bs && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
    }

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

 err:
    qapi_free_BlockInfo(info);
}

static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **pout_list = &out_list;

    for (i = 0; i < size; i++) {
        uint64List *entry = g_new(uint64List, 1);
        entry->value = list[i];
        *pout_list = entry;
        pout_list = &entry->next;
    }

    *pout_list = NULL;

    return out_list;
}

static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}

static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}

static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    while (blk_level && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
        assert(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);
    if (s->driver_specific) {
        s->has_driver_specific = true;
    }

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(bs->file->bs, blk_level);
    }

    if (blk_level && bs->backing) {
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(bs->backing->bs, blk_level);
    }

    return s;
}

BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

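/*
 * Illustrative sketch, not part of the upstream file: qmp_query_block()
 * backs the query-block QMP command; a C caller can walk the returned
 * list directly.  The helper name is a placeholder.
 */
static G_GNUC_UNUSED int example_count_block_devices(Error **errp)
{
    BlockInfoList *list = qmp_query_block(errp);
    BlockInfoList *elem;
    int count = 0;

    for (elem = list; elem; elem = elem->next) {
        count++;
    }
    qapi_free_BlockInfoList(list);
    return count;
}
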
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            BlockStatsList *info;
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info = g_malloc0(sizeof(*info));
            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}

void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
        case QTYPE_QNUM: {
            QNum *value = qobject_to(QNum, obj);
            char *tmp = qnum_to_string(value);
            qemu_printf("%s", tmp);
            g_free(tmp);
            break;
        }
        case QTYPE_QSTRING: {
            QString *value = qobject_to(QString, obj);
            qemu_printf("%s", qstring_get_str(value));
            break;
        }
        case QTYPE_QDICT: {
            QDict *value = qobject_to(QDict, obj);
            dump_qdict(comp_indent, value);
            break;
        }
        case QTYPE_QLIST: {
            QList *value = qobject_to(QList, obj);
            dump_qlist(comp_indent, value);
            break;
        }
        case QTYPE_QBOOL: {
            QBool *value = qobject_to(QBool, obj);
            qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
            break;
        }
        default:
            abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}

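/*
 * Illustrative sketch, not part of the upstream file: feeding a small
 * QDict through dump_qobject() to show the indentation and the
 * dash-to-space key rewriting done by dump_qdict() above.  The keys and
 * helper name are placeholders.
 */
static G_GNUC_UNUSED void example_dump_small_dict(void)
{
    QDict *dict = qdict_new();

    qdict_put_str(dict, "lazy-refcounts", "false");
    qdict_put_int(dict, "refcount-bits", 16);

    /* prints "    lazy refcounts: false" and "    refcount bits: 16" */
    dump_qobject(1, QOBJECT(dict));
    qobject_unref(dict);
}
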
void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}

void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}