/*
 * Block driver for the Virtual Disk Image (VDI) format
 *
 * Copyright (c) 2009, 2012 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) version 3 or any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Reference:
 * http://forums.virtualbox.org/viewtopic.php?t=8046
 *
 * This driver supports create / read / write operations on VDI images.
 *
 * Todo (see also TODO in code):
 *
 * Some features like snapshots are still missing.
 *
 * Deallocation of zero-filled blocks and shrinking images are missing, too
 * (might be added to common block layer).
 *
 * Allocation of blocks could be optimized (less writes to block map and
 * header).
 *
 * Read and write of adjacent blocks could be done in one operation
 * (the current code uses one operation per block of 1 MiB).
 *
 * The code is not thread safe (missing locks for changes in header and
 * block table, no problem with current QEMU).
 *
 * Hints:
 *
 * Blocks (VDI documentation) correspond to clusters (QEMU).
 * QEMU's backing files could be implemented using VDI snapshot files (TODO).
 * VDI snapshot files may also contain the complete machine state.
 * Maybe this machine state can be converted to QEMU PC machine snapshot data.
 *
 * The driver keeps a block cache (little endian entries) in memory.
 * For the standard block size (1 MiB), a 1 TiB disk will use 4 MiB RAM,
 * so this seems to be reasonable.
 */
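
/* Worked example of the cache size estimate above (illustrative): with
 * 1 MiB blocks, a 1 TiB disk has 2^20 blocks, and each in-memory block map
 * entry is a uint32_t (4 bytes), so the map needs 2^20 * 4 B = 4 MiB RAM.
 */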

#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include "migration.h"

#if defined(CONFIG_UUID)
#include <uuid/uuid.h>
#else
/* TODO: move uuid emulation to some central place in QEMU. */
#include "sysemu.h" /* UUID_FMT */
typedef unsigned char uuid_t[16];
void uuid_generate(uuid_t out);
int uuid_is_null(const uuid_t uu);
void uuid_unparse(const uuid_t uu, char *out);
#endif

/* Code configuration options. */

/* Enable debug messages. */
//~ #define CONFIG_VDI_DEBUG

/* Support write operations on VDI images. */
#define CONFIG_VDI_WRITE

/* Support non-standard block (cluster) size. This is untested.
 * Maybe it will be needed for very large images.
 */
//~ #define CONFIG_VDI_BLOCK_SIZE

/* Support static (fixed, pre-allocated) images. */
#define CONFIG_VDI_STATIC_IMAGE

/* Command line option for static images. */
#define BLOCK_OPT_STATIC "static"

#define KiB 1024
#define MiB (KiB * KiB)

#define SECTOR_SIZE 512
#define DEFAULT_CLUSTER_SIZE (1 * MiB)

#if defined(CONFIG_VDI_DEBUG)
#define logout(fmt, ...) \
    fprintf(stderr, "vdi\t%-24s" fmt, __func__, ##__VA_ARGS__)
#else
#define logout(fmt, ...) ((void)0)
#endif

/* Image signature. */
#define VDI_SIGNATURE 0xbeda107f

/* Image version. */
#define VDI_VERSION_1_1 0x00010001

/* Image type. */
#define VDI_TYPE_DYNAMIC 1
#define VDI_TYPE_STATIC  2

/* Innotek / SUN images use these strings in header.text:
 * "<<< innotek VirtualBox Disk Image >>>\n"
 * "<<< Sun xVM VirtualBox Disk Image >>>\n"
 * "<<< Sun VirtualBox Disk Image >>>\n"
 * The value does not matter, so QEMU created images use a different text.
 */
#define VDI_TEXT "<<< QEMU VM Virtual Disk Image >>>\n"

/* A never-allocated block; semantically arbitrary content. */
#define VDI_UNALLOCATED 0xffffffffU

/* A discarded (no longer allocated) block; semantically zero-filled. */
#define VDI_DISCARDED   0xfffffffeU

#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)
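
/* Interpretation of block map entries (illustrative, as used by the read
 * and write paths below): any value below VDI_DISCARDED is the index of a
 * data block inside the image file, i.e. the block's payload starts at byte
 * offset header.offset_data + (uint64_t)bmap_entry * header.block_size.
 * VDI_UNALLOCATED marks a block that was never written, VDI_DISCARDED marks
 * a block that was freed again and is treated as zero-filled.
 */
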
#if !defined(CONFIG_UUID)
void uuid_generate(uuid_t out)
{
    memset(out, 0, sizeof(uuid_t));
}

int uuid_is_null(const uuid_t uu)
{
    uuid_t null_uuid = { 0 };
    return memcmp(uu, null_uuid, sizeof(uuid_t)) == 0;
}

void uuid_unparse(const uuid_t uu, char *out)
{
    snprintf(out, 37, UUID_FMT,
             uu[0], uu[1], uu[2], uu[3], uu[4], uu[5], uu[6], uu[7],
             uu[8], uu[9], uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]);
}
#endif

typedef struct {
    BlockDriverAIOCB common;
    uint8_t *buf;
    void *orig_buf;
} VdiAIOCB;

typedef struct {
    char text[0x40];
    uint32_t signature;
    uint32_t version;
    uint32_t header_size;
    uint32_t image_type;
    uint32_t image_flags;
    char description[256];
    uint32_t offset_bmap;
    uint32_t offset_data;
    uint32_t cylinders;         /* disk geometry, unused here */
    uint32_t heads;             /* disk geometry, unused here */
    uint32_t sectors;           /* disk geometry, unused here */
    uint32_t sector_size;
    uint32_t unused1;
    uint64_t disk_size;
    uint32_t block_size;
    uint32_t block_extra;       /* unused here */
    uint32_t blocks_in_image;
    uint32_t blocks_allocated;
    uuid_t uuid_image;
    uuid_t uuid_last_snap;
    uuid_t uuid_link;
    uuid_t uuid_parent;
    uint64_t unused2[7];
} VdiHeader;
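
/* On-disk layout, shown for orientation (as produced by vdi_create below):
 * the VdiHeader occupies the start of the file, the block map begins at
 * header.offset_bmap (0x200 for QEMU-created images) with one uint32_t per
 * block, and the data blocks follow at header.offset_data.
 */
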
typedef struct {
    /* The block map entries are little endian (even in memory). */
    uint32_t *bmap;
    /* Size of block (bytes). */
    uint32_t block_size;
    /* Size of block (sectors). */
    uint32_t block_sectors;
    /* First sector of block map. */
    uint32_t bmap_sector;
    /* VDI header (converted to host endianness). */
    VdiHeader header;

    Error *migration_blocker;
} BDRVVdiState;

/* Change UUID from little endian (IPRT = VirtualBox format) to big endian
 * format (network byte order, standard, see RFC 4122) and vice versa.
 */
static void uuid_convert(uuid_t uuid)
{
    bswap32s((uint32_t *)&uuid[0]);
    bswap16s((uint16_t *)&uuid[4]);
    bswap16s((uint16_t *)&uuid[6]);
}
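
/* Illustrative note: the three swaps above cover the RFC 4122 time_low
 * (bytes 0..3), time_mid (bytes 4..5) and time_hi_and_version (bytes 6..7)
 * fields, which IPRT stores in little endian; the remaining 8 bytes
 * (clock_seq and node) are plain byte arrays and need no conversion.
 */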

static void vdi_header_to_cpu(VdiHeader *header)
{
    le32_to_cpus(&header->signature);
    le32_to_cpus(&header->version);
    le32_to_cpus(&header->header_size);
    le32_to_cpus(&header->image_type);
    le32_to_cpus(&header->image_flags);
    le32_to_cpus(&header->offset_bmap);
    le32_to_cpus(&header->offset_data);
    le32_to_cpus(&header->cylinders);
    le32_to_cpus(&header->heads);
    le32_to_cpus(&header->sectors);
    le32_to_cpus(&header->sector_size);
    le64_to_cpus(&header->disk_size);
    le32_to_cpus(&header->block_size);
    le32_to_cpus(&header->block_extra);
    le32_to_cpus(&header->blocks_in_image);
    le32_to_cpus(&header->blocks_allocated);
    uuid_convert(header->uuid_image);
    uuid_convert(header->uuid_last_snap);
    uuid_convert(header->uuid_link);
    uuid_convert(header->uuid_parent);
}

static void vdi_header_to_le(VdiHeader *header)
{
    cpu_to_le32s(&header->signature);
    cpu_to_le32s(&header->version);
    cpu_to_le32s(&header->header_size);
    cpu_to_le32s(&header->image_type);
    cpu_to_le32s(&header->image_flags);
    cpu_to_le32s(&header->offset_bmap);
    cpu_to_le32s(&header->offset_data);
    cpu_to_le32s(&header->cylinders);
    cpu_to_le32s(&header->heads);
    cpu_to_le32s(&header->sectors);
    cpu_to_le32s(&header->sector_size);
    cpu_to_le64s(&header->disk_size);
    cpu_to_le32s(&header->block_size);
    cpu_to_le32s(&header->block_extra);
    cpu_to_le32s(&header->blocks_in_image);
    cpu_to_le32s(&header->blocks_allocated);
    uuid_convert(header->uuid_image);
    uuid_convert(header->uuid_last_snap);
    uuid_convert(header->uuid_link);
    uuid_convert(header->uuid_parent);
}

#if defined(CONFIG_VDI_DEBUG)
static void vdi_header_print(VdiHeader *header)
{
    char uuid[37];
    logout("text        %s", header->text);
    logout("signature   0x%04x\n", header->signature);
    logout("header size 0x%04x\n", header->header_size);
    logout("image type  0x%04x\n", header->image_type);
    logout("image flags 0x%04x\n", header->image_flags);
    logout("description %s\n", header->description);
    logout("offset bmap 0x%04x\n", header->offset_bmap);
    logout("offset data 0x%04x\n", header->offset_data);
    logout("cylinders   0x%04x\n", header->cylinders);
    logout("heads       0x%04x\n", header->heads);
    logout("sectors     0x%04x\n", header->sectors);
    logout("sector size 0x%04x\n", header->sector_size);
    logout("image size  0x%" PRIx64 " B (%" PRIu64 " MiB)\n",
           header->disk_size, header->disk_size / MiB);
    logout("block size  0x%04x\n", header->block_size);
    logout("block extra 0x%04x\n", header->block_extra);
    logout("blocks tot. 0x%04x\n", header->blocks_in_image);
    logout("blocks all. 0x%04x\n", header->blocks_allocated);
    uuid_unparse(header->uuid_image, uuid);
    logout("uuid image  %s\n", uuid);
    uuid_unparse(header->uuid_last_snap, uuid);
    logout("uuid snap   %s\n", uuid);
    uuid_unparse(header->uuid_link, uuid);
    logout("uuid link   %s\n", uuid);
    uuid_unparse(header->uuid_parent, uuid);
    logout("uuid parent %s\n", uuid);
}
#endif

static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res)
{
    /* TODO: additional checks possible. */
    BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
    uint32_t blocks_allocated = 0;
    uint32_t block;
    uint32_t *bmap;
    logout("\n");

    bmap = g_malloc(s->header.blocks_in_image * sizeof(uint32_t));
    memset(bmap, 0xff, s->header.blocks_in_image * sizeof(uint32_t));

    /* Check block map and value of blocks_allocated. */
    for (block = 0; block < s->header.blocks_in_image; block++) {
        uint32_t bmap_entry = le32_to_cpu(s->bmap[block]);
        if (VDI_IS_ALLOCATED(bmap_entry)) {
            if (bmap_entry < s->header.blocks_in_image) {
                blocks_allocated++;
                if (!VDI_IS_ALLOCATED(bmap[bmap_entry])) {
                    bmap[bmap_entry] = bmap_entry;
                } else {
                    fprintf(stderr, "ERROR: block index %" PRIu32
                            " also used by %" PRIu32 "\n",
                            bmap[bmap_entry], bmap_entry);
                    res->corruptions++;
                }
            } else {
                fprintf(stderr, "ERROR: block index %" PRIu32
                        " too large, is %" PRIu32 "\n", block, bmap_entry);
                res->corruptions++;
            }
        }
    }
    if (blocks_allocated != s->header.blocks_allocated) {
        fprintf(stderr, "ERROR: allocated blocks mismatch, is %" PRIu32
                ", should be %" PRIu32 "\n",
                blocks_allocated, s->header.blocks_allocated);
        res->corruptions++;
    }

    g_free(bmap);

    return 0;
}

static int vdi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    /* TODO: vdi_get_info would be needed for machine snapshots.
       vm_state_offset is still missing. */
    BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
    logout("\n");
    bdi->cluster_size = s->block_size;
    bdi->vm_state_offset = 0;
    return 0;
}

static int vdi_make_empty(BlockDriverState *bs)
{
    /* TODO: missing code. */
    logout("\n");
    /* The return value for missing code must be 0, see block.c. */
    return 0;
}

static int vdi_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const VdiHeader *header = (const VdiHeader *)buf;
    int result = 0;

    logout("\n");

    if (buf_size < sizeof(*header)) {
        /* Header too small, no VDI. */
    } else if (le32_to_cpu(header->signature) == VDI_SIGNATURE) {
        result = 100;
    }

    if (result == 0) {
        logout("no vdi image\n");
    } else {
        logout("%s", header->text);
    }

    return result;
}

static int vdi_open(BlockDriverState *bs, int flags)
{
    BDRVVdiState *s = bs->opaque;
    VdiHeader header;
    size_t bmap_size;

    logout("\n");

    if (bdrv_read(bs->file, 0, (uint8_t *)&header, 1) < 0) {
        goto fail;
    }

    vdi_header_to_cpu(&header);
#if defined(CONFIG_VDI_DEBUG)
    vdi_header_print(&header);
#endif

    if (header.disk_size % SECTOR_SIZE != 0) {
        /* 'VBoxManage convertfromraw' can create images with odd disk sizes.
           We accept them but round the disk size to the next multiple of
           SECTOR_SIZE. */
        logout("odd disk size %" PRIu64 " B, round up\n", header.disk_size);
        header.disk_size += SECTOR_SIZE - 1;
        header.disk_size &= ~(SECTOR_SIZE - 1);
    }
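
    /* Example of the rounding above (illustrative): a reported disk size of
       10000 B becomes (10000 + 511) & ~511 = 10240 B, i.e. 20 sectors. */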

    if (header.version != VDI_VERSION_1_1) {
        logout("unsupported version %u.%u\n",
               header.version >> 16, header.version & 0xffff);
        goto fail;
    } else if (header.offset_bmap % SECTOR_SIZE != 0) {
        /* We only support block maps which start on a sector boundary. */
        logout("unsupported block map offset 0x%x B\n", header.offset_bmap);
        goto fail;
    } else if (header.offset_data % SECTOR_SIZE != 0) {
        /* We only support data blocks which start on a sector boundary. */
        logout("unsupported data offset 0x%x B\n", header.offset_data);
        goto fail;
    } else if (header.sector_size != SECTOR_SIZE) {
        logout("unsupported sector size %u B\n", header.sector_size);
        goto fail;
    } else if (header.block_size != 1 * MiB) {
        logout("unsupported block size %u B\n", header.block_size);
        goto fail;
    } else if (header.disk_size >
               (uint64_t)header.blocks_in_image * header.block_size) {
        logout("unsupported disk size %" PRIu64 " B\n", header.disk_size);
        goto fail;
    } else if (!uuid_is_null(header.uuid_link)) {
        logout("link uuid != 0, unsupported\n");
        goto fail;
    } else if (!uuid_is_null(header.uuid_parent)) {
        logout("parent uuid != 0, unsupported\n");
        goto fail;
    }

    bs->total_sectors = header.disk_size / SECTOR_SIZE;

    s->block_size = header.block_size;
    s->block_sectors = header.block_size / SECTOR_SIZE;
    s->bmap_sector = header.offset_bmap / SECTOR_SIZE;
    s->header = header;

    bmap_size = header.blocks_in_image * sizeof(uint32_t);
    bmap_size = (bmap_size + SECTOR_SIZE - 1) / SECTOR_SIZE;
    if (bmap_size > 0) {
        s->bmap = g_malloc(bmap_size * SECTOR_SIZE);
    }
    if (bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap, bmap_size) < 0) {
        goto fail_free_bmap;
    }

    /* Disable migration when vdi images are used */
    error_set(&s->migration_blocker,
              QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
              "vdi", bs->device_name, "live migration");
    migrate_add_blocker(s->migration_blocker);

    return 0;

 fail_free_bmap:
    g_free(s->bmap);

 fail:
    return -1;
}

static int coroutine_fn vdi_co_is_allocated(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum)
{
    /* TODO: Check for too large sector_num (in bdrv_is_allocated or here). */
    BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
    size_t bmap_index = sector_num / s->block_sectors;
    size_t sector_in_block = sector_num % s->block_sectors;
    int n_sectors = s->block_sectors - sector_in_block;
    uint32_t bmap_entry = le32_to_cpu(s->bmap[bmap_index]);
    logout("%p, %" PRId64 ", %d, %p\n", bs, sector_num, nb_sectors, pnum);
    if (n_sectors > nb_sectors) {
        n_sectors = nb_sectors;
    }
    *pnum = n_sectors;
    return VDI_IS_ALLOCATED(bmap_entry);
}

static AIOPool vdi_aio_pool = {
    .aiocb_size = sizeof(VdiAIOCB),
};

static VdiAIOCB *vdi_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov)
{
    VdiAIOCB *acb;

    logout("%p, %p\n", bs, qiov);

    acb = qemu_aio_get(&vdi_aio_pool, bs, NULL, NULL);

    if (qiov->niov > 1) {
        acb->buf = qemu_blockalign(bs, qiov->size);
        acb->orig_buf = acb->buf;
    } else {
        acb->buf = (uint8_t *)qiov->iov->iov_base;
        /* Clear any stale bounce buffer pointer from a recycled AIOCB;
           the read and write paths below test acb->orig_buf. */
        acb->orig_buf = NULL;
    }
    return acb;
}
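
/* Note (illustrative): requests with more than one iovec are linearised
 * into a single bounce buffer (acb->buf / acb->orig_buf) so that each
 * per-block request below can be issued with one contiguous iovec;
 * single-iovec requests use the caller's buffer directly.
 */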

static int vdi_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    VdiAIOCB *acb;
    BDRVVdiState *s = bs->opaque;
    uint32_t bmap_entry;
    uint32_t block_index;
    uint32_t sector_in_block;
    uint32_t n_sectors;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    int ret;

    logout("\n");
    acb = vdi_aio_setup(bs, qiov);

restart:
    block_index = sector_num / s->block_sectors;
    sector_in_block = sector_num % s->block_sectors;
    n_sectors = s->block_sectors - sector_in_block;
    if (n_sectors > nb_sectors) {
        n_sectors = nb_sectors;
    }

    logout("will read %u sectors starting at sector %" PRIu64 "\n",
           n_sectors, sector_num);

    /* prepare next AIO request */
    bmap_entry = le32_to_cpu(s->bmap[block_index]);
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
        /* Block not allocated, return zeros, no need to wait. */
        memset(acb->buf, 0, n_sectors * SECTOR_SIZE);
        ret = 0;
    } else {
        uint64_t offset = s->header.offset_data / SECTOR_SIZE +
                          (uint64_t)bmap_entry * s->block_sectors +
                          sector_in_block;
        hd_iov.iov_base = (void *)acb->buf;
        hd_iov.iov_len = n_sectors * SECTOR_SIZE;
        qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
        ret = bdrv_co_readv(bs->file, offset, n_sectors, &hd_qiov);
    }
    logout("%u sectors read\n", n_sectors);

    nb_sectors -= n_sectors;
    sector_num += n_sectors;
    acb->buf += n_sectors * SECTOR_SIZE;

    if (ret >= 0 && nb_sectors > 0) {
        goto restart;
    }

    if (acb->orig_buf) {
        qemu_iovec_from_buffer(qiov, acb->orig_buf, qiov->size);
        qemu_vfree(acb->orig_buf);
    }
    qemu_aio_release(acb);
    return ret;
}

static int vdi_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    VdiAIOCB *acb;
    BDRVVdiState *s = bs->opaque;
    uint32_t bmap_entry;
    uint32_t block_index;
    uint32_t sector_in_block;
    uint32_t n_sectors;
    uint32_t bmap_first = VDI_UNALLOCATED;
    uint32_t bmap_last = VDI_UNALLOCATED;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    uint8_t *block = NULL;
    int ret;

    logout("\n");
    acb = vdi_aio_setup(bs, qiov);
    if (acb->orig_buf) {
        qemu_iovec_to_buffer(qiov, acb->buf);
    }

restart:
    block_index = sector_num / s->block_sectors;
    sector_in_block = sector_num % s->block_sectors;
    n_sectors = s->block_sectors - sector_in_block;
    if (n_sectors > nb_sectors) {
        n_sectors = nb_sectors;
    }

    logout("will write %u sectors starting at sector %" PRIu64 "\n",
           n_sectors, sector_num);

    /* prepare next AIO request */
    bmap_entry = le32_to_cpu(s->bmap[block_index]);
    if (!VDI_IS_ALLOCATED(bmap_entry)) {
        /* Allocate new block and write to it. */
        uint64_t offset;
        bmap_entry = s->header.blocks_allocated;
        s->bmap[block_index] = cpu_to_le32(bmap_entry);
        s->header.blocks_allocated++;
        offset = s->header.offset_data / SECTOR_SIZE +
                 (uint64_t)bmap_entry * s->block_sectors;
        if (block == NULL) {
            block = g_malloc(s->block_size);
            bmap_first = block_index;
        }
        bmap_last = block_index;
        /* Copy data to be written to new block and zero unused parts. */
        memset(block, 0, sector_in_block * SECTOR_SIZE);
        memcpy(block + sector_in_block * SECTOR_SIZE,
               acb->buf, n_sectors * SECTOR_SIZE);
        memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
               (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
        hd_iov.iov_base = (void *)block;
        hd_iov.iov_len = s->block_size;
        qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
        ret = bdrv_co_writev(bs->file, offset, s->block_sectors, &hd_qiov);
    } else {
        uint64_t offset = s->header.offset_data / SECTOR_SIZE +
                          (uint64_t)bmap_entry * s->block_sectors +
                          sector_in_block;
        hd_iov.iov_base = (void *)acb->buf;
        hd_iov.iov_len = n_sectors * SECTOR_SIZE;
        qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
        ret = bdrv_co_writev(bs->file, offset, n_sectors, &hd_qiov);
    }

    nb_sectors -= n_sectors;
    sector_num += n_sectors;
    acb->buf += n_sectors * SECTOR_SIZE;

    logout("%u sectors written\n", n_sectors);
    if (ret >= 0 && nb_sectors > 0) {
        goto restart;
    }

    logout("finished data write\n");
    if (ret >= 0) {
        ret = 0;
        if (block) {
            VdiHeader *header = (VdiHeader *) block;
            logout("now writing modified header\n");
            assert(VDI_IS_ALLOCATED(bmap_first));
            *header = s->header;
            vdi_header_to_le(header);
            hd_iov.iov_base = block;
            hd_iov.iov_len = SECTOR_SIZE;
            qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
            ret = bdrv_co_writev(bs->file, 0, 1, &hd_qiov);
        }
        g_free(block);
        block = NULL;
        if (ret >= 0 && VDI_IS_ALLOCATED(bmap_first)) {
            /* One or more new blocks were allocated. */
            uint64_t offset;
            logout("now writing modified block map entry %u...%u\n",
                   bmap_first, bmap_last);
            /* Write modified sectors from block map. */
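            /* Each block map sector holds SECTOR_SIZE / sizeof(uint32_t)
             * = 128 entries, so the entry indices are converted to map
             * sector indices below; e.g. entries 0..127 all live in map
             * sector 0 (illustrative). */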
            bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
            bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
            n_sectors = bmap_last - bmap_first + 1;
            offset = s->bmap_sector + bmap_first;
            hd_iov.iov_base = (void *)((uint8_t *)&s->bmap[0] +
                                       bmap_first * SECTOR_SIZE);
            hd_iov.iov_len = n_sectors * SECTOR_SIZE;
            qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
            logout("will write %u block map sectors starting from entry %u\n",
                   n_sectors, bmap_first);
            ret = bdrv_co_writev(bs->file, offset, n_sectors, &hd_qiov);
        }
    }

    if (acb->orig_buf) {
        qemu_vfree(acb->orig_buf);
    }
    qemu_aio_release(acb);
    return ret;
}

static int vdi_create(const char *filename, QEMUOptionParameter *options)
{
    int fd;
    int result = 0;
    uint64_t bytes = 0;
    uint32_t blocks;
    size_t block_size = DEFAULT_CLUSTER_SIZE;
    uint32_t image_type = VDI_TYPE_DYNAMIC;
    VdiHeader header;
    size_t i;
    size_t bmap_size;
    uint32_t *bmap;

    logout("\n");

    /* Read out options. */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            bytes = options->value.n;
#if defined(CONFIG_VDI_BLOCK_SIZE)
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                /* TODO: Additional checks (SECTOR_SIZE * 2^n, ...). */
                block_size = options->value.n;
            }
#endif
#if defined(CONFIG_VDI_STATIC_IMAGE)
        } else if (!strcmp(options->name, BLOCK_OPT_STATIC)) {
            if (options->value.n) {
                image_type = VDI_TYPE_STATIC;
            }
#endif
        }
        options++;
    }

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
              0644);
    if (fd < 0) {
        return -errno;
    }

    /* We need enough blocks to store the given disk size,
       so always round up. */
    blocks = (bytes + block_size - 1) / block_size;

    bmap_size = blocks * sizeof(uint32_t);
    bmap_size = ((bmap_size + SECTOR_SIZE - 1) & ~(SECTOR_SIZE - 1));

    memset(&header, 0, sizeof(header));
    pstrcpy(header.text, sizeof(header.text), VDI_TEXT);
    header.signature = VDI_SIGNATURE;
    header.version = VDI_VERSION_1_1;
    header.header_size = 0x180;
    header.image_type = image_type;
    header.offset_bmap = 0x200;
    header.offset_data = 0x200 + bmap_size;
    header.sector_size = SECTOR_SIZE;
    header.disk_size = bytes;
    header.block_size = block_size;
    header.blocks_in_image = blocks;
    if (image_type == VDI_TYPE_STATIC) {
        header.blocks_allocated = blocks;
    }
    uuid_generate(header.uuid_image);
    uuid_generate(header.uuid_last_snap);
    /* There is no need to set header.uuid_link or header.uuid_parent here. */
#if defined(CONFIG_VDI_DEBUG)
    vdi_header_print(&header);
#endif
    vdi_header_to_le(&header);
    if (write(fd, &header, sizeof(header)) < 0) {
        result = -errno;
    }

    bmap = NULL;
    if (bmap_size > 0) {
        bmap = (uint32_t *)g_malloc0(bmap_size);
    }
    for (i = 0; i < blocks; i++) {
        if (image_type == VDI_TYPE_STATIC) {
            bmap[i] = i;
        } else {
            bmap[i] = VDI_UNALLOCATED;
        }
    }
    if (write(fd, bmap, bmap_size) < 0) {
        result = -errno;
    }
    g_free(bmap);
    if (image_type == VDI_TYPE_STATIC) {
        if (ftruncate(fd, sizeof(header) + bmap_size + blocks * block_size)) {
            result = -errno;
        }
    }

    if (close(fd) < 0) {
        result = -errno;
    }

    return result;
}

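/* Example invocation exercising the create options above (illustrative):
 *   qemu-img create -f vdi -o static=on test.vdi 16M
 * which creates a pre-allocated image with 16 blocks of 1 MiB each.
 */
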
static void vdi_close(BlockDriverState *bs)
{
    BDRVVdiState *s = bs->opaque;

    g_free(s->bmap);

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);
}

static QEMUOptionParameter vdi_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
#if defined(CONFIG_VDI_BLOCK_SIZE)
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "VDI cluster (block) size",
        .value = { .n = DEFAULT_CLUSTER_SIZE },
    },
#endif
#if defined(CONFIG_VDI_STATIC_IMAGE)
    {
        .name = BLOCK_OPT_STATIC,
        .type = OPT_FLAG,
        .help = "VDI static (pre-allocated) image"
    },
#endif
    /* TODO: An additional option to set UUID values might be useful. */
    { NULL }
};

static BlockDriver bdrv_vdi = {
    .format_name = "vdi",
    .instance_size = sizeof(BDRVVdiState),
    .bdrv_probe = vdi_probe,
    .bdrv_open = vdi_open,
    .bdrv_close = vdi_close,
    .bdrv_create = vdi_create,
    .bdrv_co_is_allocated = vdi_co_is_allocated,
    .bdrv_make_empty = vdi_make_empty,

    .bdrv_co_readv = vdi_co_readv,
#if defined(CONFIG_VDI_WRITE)
    .bdrv_co_writev = vdi_co_writev,
#endif

    .bdrv_get_info = vdi_get_info,

    .create_options = vdi_create_options,
    .bdrv_check = vdi_check,
};

static void bdrv_vdi_init(void)
{
    logout("\n");
    bdrv_register(&bdrv_vdi);
}

block_init(bdrv_vdi_init);