block/parallels.c
1 /*
2 * Block driver for Parallels disk image format
3 *
4 * Copyright (c) 2007 Alex Beregszaszi
5 * Copyright (c) 2015 Denis V. Lunev <den@openvz.org>
6 *
7 * This code was originally based on comparing different disk images created
8 * by Parallels. Currently it is based on the open OpenVZ sources
9 * available at
10 * http://git.openvz.org/?p=ploop;a=summary
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this software and associated documentation files (the "Software"), to deal
14 * in the Software without restriction, including without limitation the rights
15 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 * copies of the Software, and to permit persons to whom the Software is
17 * furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
28 * THE SOFTWARE.
29 */
30
31 #include "qemu/osdep.h"
32 #include "qapi/error.h"
33 #include "block/block_int.h"
34 #include "block/qdict.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qapi/qmp/qdict.h"
39 #include "qapi/qobject-input-visitor.h"
40 #include "qapi/qapi-visit-block-core.h"
41 #include "qemu/bswap.h"
42 #include "qemu/bitmap.h"
43 #include "migration/blocker.h"
44 #include "parallels.h"
45
46 /**************************************************************/
47
48 #define HEADER_MAGIC "WithoutFreeSpace"
49 #define HEADER_MAGIC2 "WithouFreSpacExt"
50 #define HEADER_VERSION 2
51 #define HEADER_INUSE_MAGIC (0x746F6E59)
52 #define MAX_PARALLELS_IMAGE_FACTOR (1ull << 32)
53
54 static QEnumLookup prealloc_mode_lookup = {
55 .array = (const char *const[]) {
56 "falloc",
57 "truncate",
58 },
59 .size = PRL_PREALLOC_MODE__MAX
60 };
61
62 #define PARALLELS_OPT_PREALLOC_MODE "prealloc-mode"
63 #define PARALLELS_OPT_PREALLOC_SIZE "prealloc-size"
64
65 static QemuOptsList parallels_runtime_opts = {
66 .name = "parallels",
67 .head = QTAILQ_HEAD_INITIALIZER(parallels_runtime_opts.head),
68 .desc = {
69 {
70 .name = PARALLELS_OPT_PREALLOC_SIZE,
71 .type = QEMU_OPT_SIZE,
72 .help = "Preallocation size on image expansion",
73 .def_value_str = "128M",
74 },
75 {
76 .name = PARALLELS_OPT_PREALLOC_MODE,
77 .type = QEMU_OPT_STRING,
78 .help = "Preallocation mode on image expansion "
79 "(allowed values: falloc, truncate)",
80 .def_value_str = "falloc",
81 },
82 { /* end of list */ },
83 },
84 };
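/* Editor's note (a hedged usage sketch, not part of the original file):
 * driver-specific -drive keys are passed through to the format driver, so
 * these runtime options can plausibly be set as
 *
 *   -drive file=image.hds,format=parallels,prealloc-mode=truncate,prealloc-size=256M
 *
 * with image.hds as a placeholder filename.  parallels_open() below rounds
 * prealloc-size up to at least one cluster and falls back to "falloc" when
 * the protocol layer gives no zero-init guarantee. */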
85
86 static QemuOptsList parallels_create_opts = {
87 .name = "parallels-create-opts",
88 .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head),
89 .desc = {
90 {
91 .name = BLOCK_OPT_SIZE,
92 .type = QEMU_OPT_SIZE,
93 .help = "Virtual disk size",
94 },
95 {
96 .name = BLOCK_OPT_CLUSTER_SIZE,
97 .type = QEMU_OPT_SIZE,
98 .help = "Parallels image cluster size",
99 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE),
100 },
101 { /* end of list */ }
102 }
103 };
104
105
106 static int64_t bat2sect(BDRVParallelsState *s, uint32_t idx)
107 {
108 return (uint64_t)le32_to_cpu(s->bat_bitmap[idx]) * s->off_multiplier;
109 }
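/* Editor's note (illustrative, derived from parallels_open() below): for the
 * newer "WithouFreSpacExt" layout a BAT entry counts whole clusters, so with
 * 1 MiB clusters (off_multiplier == 2048 sectors) an entry of 3 means the
 * cluster starts at sector 3 * 2048 = 6144.  For the legacy
 * "WithoutFreeSpace" layout off_multiplier is 1 and the entry is the starting
 * sector itself. */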
110
111 static uint32_t bat_entry_off(uint32_t idx)
112 {
113 return sizeof(ParallelsHeader) + sizeof(uint32_t) * idx;
114 }
115
116 static int64_t seek_to_sector(BDRVParallelsState *s, int64_t sector_num)
117 {
118 uint32_t index, offset;
119
120 index = sector_num / s->tracks;
121 offset = sector_num % s->tracks;
122
123 /* not allocated */
124 if ((index >= s->bat_size) || (s->bat_bitmap[index] == 0)) {
125 return -1;
126 }
127 return bat2sect(s, index) + offset;
128 }
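/* Editor's note (a worked example, not from the original source): with
 * 1 MiB clusters s->tracks is 2048 sectors, so guest sector 5000 maps to
 * BAT index 5000 / 2048 = 2 and in-cluster offset 5000 % 2048 = 904; if
 * bat_bitmap[2] is zero the cluster is unallocated and -1 is returned. */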
129
130 static int cluster_remainder(BDRVParallelsState *s, int64_t sector_num,
131 int nb_sectors)
132 {
133 int ret = s->tracks - sector_num % s->tracks;
134 return MIN(nb_sectors, ret);
135 }
136
137 static int64_t block_status(BDRVParallelsState *s, int64_t sector_num,
138 int nb_sectors, int *pnum)
139 {
140 int64_t start_off = -2, prev_end_off = -2;
141
142 *pnum = 0;
143 while (nb_sectors > 0 || start_off == -2) {
144 int64_t offset = seek_to_sector(s, sector_num);
145 int to_end;
146
147 if (start_off == -2) {
148 start_off = offset;
149 prev_end_off = offset;
150 } else if (offset != prev_end_off) {
151 break;
152 }
153
154 to_end = cluster_remainder(s, sector_num, nb_sectors);
155 nb_sectors -= to_end;
156 sector_num += to_end;
157 *pnum += to_end;
158
159 if (offset > 0) {
160 prev_end_off += to_end;
161 }
162 }
163 return start_off;
164 }
165
166 static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
167 int nb_sectors, int *pnum)
168 {
169 int ret;
170 BDRVParallelsState *s = bs->opaque;
171 int64_t pos, space, idx, to_allocate, i, len;
172
173 pos = block_status(s, sector_num, nb_sectors, pnum);
174 if (pos > 0) {
175 return pos;
176 }
177
178 idx = sector_num / s->tracks;
179 to_allocate = DIV_ROUND_UP(sector_num + *pnum, s->tracks) - idx;
180
181 /* This function is called only by parallels_co_writev(), which will never
182 * pass a sector_num at or beyond the end of the image (because the block
183 * layer never passes such a sector_num to that function). Therefore, idx
184 * is always below s->bat_size.
185 * block_status() will limit *pnum so that sector_num + *pnum will not
186 * exceed the image end. Therefore, idx + to_allocate cannot exceed
187 * s->bat_size.
188 * Note that s->bat_size is an unsigned int, therefore idx + to_allocate
189 * will always fit into a uint32_t. */
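/* Illustrative example (editor's addition, not in the original source):
 * a 64 GiB image with 1 MiB clusters has s->tracks == 2048 and
 * s->bat_size == 65536.  A write ending in the last cluster gives
 * idx + to_allocate == 65536 == s->bat_size, which still satisfies the
 * assertion below, while idx itself stays strictly below s->bat_size. */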
190 assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);
191
192 space = to_allocate * s->tracks;
193 len = bdrv_getlength(bs->file->bs);
194 if (len < 0) {
195 return len;
196 }
197 if (s->data_end + space > (len >> BDRV_SECTOR_BITS)) {
198 space += s->prealloc_size;
199 if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) {
200 ret = bdrv_pwrite_zeroes(bs->file,
201 s->data_end << BDRV_SECTOR_BITS,
202 space << BDRV_SECTOR_BITS, 0);
203 } else {
204 ret = bdrv_truncate(bs->file,
205 (s->data_end + space) << BDRV_SECTOR_BITS,
206 PREALLOC_MODE_OFF, NULL);
207 }
208 if (ret < 0) {
209 return ret;
210 }
211 }
212
213 /* Try to read from the backing file to fill empty clusters
214 * FIXME: 1. the previous write_zeroes may be redundant
215 * 2. most of the data read from the backing file will be rewritten by
216 * parallels_co_writev; for a cluster-aligned write this read is not
217 * needed at all
218 * 3. it would be good to combine the write of backing data and of new
219 * data into one write call */
220 if (bs->backing) {
221 int64_t nb_cow_sectors = to_allocate * s->tracks;
222 int64_t nb_cow_bytes = nb_cow_sectors << BDRV_SECTOR_BITS;
223 QEMUIOVector qiov;
224 struct iovec iov = {
225 .iov_len = nb_cow_bytes,
226 .iov_base = qemu_blockalign(bs, nb_cow_bytes)
227 };
228 qemu_iovec_init_external(&qiov, &iov, 1);
229
230 ret = bdrv_co_preadv(bs->backing, idx * s->tracks * BDRV_SECTOR_SIZE,
231 nb_cow_bytes, &qiov, 0);
232 if (ret < 0) {
233 qemu_vfree(iov.iov_base);
234 return ret;
235 }
236
237 ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE,
238 nb_cow_bytes, &qiov, 0);
239 qemu_vfree(iov.iov_base);
240 if (ret < 0) {
241 return ret;
242 }
243 }
244
245 for (i = 0; i < to_allocate; i++) {
246 s->bat_bitmap[idx + i] = cpu_to_le32(s->data_end / s->off_multiplier);
247 s->data_end += s->tracks;
248 bitmap_set(s->bat_dirty_bmap,
249 bat_entry_off(idx + i) / s->bat_dirty_block, 1);
250 }
251
252 return bat2sect(s, idx) + sector_num % s->tracks;
253 }
254
255
256 static coroutine_fn int parallels_co_flush_to_os(BlockDriverState *bs)
257 {
258 BDRVParallelsState *s = bs->opaque;
259 unsigned long size = DIV_ROUND_UP(s->header_size, s->bat_dirty_block);
260 unsigned long bit;
261
262 qemu_co_mutex_lock(&s->lock);
263
264 bit = find_first_bit(s->bat_dirty_bmap, size);
265 while (bit < size) {
266 uint32_t off = bit * s->bat_dirty_block;
267 uint32_t to_write = s->bat_dirty_block;
268 int ret;
269
270 if (off + to_write > s->header_size) {
271 to_write = s->header_size - off;
272 }
273 ret = bdrv_pwrite(bs->file, off, (uint8_t *)s->header + off,
274 to_write);
275 if (ret < 0) {
276 qemu_co_mutex_unlock(&s->lock);
277 return ret;
278 }
279 bit = find_next_bit(s->bat_dirty_bmap, size, bit + 1);
280 }
281 bitmap_zero(s->bat_dirty_bmap, size);
282
283 qemu_co_mutex_unlock(&s->lock);
284 return 0;
285 }
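/* Editor's note (illustrative, not part of the driver): s->bat_dirty_block
 * is 4 * getpagesize(), i.e. 16 KiB with 4 KiB pages, so each bit in
 * bat_dirty_bmap covers one 16 KiB block of the in-memory header + BAT.
 * Updating a single BAT entry therefore dirties exactly one such block, and
 * a flush after one allocation writes back 16 KiB instead of the whole
 * table. */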
286
287
288 static int coroutine_fn parallels_co_block_status(BlockDriverState *bs,
289 bool want_zero,
290 int64_t offset,
291 int64_t bytes,
292 int64_t *pnum,
293 int64_t *map,
294 BlockDriverState **file)
295 {
296 BDRVParallelsState *s = bs->opaque;
297 int count;
298
299 assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
300 qemu_co_mutex_lock(&s->lock);
301 offset = block_status(s, offset >> BDRV_SECTOR_BITS,
302 bytes >> BDRV_SECTOR_BITS, &count);
303 qemu_co_mutex_unlock(&s->lock);
304
305 *pnum = count * BDRV_SECTOR_SIZE;
306 if (offset < 0) {
307 return 0;
308 }
309
310 *map = offset * BDRV_SECTOR_SIZE;
311 *file = bs->file->bs;
312 return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
313 }
314
315 static coroutine_fn int parallels_co_writev(BlockDriverState *bs,
316 int64_t sector_num, int nb_sectors,
317 QEMUIOVector *qiov, int flags)
318 {
319 BDRVParallelsState *s = bs->opaque;
320 uint64_t bytes_done = 0;
321 QEMUIOVector hd_qiov;
322 int ret = 0;
323
324 assert(!flags);
325 qemu_iovec_init(&hd_qiov, qiov->niov);
326
327 while (nb_sectors > 0) {
328 int64_t position;
329 int n, nbytes;
330
331 qemu_co_mutex_lock(&s->lock);
332 position = allocate_clusters(bs, sector_num, nb_sectors, &n);
333 qemu_co_mutex_unlock(&s->lock);
334 if (position < 0) {
335 ret = (int)position;
336 break;
337 }
338
339 nbytes = n << BDRV_SECTOR_BITS;
340
341 qemu_iovec_reset(&hd_qiov);
342 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);
343
344 ret = bdrv_co_pwritev(bs->file, position * BDRV_SECTOR_SIZE, nbytes,
345 &hd_qiov, 0);
346 if (ret < 0) {
347 break;
348 }
349
350 nb_sectors -= n;
351 sector_num += n;
352 bytes_done += nbytes;
353 }
354
355 qemu_iovec_destroy(&hd_qiov);
356 return ret;
357 }
358
359 static coroutine_fn int parallels_co_readv(BlockDriverState *bs,
360 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
361 {
362 BDRVParallelsState *s = bs->opaque;
363 uint64_t bytes_done = 0;
364 QEMUIOVector hd_qiov;
365 int ret = 0;
366
367 qemu_iovec_init(&hd_qiov, qiov->niov);
368
369 while (nb_sectors > 0) {
370 int64_t position;
371 int n, nbytes;
372
373 qemu_co_mutex_lock(&s->lock);
374 position = block_status(s, sector_num, nb_sectors, &n);
375 qemu_co_mutex_unlock(&s->lock);
376
377 nbytes = n << BDRV_SECTOR_BITS;
378
379 qemu_iovec_reset(&hd_qiov);
380 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);
381
382 if (position < 0) {
383 if (bs->backing) {
384 ret = bdrv_co_preadv(bs->backing, sector_num * BDRV_SECTOR_SIZE,
385 nbytes, &hd_qiov, 0);
386 if (ret < 0) {
387 break;
388 }
389 } else {
390 qemu_iovec_memset(&hd_qiov, 0, 0, nbytes);
391 }
392 } else {
393 ret = bdrv_co_preadv(bs->file, position * BDRV_SECTOR_SIZE, nbytes,
394 &hd_qiov, 0);
395 if (ret < 0) {
396 break;
397 }
398 }
399
400 nb_sectors -= n;
401 sector_num += n;
402 bytes_done += nbytes;
403 }
404
405 qemu_iovec_destroy(&hd_qiov);
406 return ret;
407 }
408
409
410 static int coroutine_fn parallels_co_check(BlockDriverState *bs,
411 BdrvCheckResult *res,
412 BdrvCheckMode fix)
413 {
414 BDRVParallelsState *s = bs->opaque;
415 int64_t size, prev_off, high_off;
416 int ret;
417 uint32_t i;
418 bool flush_bat = false;
419 int cluster_size = s->tracks << BDRV_SECTOR_BITS;
420
421 size = bdrv_getlength(bs->file->bs);
422 if (size < 0) {
423 res->check_errors++;
424 return size;
425 }
426
427 qemu_co_mutex_lock(&s->lock);
428 if (s->header_unclean) {
429 fprintf(stderr, "%s image was not closed correctly\n",
430 fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR");
431 res->corruptions++;
432 if (fix & BDRV_FIX_ERRORS) {
433 /* parallels_close will do the job right */
434 res->corruptions_fixed++;
435 s->header_unclean = false;
436 }
437 }
438
439 res->bfi.total_clusters = s->bat_size;
440 res->bfi.compressed_clusters = 0; /* compression is not supported */
441
442 high_off = 0;
443 prev_off = 0;
444 for (i = 0; i < s->bat_size; i++) {
445 int64_t off = bat2sect(s, i) << BDRV_SECTOR_BITS;
446 if (off == 0) {
447 prev_off = 0;
448 continue;
449 }
450
451 /* cluster outside the image */
452 if (off > size) {
453 fprintf(stderr, "%s cluster %u is outside image\n",
454 fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);
455 res->corruptions++;
456 if (fix & BDRV_FIX_ERRORS) {
457 prev_off = 0;
458 s->bat_bitmap[i] = 0;
459 res->corruptions_fixed++;
460 flush_bat = true;
461 continue;
462 }
463 }
464
465 res->bfi.allocated_clusters++;
466 if (off > high_off) {
467 high_off = off;
468 }
469
470 if (prev_off != 0 && (prev_off + cluster_size) != off) {
471 res->bfi.fragmented_clusters++;
472 }
473 prev_off = off;
474 }
475
476 ret = 0;
477 if (flush_bat) {
478 ret = bdrv_pwrite_sync(bs->file, 0, s->header, s->header_size);
479 if (ret < 0) {
480 res->check_errors++;
481 goto out;
482 }
483 }
484
485 res->image_end_offset = high_off + cluster_size;
486 if (size > res->image_end_offset) {
487 int64_t count;
488 count = DIV_ROUND_UP(size - res->image_end_offset, cluster_size);
489 fprintf(stderr, "%s space leaked at the end of the image %" PRId64 "\n",
490 fix & BDRV_FIX_LEAKS ? "Repairing" : "ERROR",
491 size - res->image_end_offset);
492 res->leaks += count;
493 if (fix & BDRV_FIX_LEAKS) {
494 Error *local_err = NULL;
495 ret = bdrv_truncate(bs->file, res->image_end_offset,
496 PREALLOC_MODE_OFF, &local_err);
497 if (ret < 0) {
498 error_report_err(local_err);
499 res->check_errors++;
500 goto out;
501 }
502 res->leaks_fixed += count;
503 }
504 }
505
506 out:
507 qemu_co_mutex_unlock(&s->lock);
508 return ret;
509 }
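/* Editor's note (illustrative usage, the filename is a placeholder): this is
 * the checker behind
 *
 *   qemu-img check -f parallels image.hds
 *   qemu-img check -f parallels -r leaks image.hds
 *
 * With repair enabled, BAT entries pointing outside the file are cleared
 * (BDRV_FIX_ERRORS) and trailing leaked space is truncated away
 * (BDRV_FIX_LEAKS). */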
510
511
512 static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts,
513 Error **errp)
514 {
515 BlockdevCreateOptionsParallels *parallels_opts;
516 BlockDriverState *bs;
517 BlockBackend *blk;
518 int64_t total_size, cl_size;
519 uint32_t bat_entries, bat_sectors;
520 ParallelsHeader header;
521 uint8_t tmp[BDRV_SECTOR_SIZE];
522 int ret;
523
524 assert(opts->driver == BLOCKDEV_DRIVER_PARALLELS);
525 parallels_opts = &opts->u.parallels;
526
527 /* Sanity checks */
528 total_size = parallels_opts->size;
529
530 if (parallels_opts->has_cluster_size) {
531 cl_size = parallels_opts->cluster_size;
532 } else {
533 cl_size = DEFAULT_CLUSTER_SIZE;
534 }
535
536 /* XXX What is the real limit here? This is an insanely large maximum. */
537 if (cl_size >= INT64_MAX / MAX_PARALLELS_IMAGE_FACTOR) {
538 error_setg(errp, "Cluster size is too large");
539 return -EINVAL;
540 }
541 if (total_size >= MAX_PARALLELS_IMAGE_FACTOR * cl_size) {
542 error_setg(errp, "Image size is too large for this cluster size");
543 return -E2BIG;
544 }
545
546 if (!QEMU_IS_ALIGNED(total_size, BDRV_SECTOR_SIZE)) {
547 error_setg(errp, "Image size must be a multiple of 512 bytes");
548 return -EINVAL;
549 }
550
551 if (!QEMU_IS_ALIGNED(cl_size, BDRV_SECTOR_SIZE)) {
552 error_setg(errp, "Cluster size must be a multiple of 512 bytes");
553 return -EINVAL;
554 }
555
556 /* Create BlockBackend to write to the image */
557 bs = bdrv_open_blockdev_ref(parallels_opts->file, errp);
558 if (bs == NULL) {
559 return -EIO;
560 }
561
562 blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
563 ret = blk_insert_bs(blk, bs, errp);
564 if (ret < 0) {
565 goto out;
566 }
567 blk_set_allow_write_beyond_eof(blk, true);
568
569 /* Create image format */
570 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
571 if (ret < 0) {
572 goto out;
573 }
574
575 bat_entries = DIV_ROUND_UP(total_size, cl_size);
576 bat_sectors = DIV_ROUND_UP(bat_entry_off(bat_entries), cl_size);
577 bat_sectors = (bat_sectors * cl_size) >> BDRV_SECTOR_BITS;
578
579 memset(&header, 0, sizeof(header));
580 memcpy(header.magic, HEADER_MAGIC2, sizeof(header.magic));
581 header.version = cpu_to_le32(HEADER_VERSION);
582 /* geometry does not really matter, it is not used at the image level */
583 header.heads = cpu_to_le32(HEADS_NUMBER);
584 header.cylinders = cpu_to_le32(total_size / BDRV_SECTOR_SIZE
585 / HEADS_NUMBER / SEC_IN_CYL);
586 header.tracks = cpu_to_le32(cl_size >> BDRV_SECTOR_BITS);
587 header.bat_entries = cpu_to_le32(bat_entries);
588 header.nb_sectors = cpu_to_le64(DIV_ROUND_UP(total_size, BDRV_SECTOR_SIZE));
589 header.data_off = cpu_to_le32(bat_sectors);
590
591 /* write all the data */
592 memset(tmp, 0, sizeof(tmp));
593 memcpy(tmp, &header, sizeof(header));
594
595 ret = blk_pwrite(blk, 0, tmp, BDRV_SECTOR_SIZE, 0);
596 if (ret < 0) {
597 goto exit;
598 }
599 ret = blk_pwrite_zeroes(blk, BDRV_SECTOR_SIZE,
600 (bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
601 if (ret < 0) {
602 goto exit;
603 }
604
605 ret = 0;
606 out:
607 blk_unref(blk);
608 bdrv_unref(bs);
609 return ret;
610
611 exit:
612 error_setg_errno(errp, -ret, "Failed to create Parallels image");
613 goto out;
614 }
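/* Editor's note (a hedged sketch, not from the original file): this is the
 * QAPI entry point driven by QMP blockdev-create.  Assuming a protocol node
 * named "proto0" has already been added, a request could look like:
 *
 *   { "execute": "blockdev-create",
 *     "arguments": { "job-id": "job0",
 *                    "options": { "driver": "parallels",
 *                                 "file": "proto0",
 *                                 "size": 68719476736,
 *                                 "cluster-size": 1048576 } } }
 *
 * "proto0" and "job0" are placeholder names. */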
615
616 static int coroutine_fn parallels_co_create_opts(const char *filename,
617 QemuOpts *opts,
618 Error **errp)
619 {
620 BlockdevCreateOptions *create_options = NULL;
621 Error *local_err = NULL;
622 BlockDriverState *bs = NULL;
623 QDict *qdict;
624 Visitor *v;
625 int ret;
626
627 static const QDictRenames opt_renames[] = {
628 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
629 { NULL, NULL },
630 };
631
632 /* Parse options and convert legacy syntax */
633 qdict = qemu_opts_to_qdict_filtered(opts, NULL, &parallels_create_opts,
634 true);
635
636 if (!qdict_rename_keys(qdict, opt_renames, errp)) {
637 ret = -EINVAL;
638 goto done;
639 }
640
641 /* Create and open the file (protocol layer) */
642 ret = bdrv_create_file(filename, opts, &local_err);
643 if (ret < 0) {
644 error_propagate(errp, local_err);
645 goto done;
646 }
647
648 bs = bdrv_open(filename, NULL, NULL,
649 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
650 if (bs == NULL) {
651 ret = -EIO;
652 goto done;
653 }
654
655 /* Now get the QAPI type BlockdevCreateOptions */
656 qdict_put_str(qdict, "driver", "parallels");
657 qdict_put_str(qdict, "file", bs->node_name);
658
659 v = qobject_input_visitor_new_flat_confused(qdict, errp);
660 if (!v) {
661 ret = -EINVAL;
662 goto done;
663 }
664
665 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
666 visit_free(v);
667
668 if (local_err) {
669 error_propagate(errp, local_err);
670 ret = -EINVAL;
671 goto done;
672 }
673
674 /* Silently round up sizes */
675 create_options->u.parallels.size =
676 ROUND_UP(create_options->u.parallels.size, BDRV_SECTOR_SIZE);
677 create_options->u.parallels.cluster_size =
678 ROUND_UP(create_options->u.parallels.cluster_size, BDRV_SECTOR_SIZE);
679
680 /* Create the Parallels image (format layer) */
681 ret = parallels_co_create(create_options, errp);
682 if (ret < 0) {
683 goto done;
684 }
685 ret = 0;
686
687 done:
688 qobject_unref(qdict);
689 bdrv_unref(bs);
690 qapi_free_BlockdevCreateOptions(create_options);
691 return ret;
692 }
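/* Editor's note (illustrative usage, the filename is a placeholder): the
 * legacy creation path above backs
 *
 *   qemu-img create -f parallels -o cluster_size=1048576 disk.hds 64G
 *
 * where the cluster_size key is renamed to the QAPI "cluster-size", and both
 * the image size and the cluster size are silently rounded up to 512-byte
 * multiples. */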
693
694
695 static int parallels_probe(const uint8_t *buf, int buf_size,
696 const char *filename)
697 {
698 const ParallelsHeader *ph = (const void *)buf;
699
700 if (buf_size < sizeof(ParallelsHeader)) {
701 return 0;
702 }
703
704 if ((!memcmp(ph->magic, HEADER_MAGIC, 16) ||
705 !memcmp(ph->magic, HEADER_MAGIC2, 16)) &&
706 (le32_to_cpu(ph->version) == HEADER_VERSION)) {
707 return 100;
708 }
709
710 return 0;
711 }
712
713 static int parallels_update_header(BlockDriverState *bs)
714 {
715 BDRVParallelsState *s = bs->opaque;
716 unsigned size = MAX(bdrv_opt_mem_align(bs->file->bs),
717 sizeof(ParallelsHeader));
718
719 if (size > s->header_size) {
720 size = s->header_size;
721 }
722 return bdrv_pwrite_sync(bs->file, 0, s->header, size);
723 }
724
725 static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
726 Error **errp)
727 {
728 BDRVParallelsState *s = bs->opaque;
729 ParallelsHeader ph;
730 int ret, size, i;
731 QemuOpts *opts = NULL;
732 Error *local_err = NULL;
733 char *buf;
734
735 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
736 false, errp);
737 if (!bs->file) {
738 return -EINVAL;
739 }
740
741 ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
742 if (ret < 0) {
743 goto fail;
744 }
745
746 bs->total_sectors = le64_to_cpu(ph.nb_sectors);
747
748 if (le32_to_cpu(ph.version) != HEADER_VERSION) {
749 goto fail_format;
750 }
751 if (!memcmp(ph.magic, HEADER_MAGIC, 16)) {
752 s->off_multiplier = 1;
753 bs->total_sectors = 0xffffffff & bs->total_sectors;
754 } else if (!memcmp(ph.magic, HEADER_MAGIC2, 16)) {
755 s->off_multiplier = le32_to_cpu(ph.tracks);
756 } else {
757 goto fail_format;
758 }
759
760 s->tracks = le32_to_cpu(ph.tracks);
761 if (s->tracks == 0) {
762 error_setg(errp, "Invalid image: Zero sectors per track");
763 ret = -EINVAL;
764 goto fail;
765 }
766 if (s->tracks > INT32_MAX/513) {
767 error_setg(errp, "Invalid image: Too big cluster");
768 ret = -EFBIG;
769 goto fail;
770 }
771
772 s->bat_size = le32_to_cpu(ph.bat_entries);
773 if (s->bat_size > INT_MAX / sizeof(uint32_t)) {
774 error_setg(errp, "Catalog too large");
775 ret = -EFBIG;
776 goto fail;
777 }
778
779 size = bat_entry_off(s->bat_size);
780 s->header_size = ROUND_UP(size, bdrv_opt_mem_align(bs->file->bs));
781 s->header = qemu_try_blockalign(bs->file->bs, s->header_size);
782 if (s->header == NULL) {
783 ret = -ENOMEM;
784 goto fail;
785 }
786 s->data_end = le32_to_cpu(ph.data_off);
787 if (s->data_end == 0) {
788 s->data_end = ROUND_UP(bat_entry_off(s->bat_size), BDRV_SECTOR_SIZE);
789 }
790 if (s->data_end < s->header_size) {
791 /* there is not enough unused space between the BAT and the actual data
792 to pad the header out to block alignment; read-modify-write of the header cannot be avoided */
793 s->header_size = size;
794 }
795
796 ret = bdrv_pread(bs->file, 0, s->header, s->header_size);
797 if (ret < 0) {
798 goto fail;
799 }
800 s->bat_bitmap = (uint32_t *)(s->header + 1);
801
802 for (i = 0; i < s->bat_size; i++) {
803 int64_t off = bat2sect(s, i);
804 if (off >= s->data_end) {
805 s->data_end = off + s->tracks;
806 }
807 }
808
809 if (le32_to_cpu(ph.inuse) == HEADER_INUSE_MAGIC) {
810 /* Image was not closed correctly. The check is mandatory */
811 s->header_unclean = true;
812 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
813 error_setg(errp, "parallels: Image was not closed correctly; "
814 "cannot be opened read/write");
815 ret = -EACCES;
816 goto fail;
817 }
818 }
819
820 opts = qemu_opts_create(&parallels_runtime_opts, NULL, 0, &local_err);
821 if (local_err != NULL) {
822 goto fail_options;
823 }
824
825 qemu_opts_absorb_qdict(opts, options, &local_err);
826 if (local_err != NULL) {
827 goto fail_options;
828 }
829
830 s->prealloc_size =
831 qemu_opt_get_size_del(opts, PARALLELS_OPT_PREALLOC_SIZE, 0);
832 s->prealloc_size = MAX(s->tracks, s->prealloc_size >> BDRV_SECTOR_BITS);
833 buf = qemu_opt_get_del(opts, PARALLELS_OPT_PREALLOC_MODE);
834 s->prealloc_mode = qapi_enum_parse(&prealloc_mode_lookup, buf,
835 PRL_PREALLOC_MODE_FALLOCATE,
836 &local_err);
837 g_free(buf);
838 if (local_err != NULL) {
839 goto fail_options;
840 }
841
842 if (!bdrv_has_zero_init(bs->file->bs)) {
843 s->prealloc_mode = PRL_PREALLOC_MODE_FALLOCATE;
844 }
845
846 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_INACTIVE)) {
847 s->header->inuse = cpu_to_le32(HEADER_INUSE_MAGIC);
848 ret = parallels_update_header(bs);
849 if (ret < 0) {
850 goto fail;
851 }
852 }
853
854 s->bat_dirty_block = 4 * getpagesize();
855 s->bat_dirty_bmap =
856 bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));
857
858 /* Disable migration until the bdrv_invalidate_cache method is added */
859 error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
860 "does not support live migration",
861 bdrv_get_device_or_node_name(bs));
862 ret = migrate_add_blocker(s->migration_blocker, &local_err);
863 if (local_err) {
864 error_propagate(errp, local_err);
865 error_free(s->migration_blocker);
866 goto fail;
867 }
868 qemu_co_mutex_init(&s->lock);
869 return 0;
870
871 fail_format:
872 error_setg(errp, "Image not in Parallels format");
873 ret = -EINVAL;
874 fail:
875 qemu_vfree(s->header);
876 return ret;
877
878 fail_options:
879 error_propagate(errp, local_err);
880 ret = -EINVAL;
881 goto fail;
882 }
883
884
885 static void parallels_close(BlockDriverState *bs)
886 {
887 BDRVParallelsState *s = bs->opaque;
888
889 if ((bs->open_flags & BDRV_O_RDWR) && !(bs->open_flags & BDRV_O_INACTIVE)) {
890 s->header->inuse = 0;
891 parallels_update_header(bs);
892 bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS,
893 PREALLOC_MODE_OFF, NULL);
894 }
895
896 g_free(s->bat_dirty_bmap);
897 qemu_vfree(s->header);
898
899 migrate_del_blocker(s->migration_blocker);
900 error_free(s->migration_blocker);
901 }
902
903 static BlockDriver bdrv_parallels = {
904 .format_name = "parallels",
905 .instance_size = sizeof(BDRVParallelsState),
906 .bdrv_probe = parallels_probe,
907 .bdrv_open = parallels_open,
908 .bdrv_close = parallels_close,
909 .bdrv_child_perm = bdrv_format_default_perms,
910 .bdrv_co_block_status = parallels_co_block_status,
911 .bdrv_has_zero_init = bdrv_has_zero_init_1,
912 .bdrv_co_flush_to_os = parallels_co_flush_to_os,
913 .bdrv_co_readv = parallels_co_readv,
914 .bdrv_co_writev = parallels_co_writev,
915 .supports_backing = true,
916 .bdrv_co_create = parallels_co_create,
917 .bdrv_co_create_opts = parallels_co_create_opts,
918 .bdrv_co_check = parallels_co_check,
919 .create_opts = &parallels_create_opts,
920 };
921
922 static void bdrv_parallels_init(void)
923 {
924 bdrv_register(&bdrv_parallels);
925 }
926
927 block_init(bdrv_parallels_init);