/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu-timer.h"
#include "trace.h"
#include "qed.h"
#include "qerror.h"
#include "migration.h"

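/**
 * Cancel an in-flight request
 *
 * Note that this blocks in qemu_aio_wait() until the request has completed,
 * since the finished flag is only set from the completion bottom half.
 */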
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

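/**
 * Score this driver during image format probing
 *
 * Returns 100 (a certain match) if the buffer begins with the QED magic
 * number, or 0 if this is not a QED image.
 */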
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

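/**
 * Convert an on-disk little-endian header to CPU byte order
 */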
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

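/**
 * Convert a header from CPU byte order to on-disk little-endian
 */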
static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

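/**
 * Write the header to the image file synchronously
 *
 * Returns 0 on success, or a negative errno from bdrv_pwrite() on failure.
 * Only the fixed header struct is written; strings such as the backing
 * filename that follow it are left untouched.
 */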
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

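/**
 * Suspend dispatch of new allocating write requests
 *
 * While plugged, incoming allocating writes queue up on allocating_write_reqs
 * but are not started (see qed_aio_write_alloc()).
 */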
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

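/**
 * Resume dispatch of allocating write requests, restarting the request at
 * the head of the queue if there is one
 */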
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

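/**
 * Timer callback that clears QED_F_NEED_CHECK once the image has been idle
 *
 * Allocating writes are plugged, outstanding data is flushed, the header is
 * rewritten with the flag cleared, and the queue is unplugged again, so the
 * next open does not force a consistency check.
 */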
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = qemu_new_timer_ns(vm_clock,
                                            qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

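/**
 * Create a new QED image file
 *
 * @filename:     Path to the new image file
 * @cluster_size: Cluster size in bytes
 * @image_size:   Virtual disk size in bytes
 * @table_size:   L1/L2 table size in clusters
 * @backing_file: Backing filename, may be NULL
 * @backing_fmt:  Backing file format, may be NULL
 */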
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_delete(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    Coroutine *co;
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_is_allocated(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.is_allocated == -1) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

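/**
 * Bottom half that invokes the request's completion callback
 *
 * Running the callback from a bottom half keeps it out of the caller's
 * context; it also sets the finished flag that qed_aio_cancel() waits on.
 */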
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

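/**
 * Complete a request
 *
 * Frees per-request resources, schedules the completion bottom half, and
 * wakes the next queued allocating write (or arms the need-check timer if
 * the queue drained while QED_F_NEED_CHECK is set).
 */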
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

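/**
 * Set up and start a new read or write request
 *
 * The request is driven by qed_aio_next_io(), one cluster at a time, until
 * cur_pos reaches end_pos.
 */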
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

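/**
 * Grow the virtual disk
 *
 * Only the image_size header field changes; no clusters are allocated.
 * Shrinking is not supported and fails with -ENOTSUP.
 */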
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

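/* Drop all cached metadata by closing and re-opening the image, for use when
 * the file may have changed behind our back (e.g. after incoming migration).
 */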
static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, bs->open_flags);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_co_is_allocated     = bdrv_qed_co_is_allocated,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);