/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
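
/* A worked example (a sketch, assuming the default QED geometry of 64 KB
 * clusters and table_size = 4):
 *
 *   table_entries = (4 * 65536) / 8      = 32768 entries per L1/L2 table
 *   l2_size       = 32768 * 65536        = 2 GB covered per L2 table
 *   max image     = 2 GB * 32768 entries = 64 TB
 */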

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

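/* A summary of the QED_F_NEED_CHECK protocol implemented by the callbacks
 * above and the timer below: the first allocating write sets the flag; once
 * allocating writes have drained for the timeout period, the timer plugs new
 * allocating writes, flushes the image, clears the flag, rewrites the header,
 * flushes again, and finally unplugs any waiting writes.
 */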
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EMEDIUMTYPE;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
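
    /* For example (a sketch, assuming the default 64 KB clusters and
     * table_size = 4): table_nelems = 32768, l2_shift = 16 (byte offset
     * within a cluster), l2_mask = 0x7fff (L2 index), and l1_shift = 31,
     * so each L1 entry covers 2 GB of the virtual disk.
     */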

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

/* We have nothing to do for QED reopen; the stub just returns success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* The file must start empty and grow; check that truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}
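
/* The resulting on-disk layout (a sketch based on the writes above, with
 * header_size = 1 and l1_table_offset = cluster_size):
 *
 *   byte 0                   QED header (one cluster reserved for it)
 *   byte sizeof(QEDHeader)   backing filename, if any
 *   byte cluster_size        zeroed L1 table, table_size clusters long
 */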

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr,
                "QED cluster size must be within range [%u, %u] and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr,
                "QED table size must be within range [%u, %u] and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors,
                                                         int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
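
/* A worked example of the straddling case (hypothetical numbers): with a
 * 1 MB backing file, pos = 1 MB - 4 KB and qiov->size = 8 KB first zero-fill
 * the whole 8 KB buffer, then read only the final 4 KB of the backing file
 * over the front of it.
 */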

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
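
/* For example (a sketch): linking n = 3 allocated clusters starting at byte
 * offset X stores X, X + cluster_size, X + 2 * cluster_size into consecutive
 * table entries, while the zero or unallocated marker is stored unchanged in
 * every entry.
 */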

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
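
/* Regions of a newly allocated data cluster during an allocating write
 * (a sketch of how prefill, the guest write, and postfill fit together):
 *
 *   |<--- prefill --->|<--- guest write --->|<--- postfill --->|
 *   cluster start     cur_pos               cur_pos + size     cluster end
 *
 * The prefill and postfill regions are copied from the backing file (or
 * zeroed) so no stale data remains in the untouched parts of the cluster.
 */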

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
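
/* The request state machine sketched from the functions above: qed_aio_setup()
 * starts the loop by calling qed_aio_next_io(), which maps the next contiguous
 * run of clusters with qed_find_cluster() and dispatches to qed_aio_read_data()
 * or qed_aio_write_data().  Each data callback re-enters qed_aio_next_io()
 * until cur_pos reaches end_pos, at which point qed_aio_complete() fires the
 * caller's completion function from a bottom half.
 */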

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);