/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
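
/* Probing returns a confidence score for format auto-detection: 100 (certain)
 * when the buffer is large enough and carries the QED magic, 0 otherwise, so
 * this driver is only selected for images that actually begin with a QED
 * header.
 */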

/**
 * Check whether an image format is raw
 *
 * @fmt: Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static int qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_preadv(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_pwritev(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
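
/* A worked example with the default geometry (QED_DEFAULT_CLUSTER_SIZE is
 * 64 KiB, QED_DEFAULT_TABLE_SIZE is 4 clusters): a table spans 4 * 65536
 * bytes and holds 32768 eight-byte entries, so one L2 table maps
 * 32768 * 64 KiB = 2 GiB and the L1 table's 32768 L2 tables together map
 * 2 GiB * 32768 = 64 TiB of virtual disk.
 */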

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
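
/* Note that allocation is append-only: clusters are handed out by advancing
 * an in-memory end-of-file position and nothing reaches the disk until the
 * caller writes data and links the clusters into a table. A crash between
 * allocation and the table update costs at worst leaked space, which the
 * consistency check triggered by QED_F_NEED_CHECK can report on the next
 * open.
 */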

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
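
/* "Plugging" temporarily holds back the serialized allocating-write path:
 * while plugged, new allocating requests queue up on allocating_write_reqs
 * instead of proceeding. This lets the need-check machinery below flush data
 * and rewrite the header without racing against fresh cluster allocations;
 * unplugging wakes the next queued request, if any.
 */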

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;
    qemu_co_enter_next(&s->allocating_write_reqs);
}
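
/* Completion of the flush started by the need-check timer: if the flush
 * succeeded, clear QED_F_NEED_CHECK in the header, write the header back and
 * flush again so the cleared flag reaches stable storage. Write errors are
 * deliberately ignored here ((void) ret): the flag then simply stays set on
 * disk and the image is checked again on the next open.
 */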

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!s->allocating_acb);

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}

void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}
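
/* QED_F_NEED_CHECK is set lazily on the first allocating write and normally
 * cleared only on clean shutdown. The timer below additionally clears it once
 * the image has been idle for QED_NEED_CHECK_TIMEOUT seconds (defined in
 * qed.h), so an image that stops allocating does not require a full
 * consistency check after a host crash merely because the guest kept running.
 */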

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    qemu_co_queue_init(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
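
    /* With the default geometry (64 KiB clusters, table_size 4) this yields
     * table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff and l1_shift = 31,
     * so a virtual offset pos decomposes as: L1 index = pos >> 31, L2 index =
     * (pos >> 16) & 0x7fff, byte offset within the cluster = pos & 0xffff
     * (cf. the qed_l1_index()/qed_l2_index() helpers in qed.h).
     */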

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;
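
    /* The resulting file layout: cluster 0 holds the header (header_size = 1
     * cluster) with the backing filename string stored right after the fixed
     * header fields, the L1 table starts at l1_table_offset == cluster_size,
     * and data clusters plus L2 tables are appended after it on demand.
     */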

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
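
/* Block status lookup used to be callback-based AIO; after the conversion to
 * synchronous cluster lookup, qed_is_allocated_cb() is invoked directly and
 * appears to be kept mainly to translate qed_find_cluster() results into
 * BDRV_BLOCK_* flags, with QEDIsAllocatedCB bundling its in/out arguments.
 */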

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors,
                                                         int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                 QEMUIOVector *qiov,
                                 QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length. Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_preadv(s->bs->backing, pos, *backing_qiov);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                      uint64_t len, uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
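
/* For example, linking n = 3 data clusters starting at byte offset C fills
 * offsets[index..index + 2] with C, C + cluster_size and C + 2 * cluster_size,
 * whereas the special zero/unallocated markers are replicated unchanged into
 * every entry.
 */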

static void qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_enter_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static int qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static int qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 */
static int qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    int ret;

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    ret = bdrv_pwritev(s->bs->file, offset, &acb->cur_qiov);
    if (ret < 0) {
        return ret;
    }

    if (acb->find_cluster_ret != QED_CLUSTER_FOUND) {
        if (s->bs->backing) {
            /*
             * Flush new data clusters before updating the L2 table
             *
             * This flush is necessary when a backing file is in use. A crash
             * during an allocating write could result in empty clusters in the
             * image. If the write only touched a subregion of the cluster,
             * then backing image sectors have been lost in the untouched
             * region. The solution is to flush after writing a new data
             * cluster and before updating the L2 table.
             */
            ret = bdrv_flush(s->bs->file->bs);
            if (ret < 0) {
                return ret;
            }
        }
        ret = qed_aio_write_l2_update(acb, acb->cur_cluster);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

/**
 * Populate untouched regions of new data cluster
 */
static int qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        return ret;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        return ret;
    }

    return qed_aio_write_main(acb);
}
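
/* Within the run of newly allocated clusters, the guest write covers only the
 * middle:
 *
 *   |<-- prefill -->|<-- guest data -->|<-- postfill -->|
 *   cluster start   cur_pos            cur_pos + size   cluster end
 *
 * qed_aio_write_cow() above copies the prefill and postfill regions from the
 * backing file (or zeroes) so the untouched parts of the cluster keep the
 * data the guest could previously read.
 */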

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static int qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, NULL);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (acb->flags & QED_AIOCB_ZERO) {
        ret = qed_aio_write_l2_update(acb, 1);
    } else {
        ret = qed_aio_write_cow(acb);
    }
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static int qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                return -ENOMEM;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    return qed_aio_write_main(acb);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int qed_aio_write_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        return 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        return qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                     &acb->backing_qiov);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    ret = bdrv_preadv(bs->file, offset, &acb->cur_qiov);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
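
/* A request is processed as a sequence of fragments: each loop iteration
 * below looks up one run of contiguous clusters with qed_find_cluster() and
 * issues the read or write for just that run, advancing cur_pos until end_pos
 * is reached or an error occurs. -EAGAIN is the deliberate exception: it is
 * returned when an allocating write had to queue behind another one and means
 * "retry the table lookup from the current position".
 */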

/**
 * Begin next I/O or complete the request
 */
static int qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          count >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}
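
/* Growing the image only requires rewriting image_size in the header: the new
 * range is simply unallocated, so it reads as zeroes (or backing file data)
 * until writes allocate clusters on demand.
 */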

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);