/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "qed.h"

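/**
 * Cancel an in-flight request
 *
 * A QED request cannot be stopped midway, so cancellation simply waits in
 * qemu_aio_wait() until the request's completion handler has run and set
 * *finished via acb->finished.
 */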
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size = sizeof(QEDAIOCB),
    .cancel     = qed_aio_cancel,
};

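/**
 * Check whether buf contains a QED image header
 *
 * Returns a probe score: 100 when the QED magic number matches, 0 when the
 * buffer is too small or the magic does not match.
 */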
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
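/* The QED header is stored little-endian on disk; the two helpers below
 * convert a complete header between disk and CPU byte order.
 */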
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

static int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

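/**
 * Compute the maximum image size the L1/L2 table geometry can address
 *
 * Worked example (illustrative values): with 64 KB clusters and a
 * table_size of 4 clusters, table_entries = (4 * 65536) / 8 = 32768 and
 * l2_size = 32768 * 65536 = 2 GB, so the maximum image size is
 * 32768 * 2 GB = 64 TB.
 */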
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

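/* For nonzero x, x & (x - 1) clears the lowest set bit, so the result is
 * zero exactly when x is a power of two (e.g. 0x10000 & 0xffff == 0).
 */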
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    ret = 0; /* ret should always be 0 or -errno */
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        return -ENOTSUP; /* image uses unsupported feature bits */
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

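    /* These shifts decompose a virtual disk offset: the L1 index is
     * pos >> l1_shift, the L2 index is (pos >> l2_shift) & l2_mask, and the
     * byte offset into the cluster is pos & (cluster_size - 1) (see the
     * qed_l1_index() and qed_l2_index() helpers).
     */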
    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int bdrv_qed_flush(BlockDriverState *bs)
{
    return bdrv_flush(bs->file);
}
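/**
 * Create a new image file on disk
 *
 * The layout written here is: the header at offset 0, the backing filename
 * string (if any) immediately after the header, and a zeroed L1 table at
 * offset cluster_size. With no backing file, backing_filename_size stays 0
 * and the string write below is effectively a zero-length write.
 */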
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = qemu_mallocz(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    qemu_free(l1_table);
    bdrv_delete(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = ret == QED_CLUSTER_FOUND;
}

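/**
 * Check synchronously whether a range of sectors is allocated
 *
 * qed_find_cluster() is asynchronous, so this function waits in
 * qemu_aio_wait() until qed_is_allocated_cb() has run; the
 * async_context_push()/async_context_pop() pair isolates this wait from
 * completions belonging to other outstanding requests.
 */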
static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    async_context_push();

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    while (cb.is_allocated == -1) {
        qemu_aio_wait();
    }

    async_context_pop();

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *aiocb;
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length. Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
    aiocb = bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                           qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
    if (!aiocb) {
        cb(opaque, -EIO);
    }
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;
    BlockDriverAIOCB *aiocb;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    aiocb = bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                            &copy_cb->qiov,
                            copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                            qed_copy_from_backing_file_cb, copy_cb);
    if (!aiocb) {
        qed_copy_from_backing_file_cb(copy_cb, -EIO);
    }
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster byte offset in image file
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        cluster += s->header.cluster_size;
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache,
                                                    l2_table->offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, acb->cur_cluster);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use. A crash during an
 * allocating write could result in empty clusters in the image. If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region. The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;
    BlockDriverAIOCB *file_acb;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    file_acb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                               &acb->cur_qiov,
                               acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                               next_fn, acb);
    if (!file_acb) {
        qed_aio_complete(acb, -EIO);
    }
}
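/* Copy-on-write for an allocating write that touches only part of its
 * clusters (illustrative layout):
 *
 *   |<------------ newly allocated clusters ------------>|
 *   | prefill |      data from acb->qiov       | postfill |
 *
 * The prefill and postfill regions are filled from the backing file (or
 * zeroes) so the untouched parts of the new clusters contain valid data.
 */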
/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Write new cluster */
    qed_aio_write_prefill(acb, 0);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;
    BlockDriverAIOCB *file_acb;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle backing file and unallocated sparse hole reads */
    if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    file_acb = bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                              &acb->cur_qiov,
                              acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                              qed_aio_next_io, acb);
    if (!file_acb) {
        ret = -EIO;
        goto err;
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 *
 * A request may cover clusters with different allocation states, so it is
 * processed as a series of contiguous runs: each pass advances cur_pos past
 * the bytes completed so far and asks qed_find_cluster() for the next run.
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn =
        acb->is_write ? qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
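/**
 * Allocate and start a new read or write request
 *
 * The request is tracked in byte units: cur_pos advances from
 * sector_num * BDRV_SECTOR_SIZE toward end_pos as each contiguous cluster
 * run completes in qed_aio_next_io().
 */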
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, bool is_write)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, is_write);

    acb->is_write = is_write;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, false);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, true);
}

static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    return -ENOTSUP;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    return 0;
}

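/**
 * Change the image's backing filename and format
 *
 * The new header and backing filename string are assembled in one buffer
 * and written out with a single bdrv_pwrite_sync() call; the in-memory
 * header is only updated if that write succeeds.
 */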
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = qemu_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    memcpy(buffer + buffer_len, backing_file, backing_file_len);
    buffer_len += backing_file_len;

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    qemu_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    return -ENOTSUP;
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)"
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_flush               = bdrv_qed_flush,
    .bdrv_is_allocated        = bdrv_qed_is_allocated,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_aio_flush           = bdrv_qed_aio_flush,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);