/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"
#include "qapi/qmp/types.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend, enum qcow2_discard_type type);


/*********************************************************/
/* refcount handling */

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_malloc(refcount_table_size2);
    if (s->refcount_table_size > 0) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    return -ENOMEM;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}


static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
        refcount_block);

    return ret;
}

/*
 * Returns the refcount of the cluster given by its index. Any non-negative
 * return value is the refcount of the cluster, negative values are -errno
 * and indicate an error.
 */
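/*
 * A worked example of the index arithmetic below (assuming the default
 * 64 KiB clusters, i.e. cluster_bits = 16, and 16-bit refcounts, i.e.
 * REFCOUNT_SHIFT = 1): one refcount block is a single cluster holding
 * 65536 / 2 = 32768 entries, so
 *
 *     refcount_table_index = cluster_index >> 15;
 *     block_index          = cluster_index & 32767;
 *
 * In other words, each refcount block covers 32768 clusters, i.e. 2 GiB of
 * image data.
 */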
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    uint16_t *refcount_block;
    uint16_t refcount;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
        (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    refcount = be16_to_cpu(refcount_block[block_index]);

    ret = qcow2_cache_put(bs, s->refcount_block_cache,
        (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    return refcount;
}

/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
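/*
 * Rough illustration of the growth policy (assuming 64 KiB clusters, so one
 * table cluster holds 65536 / 8 = 8192 entries): the table is sized in whole
 * clusters, and the number of table clusters grows by roughly 1.5x per step,
 * i.e. 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> ... clusters, until the table can hold
 * at least min_size entries.
 */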
static unsigned int next_refcount_table_size(BDRVQcowState *s,
    unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}


/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
    uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (2 * s->cluster_bits - REFCOUNT_SHIFT);
    uint64_t block_b = offset_b >> (2 * s->cluster_bits - REFCOUNT_SHIFT);

    return (block_a == block_b);
}
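/*
 * For example (again assuming cluster_bits = 16 and REFCOUNT_SHIFT = 1), the
 * shift above is 31, so two image file offsets share a refcount block exactly
 * when they fall into the same 2 GiB area of the file.
 */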

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
    int64_t cluster_index, uint16_t **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            return load_refcount_block(bs, refcount_block_offset,
                (void**) refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount
     *   and doing the initial refcount increase. This means that some clusters
     *   have already been allocated by the caller, but their refcount isn't
     *   accurate yet. free_cluster_index tells us where this allocation ends
     *   as long as we don't overwrite it by freeing clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */
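    /*
     * Concretely (a sketch, assuming the default 64 KiB clusters): if the
     * cluster whose refcount we need to track lies in the same 2 GiB area as
     * the freshly allocated refcount block, that block also covers its own
     * cluster, so we can simply set its own entry to 1 in the in-memory copy.
     * Otherwise its refcount lives in some other refcount block and we go
     * through update_refcount(), which may recurse back into this function.
     */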

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
        " at %" PRIx64 "\n",
        refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
            (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        (*refcount_block)[block_index] = cpu_to_be16(1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
            (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;
        return 0;
    }

    ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    if (ret < 0) {
        goto fail_block;
    }

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far */
    uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
    uint64_t blocks_used = (s->free_cluster_index +
        refcount_block_clusters - 1) / refcount_block_clusters;

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + refcount_block_clusters - 1)
            / refcount_block_clusters);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + refcount_block_clusters - 1)
            / refcount_block_clusters));

    } while (last_table_size != table_size);
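    /*
     * Rough example of the loop above (assuming 64 KiB clusters, i.e.
     * refcount_block_clusters = 32768): for an image currently using about
     * 100000 clusters, blocks_used = 4; the new table plus its refcount
     * blocks only add a few clusters, so the recomputed table_size usually
     * matches last_table_size after one extra pass. The loop is a fixpoint
     * iteration: the table must be large enough to reference the refcount
     * blocks that, in turn, describe the table itself.
     */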

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
        s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * refcount_block_clusters) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size);
    uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t));

    assert(meta_offset >= (s->free_cluster_index * s->cluster_size));

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
        s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        new_blocks[block++] = cpu_to_be16(1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
        blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for(i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
        table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for(i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
        data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. Remember, we must not change free_cluster_index */
    uint64_t old_free_cluster_index = s->free_cluster_index;
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    s->free_cluster_index = old_free_cluster_index;

    ret = load_refcount_block(bs, new_block, (void**) refcount_block);
    if (ret < 0) {
        return ret;
    }

    return 0;

fail_table:
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    }
    return ret;
}

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
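    /*
     * Example of what the merging below achieves (a sketch, assuming 64 KiB
     * clusters): if the list already holds [0x10000, +0x10000) and this call
     * adds [0x20000, +0x10000), the two are combined into a single region
     * [0x10000, +0x20000), so that qcow2_process_discards() later issues one
     * large discard instead of many small ones.
     */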
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
            || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
    }
}

/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    uint16_t *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
            offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (addend < 0) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
            s->l2_table_cache);
    }
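    /*
     * The idea behind the dependency above: when refcounts are decreased,
     * the L2 tables that dropped their references should reach the disk
     * before the refcount blocks do, so that a crash in between can only
     * leak clusters, never leave L2 entries pointing at freed clusters.
     */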

    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index =
            cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                ret = qcow2_cache_put(bs, s->refcount_block_cache,
                    (void**) &refcount_block);
                if (ret < 0) {
                    goto fail;
                }
            }

            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);

        refcount = be16_to_cpu(refcount_block[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        refcount_block[block_index] = cpu_to_be16(refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        int wret;
        wret = qcow2_cache_put(bs, s->refcount_block_cache,
            (void**) &refcount_block);
        if (wret < 0) {
            return ret < 0 ? ret : wret;
        }
    }

    /*
     * Try to undo any updates if an error is returned (this may succeed in
     * some cases, e.g. ENOSPC while allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend,
                                QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}

/*
 * Increases or decreases the refcount of a given cluster by one.
 * addend must be 1 or -1.
 *
 * If the return value is non-negative, it is the new refcount of the cluster.
 * If it is negative, it is -errno and indicates an error.
 */
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend,
                                   enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          type);
    if (ret < 0) {
        return ret;
    }

    return get_refcount(bs, cluster_index);
}



/*********************************************************/
/* cluster allocation functions */



/* return < 0 if error */
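/*
 * A short example of the allocator below (an illustration, not part of the
 * original comment): allocating 100 KiB with 64 KiB clusters means
 * nb_clusters = 2; the loop walks free_cluster_index forward and restarts
 * from scratch whenever one of the scanned clusters is already in use, so
 * on success the returned offset always points at nb_clusters contiguous
 * clusters with refcount 0. Note that this helper does not increase any
 * refcounts itself; the caller is responsible for that.
 */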
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, nb_clusters, refcount;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        int64_t next_cluster_index = s->free_cluster_index++;
        refcount = get_refcount(bs, next_cluster_index);

        if (refcount < 0) {
            return refcount;
        } else if (refcount != 0) {
            goto retry;
        }
    }
#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}

int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    offset = alloc_clusters_noref(bs, size);
    if (offset < 0) {
        return offset;
    }

    ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    if (ret < 0) {
        return ret;
    }

    return offset;
}

int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
    int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index;
    uint64_t old_free_cluster_index;
    int i, refcount, ret;

    /* Check how many clusters there are free */
    cluster_index = offset >> s->cluster_bits;
    for(i = 0; i < nb_clusters; i++) {
        refcount = get_refcount(bs, cluster_index++);

        if (refcount < 0) {
            return refcount;
        } else if (refcount != 0) {
            break;
        }
    }

    /* And then allocate them */
    old_free_cluster_index = s->free_cluster_index;
    s->free_cluster_index = cluster_index + i;

    ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
                          QCOW2_DISCARD_NEVER);
    if (ret < 0) {
        return ret;
    }

    s->free_cluster_index = old_free_cluster_index;

    return i;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
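/*
 * Illustration of the bookkeeping below (not part of the original comment):
 * free_byte_offset remembers where the previous sub-cluster allocation
 * stopped. E.g. with 64 KiB clusters, after handing out 1 KiB the next call
 * can put up to 63 KiB into the same cluster; every allocation that shares
 * an already used cluster bumps that cluster's refcount by one, so the
 * cluster is only freed once all compressed data in it is released.
 */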
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        s->free_byte_offset = offset;
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                    QCOW2_DISCARD_NEVER);
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                    QCOW2_DISCARD_NEVER);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }

    /* The cluster refcount was incremented, either by qcow2_alloc_clusters()
     * or explicitly by update_cluster_refcount(). Refcount blocks must be
     * flushed before the caller's L2 table updates.
     */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    return offset;
}

void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, -1, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}

/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
        qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                            nb_clusters << s->cluster_bits, type);
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
    case QCOW2_CLUSTER_ZERO:
        break;
    default:
        abort();
    }
}



/*********************************************************/
/* snapshots and image creation */



/* update the refcounts of snapshots and the copied flag */
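/*
 * (Typical usage, roughly: the snapshot code calls this with addend = +1
 * when a snapshot is taken, so all clusters reachable from the snapshotted
 * L1 table gain a reference, and with addend = -1 when a snapshot is
 * deleted. With addend = 0 only the QCOW_OFLAG_COPIED flags are recomputed
 * from the current refcounts.)
 */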
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors, refcount;
    int ret;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_malloc0(align_offset(l1_size2, 512));
        l1_allocated = 1;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for(j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        int ret;
                        ret = update_refcount(bs,
                            (offset & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        refcount = update_cluster_refcount(bs, cluster_index, addend,
                                                           QCOW2_DISCARD_SNAPSHOT);
                    } else {
                        refcount = get_refcount(bs, cluster_index);
                    }

                    if (refcount < 0) {
                        ret = refcount;
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                            s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                }
            }

            ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }


            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend,
                                                   QCOW2_DISCARD_SNAPSHOT);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount < 0) {
                ret = refcount;
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated)
        g_free(l1_table);
    return ret;
}




/*********************************************************/
/* refcount checking functions */


/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared with the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static void inc_refcounts(BlockDriverState *bs,
                          BdrvCheckResult *res,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;

    if (size <= 0)
        return;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                cluster_offset);
            res->corruptions++;
        } else if (k >= refcount_table_size) {
            fprintf(stderr, "Warning: cluster offset=0x%" PRIx64 " is after "
                "the end of the image file, can't properly check refcounts.\n",
                cluster_offset);
            res->check_errors++;
        } else {
            if (++refcount_table[k] == 0) {
                fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
                res->corruptions++;
            }
        }
    }
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_OFLAG_COPIED = 0x1,   /* check QCOW_OFLAG_COPIED matches refcount */
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};
/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
    uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
    int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    if (bdrv_pread(bs->file, l2_offset, l2_table, l2_size) != l2_size)
        goto fail;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                    "copied flag must never be set for compressed "
                    "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            inc_refcounts(bs, res, refcount_table, refcount_table_size,
                l2_entry & ~511, nb_csectors * 512);

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            inc_refcounts(bs, res, refcount_table, refcount_table_size,
                offset, s->cluster_size);

            /* Correct offsets are cluster aligned */
            if (offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                    "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
    g_free(l2_table);
    return -EIO;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    inc_refcounts(bs, res, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
    if (l1_size2 == 0) {
        l1_table = NULL;
    } else {
        l1_table = g_malloc(l1_size2);
        if (bdrv_pread(bs->file, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            inc_refcounts(bs, res, refcount_table, refcount_table_size,
                          l2_offset, s->cluster_size);

            /* L2 tables are cluster aligned */
            if (l2_offset & (s->cluster_size - 1)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
    res->check_errors++;
    g_free(l1_table);
    return -EIO;
}

/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if get_refcount fails (this is because such an error will have
 * been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
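/*
 * (Background for the check below: QCOW_OFLAG_COPIED is a hint that the
 * referenced cluster or L2 table has refcount exactly 1 and may therefore be
 * written to in place. The invariant verified here is simply "flag set if
 * and only if refcount == 1"; with BDRV_FIX_ERRORS the flag is rewritten to
 * match the actual refcount.)
 */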
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    int refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
        if (refcount < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%d\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                refcount = get_refcount(bs, data_offset >> s->cluster_bits);
                if (refcount < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%d\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs,
                    QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L2, l2_offset,
                    s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}

/*
 * Writes one sector of the refcount table to the disk
 */
#define RT_ENTRIES_PER_SECTOR (512 / sizeof(uint64_t))
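/*
 * (With 8-byte entries this is 64 refcount table entries per 512-byte
 * sector; write_reftable_entry() below therefore rounds rt_index down to a
 * multiple of 64 and rewrites that whole sector at once.)
 */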
static int write_reftable_entry(BlockDriverState *bs, int rt_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[RT_ENTRIES_PER_SECTOR];
    int rt_start_index;
    int i, ret;

    rt_start_index = rt_index & ~(RT_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < RT_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->refcount_table[rt_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs,
            QCOW2_OL_DEFAULT & ~QCOW2_OL_REFCOUNT_TABLE,
            s->refcount_table_offset + rt_start_index * sizeof(uint64_t),
            sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset +
            rt_start_index * sizeof(uint64_t), buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * Allocates a new cluster for the given refcount block (represented by its
 * offset in the image file) and copies the current content there. This function
 * does _not_ decrement the reference count for the currently occupied cluster.
 *
 * This function prints an informative message to stderr on error (and returns
 * -errno); on success, the offset of the newly allocated cluster is returned.
 */
static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
                                      uint64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int64_t new_offset = 0;
    void *refcount_block = NULL;
    int ret;

    /* allocate new refcount block */
    new_offset = qcow2_alloc_clusters(bs, s->cluster_size);
    if (new_offset < 0) {
        fprintf(stderr, "Could not allocate new cluster: %s\n",
                strerror(-new_offset));
        ret = new_offset;
        goto fail;
    }

    /* fetch current refcount block content */
    ret = qcow2_cache_get(bs, s->refcount_block_cache, offset, &refcount_block);
    if (ret < 0) {
        fprintf(stderr, "Could not fetch refcount block: %s\n", strerror(-ret));
        goto fail;
    }

    /* the new block has not yet been entered into the refcount table, so it
     * does not count as a refcount block as far as this check is concerned */
    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT, new_offset,
            s->cluster_size);
    if (ret < 0) {
        fprintf(stderr, "Could not write refcount block; metadata overlap "
                "check failed: %s\n", strerror(-ret));
        /* the image will be marked corrupt, so don't even attempt to free
         * the cluster */
        new_offset = 0;
        goto fail;
    }
1418 | ||
1419 | /* write to new block */ | |
1420 | ret = bdrv_write(bs->file, new_offset / BDRV_SECTOR_SIZE, refcount_block, | |
1421 | s->cluster_sectors); | |
1422 | if (ret < 0) { | |
1423 | fprintf(stderr, "Could not write refcount block: %s\n", strerror(-ret)); | |
1424 | goto fail; | |
1425 | } | |
1426 | ||
1427 | /* update refcount table */ | |
1428 | assert(!(new_offset & (s->cluster_size - 1))); | |
1429 | s->refcount_table[reftable_index] = new_offset; | |
1430 | ret = write_reftable_entry(bs, reftable_index); | |
1431 | if (ret < 0) { | |
1432 | fprintf(stderr, "Could not update refcount table: %s\n", | |
1433 | strerror(-ret)); | |
1434 | goto fail; | |
1435 | } | |
1436 | ||
1437 | fail: | |
1438 | if (new_offset && (ret < 0)) { | |
1439 | qcow2_free_clusters(bs, new_offset, s->cluster_size, | |
1440 | QCOW2_DISCARD_ALWAYS); | |
1441 | } | |
1442 | if (refcount_block) { | |
1443 | if (ret < 0) { | |
1444 | qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); | |
1445 | } else { | |
1446 | ret = qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); | |
1447 | } | |
1448 | } | |
1449 | if (ret < 0) { | |
1450 | return ret; | |
1451 | } | |
1452 | return new_offset; | |
1453 | } | |
1454 | ||
1455 | /* | |
1456 | * Checks an image for refcount consistency. | |
1457 | * | |
1458 | * Returns 0 if no errors are found, the number of errors in case the image is | |
1459 | * detected as corrupted, and -errno when an internal error occurred. | |
1460 | */ | |
1461 | int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | |
1462 | BdrvCheckMode fix) | |
1463 | { | |
1464 | BDRVQcowState *s = bs->opaque; | |
1465 | int64_t size, i, highest_cluster; | |
1466 | int nb_clusters, refcount1, refcount2; | |
1467 | QCowSnapshot *sn; | |
1468 | uint16_t *refcount_table; | |
1469 | int ret; | |
1470 | ||
1471 | size = bdrv_getlength(bs->file); | |
1472 | nb_clusters = size_to_clusters(s, size); | |
1473 | refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t)); | |
1474 | ||
1475 | res->bfi.total_clusters = | |
1476 | size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE); | |
1477 | ||
1478 | /* header */ | |
1479 | inc_refcounts(bs, res, refcount_table, nb_clusters, | |
1480 | 0, s->cluster_size); | |
1481 | ||
1482 | /* current L1 table */ | |
1483 | ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, | |
1484 | s->l1_table_offset, s->l1_size, | |
1485 | CHECK_OFLAG_COPIED | CHECK_FRAG_INFO); | |
1486 | if (ret < 0) { | |
1487 | goto fail; | |
1488 | } | |
1489 | ||
1490 | /* snapshots */ | |
1491 | for(i = 0; i < s->nb_snapshots; i++) { | |
1492 | sn = s->snapshots + i; | |
1493 | ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, | |
1494 | sn->l1_table_offset, sn->l1_size, 0); | |
1495 | if (ret < 0) { | |
1496 | goto fail; | |
1497 | } | |
1498 | } | |
1499 | inc_refcounts(bs, res, refcount_table, nb_clusters, | |
1500 | s->snapshots_offset, s->snapshots_size); | |
1501 | ||
1502 | /* refcount data */ | |
1503 | inc_refcounts(bs, res, refcount_table, nb_clusters, | |
1504 | s->refcount_table_offset, | |
1505 | s->refcount_table_size * sizeof(uint64_t)); | |
1506 | ||
1507 | for(i = 0; i < s->refcount_table_size; i++) { | |
1508 | uint64_t offset, cluster; | |
1509 | offset = s->refcount_table[i]; | |
1510 | cluster = offset >> s->cluster_bits; | |
1511 | ||
1512 | /* Refcount blocks are cluster aligned */ | |
1513 | if (offset & (s->cluster_size - 1)) { | |
1514 | fprintf(stderr, "ERROR refcount block %" PRId64 " is not " | |
1515 | "cluster aligned; refcount table entry corrupted\n", i); | |
1516 | res->corruptions++; | |
1517 | continue; | |
1518 | } | |
1519 | ||
1520 | if (cluster >= nb_clusters) { | |
1521 | fprintf(stderr, "ERROR refcount block %" PRId64 | |
1522 | " is outside image\n", i); | |
1523 | res->corruptions++; | |
1524 | continue; | |
1525 | } | |
1526 | ||
1527 | if (offset != 0) { | |
1528 | inc_refcounts(bs, res, refcount_table, nb_clusters, | |
1529 | offset, s->cluster_size); | |
1530 | if (refcount_table[cluster] != 1) { | |
1531 | fprintf(stderr, "%s refcount block %" PRId64 | |
1532 | " refcount=%d\n", | |
1533 | fix & BDRV_FIX_ERRORS ? "Repairing" : | |
1534 | "ERROR", | |
1535 | i, refcount_table[cluster]); | |
1536 | ||
1537 | if (fix & BDRV_FIX_ERRORS) { | |
1538 | int64_t new_offset; | |
1539 | ||
1540 | new_offset = realloc_refcount_block(bs, i, offset); | |
1541 | if (new_offset < 0) { | |
1542 | res->corruptions++; | |
1543 | continue; | |
1544 | } | |
1545 | ||
1546 | /* update refcounts */ | |
1547 | if ((new_offset >> s->cluster_bits) >= nb_clusters) { | |
1548 | /* increase refcount_table size if necessary */ | |
1549 | int old_nb_clusters = nb_clusters; | |
1550 | nb_clusters = (new_offset >> s->cluster_bits) + 1; | |
1551 | refcount_table = g_realloc(refcount_table, | |
1552 | nb_clusters * sizeof(uint16_t)); | |
1553 | memset(&refcount_table[old_nb_clusters], 0, (nb_clusters | |
1554 | - old_nb_clusters) * sizeof(uint16_t)); | |
1555 | } | |
1556 | refcount_table[cluster]--; | |
1557 | inc_refcounts(bs, res, refcount_table, nb_clusters, | |
1558 | new_offset, s->cluster_size); | |
1559 | ||
1560 | res->corruptions_fixed++; | |
1561 | } else { | |
1562 | res->corruptions++; | |
1563 | } | |
1564 | } | |
1565 | } | |
1566 | } | |
1567 | ||
    /* compare the refcounts stored in the image against the references
     * counted above */
    for (i = 0, highest_cluster = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        if (refcount1 < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                i, strerror(-refcount1));
            res->check_errors++;
            continue;
        }

        refcount2 = refcount_table[i];

        if (refcount1 > 0 || refcount2 > 0) {
            highest_cluster = i;
        }

        if (refcount1 != refcount2) {

            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
                   num_fixed != NULL     ? "Repairing" :
                   refcount1 < refcount2 ? "ERROR" :
                                           "Leaked",
                   i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount2 - refcount1,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't (or weren't allowed to) fix it, count the
             * mismatch as a corruption or a leak */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}
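
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * drive the check/repair pass above and read back the BdrvCheckResult
 * counters it fills in.  bdrv_check() is assumed to dispatch to the qcow2
 * checker; the mapping of the repair flags shown here mirrors what a tool
 * like qemu-img does, but is only an example.
 */
#if 0
static int example_check_and_repair(BlockDriverState *bs, bool repair_all)
{
    BdrvCheckResult result = {0};
    BdrvCheckMode fix = repair_all ? BDRV_FIX_LEAKS | BDRV_FIX_ERRORS
                                   : BDRV_FIX_LEAKS;
    int ret = bdrv_check(bs, &result, fix);

    if (ret < 0 || result.check_errors) {
        /* the check itself failed; the counters may be incomplete */
        return ret < 0 ? ret : -EIO;
    }

    fprintf(stderr, "%d corruptions (%d fixed), %d leaks (%d fixed)\n",
            result.corruptions, result.corruptions_fixed,
            result.leaks, result.leaks_fixed);
    return 0;
}
#endif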
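/*
 * Note that overlaps_with() relies on variables named "offset" and "size"
 * being in scope at the point of use; in this file it is only used inside
 * qcow2_check_metadata_overlap() below, where they are the function's
 * parameters.
 */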
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The chk parameter specifies exactly which checks to perform (a bitmask of
 * QCow2MetadataOverlap values).
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int chk, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                    s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                    s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                    s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

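    /*
     * Inactive L2 tables are only reachable through the snapshots' L1
     * tables, which are not kept in memory, so each snapshot L1 table is
     * read from disk temporarily for this check.
     */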
    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz = s->snapshots[i].l1_size;
            uint64_t *l1 = g_malloc(l1_sz * sizeof(uint64_t));
            int ret;

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz * sizeof(uint64_t));

            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                /* L1 entries are stored big-endian on disk */
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
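
/*
 * Illustrative sketch (not part of the original file): checking a candidate
 * allocation against all in-memory metadata before using it.  The particular
 * chk mask, the decision to skip the L2 checks, and the use of one cluster
 * as the length are assumptions made for the example only; callers choose
 * the mask and range to suit their context.
 */
#if 0
static int example_overlap_check(BlockDriverState *bs, int64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    /* check the constant-time and cached structures, but not L2 tables */
    int chk = QCOW2_OL_MAIN_HEADER | QCOW2_OL_ACTIVE_L1
            | QCOW2_OL_REFCOUNT_TABLE | QCOW2_OL_REFCOUNT_BLOCK
            | QCOW2_OL_SNAPSHOT_TABLE | QCOW2_OL_INACTIVE_L1;
    int ret = qcow2_check_metadata_overlap(bs, chk, cluster_offset,
                                           s->cluster_size);

    if (ret > 0) {
        /* bit "ret" names the metadata section that would be overwritten */
        return -EIO;
    }
    return ret; /* 0 on success, -errno if the check itself failed */
}
#endif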
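/* Human-readable names for the QCOW2_OL_* bits, indexed by bit number and
 * used for the error message and the corruption event emitted below. */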
static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int chk, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, chk, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ffs(ret) - 1;
        char *message;
        QObject *data;

        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        fprintf(stderr, "qcow2: Preventing invalid write on metadata (overlaps "
                "with %s); image marked as corrupt.\n",
                metadata_ol_names[metadata_ol_bitnr]);
        message = g_strdup_printf("Prevented %s overwrite",
                metadata_ol_names[metadata_ol_bitnr]);
        data = qobject_from_jsonf("{ 'device': %s, 'msg': %s, 'offset': %"
                PRId64 ", 'size': %" PRId64 " }", bs->device_name, message,
                offset, size);
        monitor_protocol_event(QEVENT_BLOCK_IMAGE_CORRUPTED, data);
        g_free(message);
        qobject_decref(data);

        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
        return -EIO;
    }

    return 0;
}
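
/*
 * Illustrative sketch (not part of the original file): a write path would
 * typically run the pre-write check right before issuing the actual write.
 * The function name, the l2_offset/l2_index/l2_entry_be parameters and the
 * chosen chk mask are hypothetical and exist only for this example.
 */
#if 0
static int example_update_l2_entry(BlockDriverState *bs, uint64_t l2_offset,
                                   int l2_index, uint64_t l2_entry_be)
{
    int64_t entry_offset = l2_offset + l2_index * sizeof(uint64_t);
    int ret;

    ret = qcow2_pre_write_overlap_check(bs,
            QCOW2_OL_MAIN_HEADER | QCOW2_OL_ACTIVE_L1 | QCOW2_OL_REFCOUNT_TABLE
            | QCOW2_OL_REFCOUNT_BLOCK | QCOW2_OL_SNAPSHOT_TABLE
            | QCOW2_OL_INACTIVE_L1,
            entry_offset, sizeof(uint64_t));
    if (ret < 0) {
        /* Either the check itself failed, or an overlap was detected; in the
         * latter case the image is already marked corrupt and the BDS has
         * been disabled, so the write is simply abandoned. */
        return ret;
    }

    ret = bdrv_pwrite(bs->file, entry_offset, &l2_entry_be,
                      sizeof(l2_entry_be));
    return ret < 0 ? ret : 0;
}
#endif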