/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

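/*
 * Drops the tail of the L1 table down to @exact_size entries: the now-unused
 * part of the on-disk table is zeroed, the L2 tables that those entries
 * pointed to are freed, and the corresponding in-memory entries are cleared.
 * Does nothing if the table already has no more than @exact_size entries.
 * Note that this function itself does not update s->l1_size.
 */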
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

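/*
 * Grows the L1 table to at least @min_size entries (or to exactly @min_size
 * if @exact_size is true). A new, larger table is allocated and written to
 * the image, the image header is updated to point to it, and the clusters
 * used by the old table are freed.
 */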
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, new_l1_size2);

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = l2_entry_size(s) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes an L1 entry to disk (note that depending on the alignment
 * requirements this function may write more than just one entry in
 * order to prevent bdrv_pwrite from performing a read-modify-write)
 */
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    int l1_start_index;
    int i, ret;
    int bufsize = MAX(sizeof(uint64_t),
                      MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
    int nentries = bufsize / sizeof(uint64_t);
    g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);

    if (buf == NULL) {
        return -ENOMEM;
    }

    l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries);
    for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, bufsize, false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, bufsize);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * l2_entry_size(s);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * For a given L2 entry, count the number of contiguous subclusters of
 * the same type starting from @sc_from. Compressed clusters are
 * treated as if they were divided into subclusters of size
 * s->subcluster_size.
 *
 * Return the number of contiguous subclusters and set @type to the
 * subcluster type.
 *
 * If the L2 entry is invalid return -errno and set @type to
 * QCOW2_SUBCLUSTER_INVALID.
 */
static int qcow2_get_subcluster_range_type(BlockDriverState *bs,
                                           uint64_t l2_entry,
                                           uint64_t l2_bitmap,
                                           unsigned sc_from,
                                           QCow2SubclusterType *type)
{
    BDRVQcow2State *s = bs->opaque;
    uint32_t val;

    *type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_from);

    if (*type == QCOW2_SUBCLUSTER_INVALID) {
        return -EINVAL;
    } else if (!has_subclusters(s) || *type == QCOW2_SUBCLUSTER_COMPRESSED) {
        return s->subclusters_per_cluster - sc_from;
    }

    switch (*type) {
    case QCOW2_SUBCLUSTER_NORMAL:
        val = l2_bitmap | QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        val = (l2_bitmap | QCOW_OFLAG_SUB_ZERO_RANGE(0, sc_from)) >> 32;
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
        val = ((l2_bitmap >> 32) | l2_bitmap)
            & ~QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return ctz32(val) - sc_from;

    default:
        g_assert_not_reached();
    }
}

/*
 * Return the number of contiguous subclusters of the exact same type
 * in a given L2 slice, starting from cluster @l2_index, subcluster
 * @sc_index. Allocated subclusters are required to be contiguous in
 * the image file.
 * At most @nb_clusters are checked (note that this means clusters,
 * not subclusters).
 * Compressed clusters are always processed one by one but for the
 * purpose of this count they are treated as if they were divided into
 * subclusters of size s->subcluster_size.
 * On failure return -errno and update @l2_index to point to the
 * invalid entry.
 */
static int count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters,
                                        unsigned sc_index, uint64_t *l2_slice,
                                        unsigned *l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    int i, count = 0;
    bool check_offset = false;
    uint64_t expected_offset = 0;
    QCow2SubclusterType expected_type = QCOW2_SUBCLUSTER_NORMAL, type;

    assert(*l2_index + nb_clusters <= s->l2_slice_size);

    for (i = 0; i < nb_clusters; i++) {
        unsigned first_sc = (i == 0) ? sc_index : 0;
        uint64_t l2_entry = get_l2_entry(s, l2_slice, *l2_index + i);
        uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, *l2_index + i);
        int ret = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                  first_sc, &type);
        if (ret < 0) {
            *l2_index += i; /* Point to the invalid entry */
            return -EIO;
        }
        if (i == 0) {
            if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
                /* Compressed clusters are always processed one by one */
                return ret;
            }
            expected_type = type;
            expected_offset = l2_entry & L2E_OFFSET_MASK;
            check_offset = (type == QCOW2_SUBCLUSTER_NORMAL ||
                            type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
                            type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC);
        } else if (type != expected_type) {
            break;
        } else if (check_offset) {
            expected_offset += s->cluster_size;
            if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) {
                break;
            }
        }
        count += ret;
        /* Stop if there are type changes before the end of the cluster */
        if (first_sc + ret < s->subclusters_per_cluster) {
            break;
        }
    }

    return count;
}

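/*
 * Reads the existing guest data for a copy-on-write region into @qiov.
 * The read calls the driver's read function directly rather than going
 * through the public block-layer interface (see the comment in the
 * function body).
 */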
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

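/*
 * Writes copy-on-write data from @qiov to the newly allocated cluster,
 * after checking that the write does not overlap with any qcow2 metadata.
 */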
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_host_offset
 *
 * For a given offset of the virtual disk find the equivalent host
 * offset in the qcow2 file and store it in *host_offset. Neither
 * offset needs to be aligned to a cluster boundary.
 *
 * If the cluster is unallocated then *host_offset will be 0.
 * If the cluster is compressed then *host_offset will contain the
 * complete compressed cluster descriptor.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * subcluster type and (if applicable) are stored contiguously in the image
 * file. The subcluster type is stored in *subcluster_type.
 * Compressed clusters are always processed one by one.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
                          unsigned int *bytes, uint64_t *host_offset,
                          QCow2SubclusterType *subcluster_type)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index, sc_index;
    uint64_t l1_index, l2_offset, *l2_slice, l2_entry, l2_bitmap;
    int sc;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2SubclusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *host_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    sc_index = offset_to_sc_index(s, offset);
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);
    if (s->qcow_version < 3 && (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
                                type == QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_SUBCLUSTER_INVALID:
        break; /* This is handled by count_contiguous_subclusters() below */
    case QCOW2_SUBCLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
        break;
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
    case QCOW2_SUBCLUSTER_NORMAL:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: {
        uint64_t host_cluster_offset = l2_entry & L2E_OFFSET_MASK;
        *host_offset = host_cluster_offset + offset_in_cluster;
        if (offset_into_cluster(s, host_cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", host_cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *host_offset != offset) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", host_cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    }
    default:
        abort();
    }

    sc = count_contiguous_subclusters(bs, nb_clusters, sc_index,
                                      l2_slice, &l2_index);
    if (sc < 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster entry found "
                                " (L2 offset: %#" PRIx64 ", L2 index: %#x)",
                                l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = ((int64_t)sc + sc_index) << s->subcluster_bits;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    *subcluster_type = type;

    return 0;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = get_l2_entry(s, l2_slice, l2_index);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    /* The offset and size must fit in their fields of the L2 table entry */
    assert((cluster_offset & s->cluster_offset_mask) == cluster_offset);
    assert((nb_csectors & s->csize_mask) == nb_csectors);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    set_l2_entry(s, l2_slice, l2_index, cluster_offset);
    if (has_subclusters(s)) {
        set_l2_bitmap(s, l2_slice, l2_index, 0);
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}

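/*
 * Performs the copy on write described by @m: the existing data of the COW
 * regions before and after the written area is read, encrypted if the image
 * is encrypted, and written to the newly allocated cluster. If the guest
 * data is available in m->data_qiov, the COW regions and the guest data are
 * written out in a single request.
 */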
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + start->offset,
                               m->offset + start->offset,
                               start_buffer, start->nb_bytes);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + end->offset,
                               m->offset + end->offset,
                               end_buffer, end->nb_bytes);
        if (ret < 0) {
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

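/*
 * Links the clusters described by @m into the L2 table: performs the copy on
 * write for the unmodified parts of the clusters, sets the new L2 entries
 * (and subcluster bitmaps, if the image has subclusters) and, unless the old
 * clusters are to be kept, decreases the refcount of the clusters that were
 * replaced by this allocation.
 */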
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * perform_cow()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if (get_l2_entry(s, l2_slice, l2_index + i) != 0) {
            old_cluster[j++] = get_l2_entry(s, l2_slice, l2_index + i);
        }

        /* The offset must fit in the offset field of the L2 table entry */
        assert((offset & L2E_OFFSET_MASK) == offset);

        set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED);

        /* Update bitmap with the subclusters that were just written */
        if (has_subclusters(s) && !m->prealloc) {
            uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
            unsigned written_from = m->cow_start.offset;
            unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes ?:
                m->nb_clusters << s->cluster_bits;
            int first_sc, last_sc;
            /* Narrow written_from and written_to down to the current cluster */
            written_from = MAX(written_from, i << s->cluster_bits);
            written_to = MIN(written_to, (i + 1) << s->cluster_bits);
            assert(written_from < written_to);
            first_sc = offset_to_sc_index(s, written_from);
            last_sc = offset_to_sc_index(s, written_to - 1);
            l2_bitmap |= QCOW_OFLAG_SUB_ALLOC_RANGE(first_sc, last_sc + 1);
            l2_bitmap &= ~QCOW_OFLAG_SUB_ZERO_RANGE(first_sc, last_sc + 1);
            set_l2_bitmap(s, l2_slice, l2_index + i, l2_bitmap);
        }
    }


    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, old_cluster[i], 1, QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    if (!has_data_file(bs) && !m->keep_old_clusters) {
        qcow2_free_clusters(bs, m->alloc_offset,
                            m->nb_clusters << s->cluster_bits,
                            QCOW2_DISCARD_NEVER);
    }
}

/*
 * For a given write request, create a new QCowL2Meta structure, add
 * it to @m and the BDRVQcow2State.cluster_allocs list. If the write
 * request does not need copy-on-write or changes to the L2 metadata
 * then this function does nothing.
 *
 * @host_cluster_offset points to the beginning of the first cluster.
 *
 * @guest_offset and @bytes indicate the offset and length of the
 * request.
 *
 * @l2_slice contains the L2 entries of all clusters involved in this
 * write request.
 *
 * If @keep_old is true it means that the clusters were already
 * allocated and will be overwritten. If false then the clusters are
 * new and we have to decrease the reference count of the old ones.
 *
 * Returns 0 on success, -errno on failure.
 */
static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
                             uint64_t guest_offset, unsigned bytes,
                             uint64_t *l2_slice, QCowL2Meta **m, bool keep_old)
{
    BDRVQcow2State *s = bs->opaque;
    int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
    uint64_t l2_entry, l2_bitmap;
    unsigned cow_start_from, cow_end_to;
    unsigned cow_start_to = offset_into_cluster(s, guest_offset);
    unsigned cow_end_from = cow_start_to + bytes;
    unsigned nb_clusters = size_to_clusters(s, cow_end_from);
    QCowL2Meta *old_m = *m;
    QCow2SubclusterType type;
    int i;
    bool skip_cow = keep_old;

    assert(nb_clusters <= s->l2_slice_size - l2_index);

    /* Check the type of all affected subclusters */
    for (i = 0; i < nb_clusters; i++) {
        l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
        if (skip_cow) {
            unsigned write_from = MAX(cow_start_to, i << s->cluster_bits);
            unsigned write_to = MIN(cow_end_from, (i + 1) << s->cluster_bits);
            int first_sc = offset_to_sc_index(s, write_from);
            int last_sc = offset_to_sc_index(s, write_to - 1);
            int cnt = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                      first_sc, &type);
            /* Is any of the subclusters of type != QCOW2_SUBCLUSTER_NORMAL ? */
            if (type != QCOW2_SUBCLUSTER_NORMAL || first_sc + cnt <= last_sc) {
                skip_cow = false;
            }
        } else {
            /* If we can't skip the cow we can still look for invalid entries */
            type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, 0);
        }
        if (type == QCOW2_SUBCLUSTER_INVALID) {
            int l1_index = offset_to_l1_index(s, guest_offset);
            uint64_t l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
            qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster "
                                    "entry found (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)",
                                    l2_offset, l2_index + i);
            return -EIO;
        }
    }

    if (skip_cow) {
        return 0;
    }

    /* Get the L2 entry of the first cluster */
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_start_from = 0;
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            if (has_subclusters(s)) {
                /* Skip all leading zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_start_from =
                    MIN(sc_index, ctz32(alloc_bitmap)) << s->subcluster_bits;
            } else {
                cow_start_from = 0;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_start_from = cow_start_to;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Get the L2 entry of the last cluster */
    l2_index += nb_clusters - 1;
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset + bytes - 1);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            if (has_subclusters(s)) {
                /* Skip all trailing zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_end_to -=
                    MIN(s->subclusters_per_cluster - sc_index - 1,
                        clz32(alloc_bitmap)) << s->subcluster_bits;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_end_to = cow_end_from;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    }

    *m = g_malloc0(sizeof(**m));
    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = host_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old,

        .cow_start = {
            .offset     = cow_start_from,
            .nb_bytes   = cow_start_to - cow_start_from,
        },
        .cow_end = {
            .offset     = cow_end_from,
            .nb_bytes   = cow_end_to - cow_end_from,
        },
    };

    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    return 0;
}

/*
 * Returns true if writing to the cluster pointed to by @l2_entry
 * requires a new allocation (that is, if the cluster is unallocated
 * or has refcount > 1 and therefore cannot be written in-place).
 */
static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
{
    switch (qcow2_get_cluster_type(bs, l2_entry)) {
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO_ALLOC:
        if (l2_entry & QCOW_OFLAG_COPIED) {
            return false;
        }
        /* fallthrough */
    case QCOW2_CLUSTER_UNALLOCATED:
    case QCOW2_CLUSTER_COMPRESSED:
    case QCOW2_CLUSTER_ZERO_PLAIN:
        return true;
    default:
        abort();
    }
}

bf319ece | 1333 | /* |
57538c86 AG |
1334 | * Returns the number of contiguous clusters that can be written to |
1335 | * using one single write request, starting from @l2_index. | |
1336 | * At most @nb_clusters are checked. | |
1337 | * | |
1338 | * If @new_alloc is true this counts clusters that are either | |
1339 | * unallocated, or allocated but with refcount > 1 (so they need to be | |
1340 | * newly allocated and COWed). | |
1341 | * | |
1342 | * If @new_alloc is false this counts clusters that are already | |
1343 | * allocated and can be overwritten in-place (this includes clusters | |
1344 | * of type QCOW2_CLUSTER_ZERO_ALLOC). | |
bf319ece | 1345 | */ |
57538c86 AG |
1346 | static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters, |
1347 | uint64_t *l2_slice, int l2_index, | |
1348 | bool new_alloc) | |
bf319ece | 1349 | { |
57538c86 | 1350 | BDRVQcow2State *s = bs->opaque; |
12c6aebe | 1351 | uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index); |
57538c86 | 1352 | uint64_t expected_offset = l2_entry & L2E_OFFSET_MASK; |
143550a8 | 1353 | int i; |
bf319ece | 1354 | |
143550a8 | 1355 | for (i = 0; i < nb_clusters; i++) { |
12c6aebe | 1356 | l2_entry = get_l2_entry(s, l2_slice, l2_index + i); |
57538c86 | 1357 | if (cluster_needs_new_alloc(bs, l2_entry) != new_alloc) { |
bf319ece | 1358 | break; |
143550a8 | 1359 | } |
57538c86 AG |
1360 | if (!new_alloc) { |
1361 | if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) { | |
1362 | break; | |
1363 | } | |
1364 | expected_offset += s->cluster_size; | |
1365 | } | |
bf319ece KW |
1366 | } |
1367 | ||
1368 | assert(i <= nb_clusters); | |
1369 | return i; | |
1370 | } | |
1371 | ||
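For the in-place case, count_single_write_clusters() ends the run as soon as an L2 entry is no longer physically contiguous with the previous one. A standalone sketch of that contiguity test (not part of this file; the host offsets and cluster size are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t cluster_size = 64 * 1024;
        uint64_t host_offsets[] = { 0x500000, 0x510000, 0x520000, 0x900000 };
        int nb_clusters = 4, i;

        uint64_t expected = host_offsets[0];
        for (i = 0; i < nb_clusters; i++) {
            if (host_offsets[i] != expected) {
                break;              /* run of contiguous clusters ends here */
            }
            expected += cluster_size;
        }
        printf("%d clusters can go into one write request\n", i);
        return 0;
    }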
250196f1 | 1372 | /* |
226c3c26 KW |
1373 | * Check if there already is an AIO write request in flight which allocates |
1374 | * the same cluster. In this case we need to wait until the previous | |
1375 | * request has completed and updated the L2 table accordingly. | |
65eb2e35 KW |
1376 | * |
1377 | * Returns: | |
1378 | * 0 if there was no dependency. *cur_bytes indicates the number of | |
1379 | * bytes from guest_offset that can be read before the next | |
1380 | * dependency must be processed (or the request is complete) | |
1381 | * | |
1382 | * -EAGAIN if we had to wait for another request, previously gathered | |
1383 | * information on cluster allocation may be invalid now. The caller | |
1384 | * must start over anyway, so consider *cur_bytes undefined. | |
250196f1 | 1385 | */ |
226c3c26 | 1386 | static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, |
ecdd5333 | 1387 | uint64_t *cur_bytes, QCowL2Meta **m) |
250196f1 | 1388 | { |
ff99129a | 1389 | BDRVQcow2State *s = bs->opaque; |
250196f1 | 1390 | QCowL2Meta *old_alloc; |
65eb2e35 | 1391 | uint64_t bytes = *cur_bytes; |
250196f1 | 1392 | |
250196f1 KW |
1393 | QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { |
1394 | ||
65eb2e35 KW |
1395 | uint64_t start = guest_offset; |
1396 | uint64_t end = start + bytes; | |
d53ec3d8 AG |
1397 | uint64_t old_start = start_of_cluster(s, l2meta_cow_start(old_alloc)); |
1398 | uint64_t old_end = ROUND_UP(l2meta_cow_end(old_alloc), s->cluster_size); | |
250196f1 | 1399 | |
d9d74f41 | 1400 | if (end <= old_start || start >= old_end) { |
250196f1 KW |
1401 | /* No intersection */ |
1402 | } else { | |
1403 | if (start < old_start) { | |
1404 | /* Stop at the start of a running allocation */ | |
65eb2e35 | 1405 | bytes = old_start - start; |
250196f1 | 1406 | } else { |
65eb2e35 | 1407 | bytes = 0; |
250196f1 KW |
1408 | } |
1409 | ||
ecdd5333 KW |
1410 | /* Stop if an l2meta already exists. After yielding, it wouldn't |
1411 | * be valid any more, so we'd have to clean up the old L2Metas | |
1412 | * and deal with requests depending on them before starting to | |
1413 | * gather new ones. Not worth the trouble. */ | |
1414 | if (bytes == 0 && *m) { | |
1415 | *cur_bytes = 0; | |
1416 | return 0; | |
1417 | } | |
1418 | ||
65eb2e35 | 1419 | if (bytes == 0) { |
250196f1 KW |
1420 | /* Wait for the dependency to complete. We need to recheck |
1421 | * the free/allocated clusters when we continue. */ | |
1ace7cea | 1422 | qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); |
250196f1 KW |
1423 | return -EAGAIN; |
1424 | } | |
1425 | } | |
1426 | } | |
1427 | ||
65eb2e35 KW |
1428 | /* Make sure that existing clusters and new allocations are only used up to |
1429 | * the next dependency if we shortened the request above */ | |
1430 | *cur_bytes = bytes; | |
250196f1 | 1431 | |
226c3c26 KW |
1432 | return 0; |
1433 | } | |
1434 | ||
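The loop above is essentially an interval-intersection test between the new request and each in-flight allocation. The standalone sketch below (not part of this file; the helper name and values are made up) reproduces the three outcomes: left untouched, shortened to stop at the older allocation, or reduced to zero bytes so the caller has to wait.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns true if the new request must wait for the old allocation. */
    static bool clip_against_old_alloc(uint64_t start, uint64_t *bytes,
                                       uint64_t old_start, uint64_t old_end)
    {
        uint64_t end = start + *bytes;

        if (end <= old_start || start >= old_end) {
            return false;                /* no intersection */
        }
        if (start < old_start) {
            *bytes = old_start - start;  /* stop right before the old allocation */
            return false;
        }
        *bytes = 0;                      /* overlaps from the start: must wait */
        return true;
    }

    int main(void)
    {
        uint64_t bytes = 0x40000;
        bool wait = clip_against_old_alloc(0x100000, &bytes, 0x120000, 0x140000);
        printf("wait=%d, shortened to 0x%llx bytes\n",
               wait, (unsigned long long)bytes);
        return 0;
    }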
0af729ec | 1435 | /* |
57538c86 AG |
1436 | * Checks how many of the clusters at the given guest_offset are already | |
1437 | * allocated and need no new allocation (counting up to *bytes). | |
1438 | * If *host_offset is not INV_OFFSET, only physically contiguous clusters | |
1439 | * beginning at this host offset are counted. | |
0af729ec | 1440 | * |
411d62b0 KW |
1441 | * Note that guest_offset may not be cluster aligned. In this case, the |
1442 | * returned *host_offset points to the exact byte referenced by guest_offset | |
1443 | * and therefore isn't cluster aligned either. | |
0af729ec KW |
1444 | * |
1445 | * Returns: | |
1446 | * 0: if no allocated clusters are available at the given offset. | |
1447 | * *bytes is normally unchanged. It is set to 0 if the cluster | |
57538c86 AG |
1448 | * is allocated and can be overwritten in-place but doesn't have |
1449 | * the right physical offset. | |
0af729ec | 1450 | * |
57538c86 AG |
1451 | * 1: if allocated clusters that can be overwritten in place are |
1452 | * available at the requested offset. *bytes may have decreased | |
1453 | * and describes the length of the area that can be written to. | |
0af729ec KW |
1454 | * |
1455 | * -errno: in error cases | |
0af729ec KW |
1456 | */ |
1457 | static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, | |
c53ede9f | 1458 | uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) |
0af729ec | 1459 | { |
ff99129a | 1460 | BDRVQcow2State *s = bs->opaque; |
0af729ec | 1461 | int l2_index; |
57538c86 | 1462 | uint64_t l2_entry, cluster_offset; |
cde91766 | 1463 | uint64_t *l2_slice; |
b6d36def | 1464 | uint64_t nb_clusters; |
c53ede9f | 1465 | unsigned int keep_clusters; |
a3f1afb4 | 1466 | int ret; |
0af729ec KW |
1467 | |
1468 | trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset, | |
1469 | *bytes); | |
0af729ec | 1470 | |
c6d619cc KW |
1471 | assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset) |
1472 | == offset_into_cluster(s, *host_offset)); | |
411d62b0 | 1473 | |
acb0467f | 1474 | /* |
cde91766 | 1475 | * Calculate the number of clusters to look for. We stop at L2 slice |
acb0467f KW |
1476 | * boundaries to keep things simple. |
1477 | */ | |
1478 | nb_clusters = | |
1479 | size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); | |
1480 | ||
cde91766 AG |
1481 | l2_index = offset_to_l2_slice_index(s, guest_offset); |
1482 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); | |
57538c86 AG |
1483 | /* Limit total byte count to BDRV_REQUEST_MAX_BYTES */ |
1484 | nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits); | |
acb0467f | 1485 | |
0af729ec | 1486 | /* Find L2 entry for the first involved cluster */ |
cde91766 | 1487 | ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); |
0af729ec KW |
1488 | if (ret < 0) { |
1489 | return ret; | |
1490 | } | |
1491 | ||
12c6aebe | 1492 | l2_entry = get_l2_entry(s, l2_slice, l2_index); |
57538c86 AG |
1493 | cluster_offset = l2_entry & L2E_OFFSET_MASK; |
1494 | ||
1495 | if (!cluster_needs_new_alloc(bs, l2_entry)) { | |
1496 | if (offset_into_cluster(s, cluster_offset)) { | |
1497 | qcow2_signal_corruption(bs, true, -1, -1, "%s cluster offset " | |
1498 | "%#" PRIx64 " unaligned (guest offset: %#" | |
1499 | PRIx64 ")", l2_entry & QCOW_OFLAG_ZERO ? | |
1500 | "Preallocated zero" : "Data", | |
1501 | cluster_offset, guest_offset); | |
a97c67ee HR |
1502 | ret = -EIO; |
1503 | goto out; | |
1504 | } | |
1505 | ||
57538c86 AG |
1506 | /* If a specific host_offset is required, check it */ |
1507 | if (*host_offset != INV_OFFSET && cluster_offset != *host_offset) { | |
e62daaf6 KW |
1508 | *bytes = 0; |
1509 | ret = 0; | |
1510 | goto out; | |
1511 | } | |
1512 | ||
0af729ec | 1513 | /* We keep all QCOW_OFLAG_COPIED clusters */ |
57538c86 AG |
1514 | keep_clusters = count_single_write_clusters(bs, nb_clusters, l2_slice, |
1515 | l2_index, false); | |
c53ede9f KW |
1516 | assert(keep_clusters <= nb_clusters); |
1517 | ||
1518 | *bytes = MIN(*bytes, | |
1519 | keep_clusters * s->cluster_size | |
1520 | - offset_into_cluster(s, guest_offset)); | |
57538c86 AG |
1521 | assert(*bytes != 0); |
1522 | ||
d53ec3d8 AG |
1523 | ret = calculate_l2_meta(bs, cluster_offset, guest_offset, |
1524 | *bytes, l2_slice, m, true); | |
1525 | if (ret < 0) { | |
1526 | goto out; | |
1527 | } | |
0af729ec KW |
1528 | |
1529 | ret = 1; | |
1530 | } else { | |
0af729ec KW |
1531 | ret = 0; |
1532 | } | |
1533 | ||
0af729ec | 1534 | /* Cleanup */ |
e62daaf6 | 1535 | out: |
cde91766 | 1536 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
0af729ec | 1537 | |
e62daaf6 KW |
1538 | /* Only return a host offset if we actually made progress. Otherwise we |
1539 | * would impose requirements on handle_alloc() that it cannot fulfill */ | |
a97c67ee | 1540 | if (ret > 0) { |
57538c86 | 1541 | *host_offset = cluster_offset + offset_into_cluster(s, guest_offset); |
e62daaf6 KW |
1542 | } |
1543 | ||
0af729ec KW |
1544 | return ret; |
1545 | } | |
1546 | ||
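When handle_copied() finds a run of keep_clusters clusters that can be overwritten in place, it clamps *bytes so the caller does not run past that run. A standalone sketch of that clamping (not part of this file; all sizes are hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t cluster_size  = 64 * 1024;
        const uint64_t guest_offset  = 0x12345000;        /* unaligned start */
        uint64_t bytes               = 5 * cluster_size;  /* requested length */
        const uint64_t keep_clusters = 2;                 /* contiguous COPIED run */

        uint64_t into_cluster = guest_offset % cluster_size;
        uint64_t usable = keep_clusters * cluster_size - into_cluster;
        if (bytes > usable) {
            bytes = usable;  /* the rest is handled by the next gathering step */
        }
        printf("can overwrite %" PRIu64 " bytes in place\n", bytes);
        return 0;
    }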
226c3c26 KW |
1547 | /* |
1548 | * Allocates new clusters for the given guest_offset. | |
1549 | * | |
1550 | * At most *nb_clusters are allocated, and on return *nb_clusters is updated to | |
1551 | * contain the number of clusters that have been allocated and are contiguous | |
1552 | * in the image file. | |
1553 | * | |
c6d619cc KW |
1554 | * If *host_offset is not INV_OFFSET, it specifies the offset in the image file |
1555 | * at which the new clusters must start. *nb_clusters can be 0 on return in | |
1556 | * this case if the cluster at host_offset is already in use. If *host_offset | |
1557 | * is INV_OFFSET, the clusters can be allocated anywhere in the image file. | |
226c3c26 KW |
1558 | * |
1559 | * *host_offset is updated to contain the offset into the image file at which | |
1560 | * the first allocated cluster starts. | |
1561 | * | |
1562 | * Return 0 on success and -errno in error cases. -EAGAIN means that the | |
1563 | * function has been waiting for another request and the allocation must be | |
1564 | * restarted, but the whole request should not be failed. | |
1565 | */ | |
1566 | static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, | |
b6d36def | 1567 | uint64_t *host_offset, uint64_t *nb_clusters) |
226c3c26 | 1568 | { |
ff99129a | 1569 | BDRVQcow2State *s = bs->opaque; |
226c3c26 KW |
1570 | |
1571 | trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset, | |
1572 | *host_offset, *nb_clusters); | |
1573 | ||
966b000f KW |
1574 | if (has_data_file(bs)) { |
1575 | assert(*host_offset == INV_OFFSET || | |
1576 | *host_offset == start_of_cluster(s, guest_offset)); | |
1577 | *host_offset = start_of_cluster(s, guest_offset); | |
1578 | return 0; | |
1579 | } | |
1580 | ||
250196f1 KW |
1581 | /* Allocate new clusters */ |
1582 | trace_qcow2_cluster_alloc_phys(qemu_coroutine_self()); | |
c6d619cc | 1583 | if (*host_offset == INV_OFFSET) { |
df021791 KW |
1584 | int64_t cluster_offset = |
1585 | qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size); | |
1586 | if (cluster_offset < 0) { | |
1587 | return cluster_offset; | |
1588 | } | |
1589 | *host_offset = cluster_offset; | |
1590 | return 0; | |
250196f1 | 1591 | } else { |
b6d36def | 1592 | int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters); |
df021791 KW |
1593 | if (ret < 0) { |
1594 | return ret; | |
1595 | } | |
1596 | *nb_clusters = ret; | |
1597 | return 0; | |
250196f1 | 1598 | } |
250196f1 KW |
1599 | } |
1600 | ||
10f0ed8b | 1601 | /* |
57538c86 AG |
1602 | * Allocates new clusters for an area that is either still unallocated or |
1603 | * cannot be overwritten in-place. If *host_offset is not INV_OFFSET, | |
1604 | * clusters are only allocated if the new allocation can match the specified | |
1605 | * host offset. | |
10f0ed8b | 1606 | * |
411d62b0 KW |
1607 | * Note that guest_offset may not be cluster aligned. In this case, the |
1608 | * returned *host_offset points to the exact byte referenced by guest_offset | |
1609 | * and therefore isn't cluster aligned either. | |
10f0ed8b KW |
1610 | * |
1611 | * Returns: | |
1612 | * 0: if no clusters could be allocated. *bytes is set to 0, | |
1613 | * *host_offset is left unchanged. | |
1614 | * | |
1615 | * 1: if new clusters were allocated. *bytes may be decreased if the | |
1616 | * new allocation doesn't cover all of the requested area. | |
1617 | * *host_offset is updated to contain the host offset of the first | |
1618 | * newly allocated cluster. | |
1619 | * | |
1620 | * -errno: in error cases | |
10f0ed8b KW |
1621 | */ |
1622 | static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, | |
c37f4cd7 | 1623 | uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) |
10f0ed8b | 1624 | { |
ff99129a | 1625 | BDRVQcow2State *s = bs->opaque; |
10f0ed8b | 1626 | int l2_index; |
6d99a344 | 1627 | uint64_t *l2_slice; |
b6d36def | 1628 | uint64_t nb_clusters; |
10f0ed8b KW |
1629 | int ret; |
1630 | ||
57538c86 | 1631 | uint64_t alloc_cluster_offset; |
10f0ed8b KW |
1632 | |
1633 | trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, | |
1634 | *bytes); | |
1635 | assert(*bytes > 0); | |
1636 | ||
f5bc6350 | 1637 | /* |
6d99a344 | 1638 | * Calculate the number of clusters to look for. We stop at L2 slice |
f5bc6350 KW |
1639 | * boundaries to keep things simple. |
1640 | */ | |
c37f4cd7 KW |
1641 | nb_clusters = |
1642 | size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); | |
1643 | ||
6d99a344 AG |
1644 | l2_index = offset_to_l2_slice_index(s, guest_offset); |
1645 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); | |
57538c86 AG |
1646 | /* Limit total allocation byte count to BDRV_REQUEST_MAX_BYTES */ |
1647 | nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits); | |
d1b9d19f | 1648 | |
10f0ed8b | 1649 | /* Find L2 entry for the first involved cluster */ |
6d99a344 | 1650 | ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); |
10f0ed8b KW |
1651 | if (ret < 0) { |
1652 | return ret; | |
1653 | } | |
1654 | ||
57538c86 AG |
1655 | nb_clusters = count_single_write_clusters(bs, nb_clusters, |
1656 | l2_slice, l2_index, true); | |
10f0ed8b | 1657 | |
ecdd5333 KW |
1658 | /* This function is only called when there were no non-COW clusters, so if |
1659 | * we can't find any unallocated or COW clusters either, something is | |
1660 | * wrong with our code. */ | |
1661 | assert(nb_clusters > 0); | |
1662 | ||
57538c86 AG |
1663 | /* Allocate at a given offset in the image file */ |
1664 | alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET : | |
1665 | start_of_cluster(s, *host_offset); | |
1666 | ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset, | |
1667 | &nb_clusters); | |
1668 | if (ret < 0) { | |
1669 | goto out; | |
10f0ed8b KW |
1670 | } |
1671 | ||
57538c86 AG |
1672 | /* Can't extend contiguous allocation */ |
1673 | if (nb_clusters == 0) { | |
1674 | *bytes = 0; | |
1675 | ret = 0; | |
1676 | goto out; | |
ff52aab2 HR |
1677 | } |
1678 | ||
57538c86 AG |
1679 | assert(alloc_cluster_offset != INV_OFFSET); |
1680 | ||
83baa9a4 KW |
1681 | /* |
1682 | * Save info needed for meta data update. | |
1683 | * | |
85567393 | 1684 | * requested_bytes: Number of bytes from the start of the first |
83baa9a4 KW |
1685 | * newly allocated cluster to the end of the (possibly shortened |
1686 | * before) write request. | |
1687 | * | |
85567393 | 1688 | * avail_bytes: Number of bytes from the start of the first |
83baa9a4 KW |
1689 | * newly allocated to the end of the last newly allocated cluster. |
1690 | * | |
85567393 | 1691 | * nb_bytes: The number of bytes from the start of the first |
83baa9a4 KW |
1692 | * newly allocated cluster to the end of the area that the write |
1693 | * request actually writes to (excluding COW at the end) | |
1694 | */ | |
85567393 | 1695 | uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset); |
d1b9d19f | 1696 | int avail_bytes = nb_clusters << s->cluster_bits; |
85567393 | 1697 | int nb_bytes = MIN(requested_bytes, avail_bytes); |
83baa9a4 | 1698 | |
411d62b0 | 1699 | *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset); |
85567393 | 1700 | *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset)); |
83baa9a4 KW |
1701 | assert(*bytes != 0); |
1702 | ||
d53ec3d8 AG |
1703 | ret = calculate_l2_meta(bs, alloc_cluster_offset, guest_offset, *bytes, |
1704 | l2_slice, m, false); | |
1705 | if (ret < 0) { | |
1706 | goto out; | |
1707 | } | |
8f91d690 | 1708 | |
57538c86 | 1709 | ret = 1; |
10f0ed8b | 1710 | |
57538c86 AG |
1711 | out: |
1712 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); | |
1713 | if (ret < 0 && *m && (*m)->nb_clusters > 0) { | |
10f0ed8b KW |
1714 | QLIST_REMOVE(*m, next_in_flight); |
1715 | } | |
1716 | return ret; | |
1717 | } | |
1718 | ||
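The requested_bytes/avail_bytes/nb_bytes bookkeeping in handle_alloc() determines how much of the request actually fits into the clusters the allocator returned. A standalone sketch of the same arithmetic (not part of this file; the numbers are made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t cluster_size = 64 * 1024;
        const uint64_t guest_offset = 0x10800;   /* unaligned request start */
        const uint64_t bytes        = 0x30000;   /* remaining request length */
        const uint64_t nb_clusters  = 2;         /* clusters the allocator returned */

        uint64_t into_cluster    = guest_offset % cluster_size;
        uint64_t requested_bytes = bytes + into_cluster;
        uint64_t avail_bytes     = nb_clusters * cluster_size;
        uint64_t nb_bytes        = requested_bytes < avail_bytes ? requested_bytes
                                                                 : avail_bytes;

        /* The caller only gets what fits into the newly allocated clusters */
        uint64_t usable = nb_bytes - into_cluster;
        printf("request shortened from 0x%" PRIx64 " to 0x%" PRIx64 " bytes\n",
               bytes, usable);
        return 0;
    }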
45aba42f KW |
1719 | /* |
1720 | * alloc_cluster_offset | |
1721 | * | |
250196f1 KW |
1722 | * For a given offset on the virtual disk, find the cluster offset in qcow2 |
1723 | * file. If the offset is not found, allocate a new cluster. | |
45aba42f | 1724 | * |
250196f1 | 1725 | * If the cluster was already allocated, m->nb_clusters is set to 0 and |
a7912369 | 1726 | * other fields in m are meaningless. |
148da7ea KW |
1727 | * |
1728 | * If the cluster is newly allocated, m->nb_clusters is set to the number of | |
68d100e9 KW |
1729 | * contiguous clusters that have been allocated. In this case, the other |
1730 | * fields of m are valid and contain information about the first allocated | |
1731 | * cluster. | |
45aba42f | 1732 | * |
68d100e9 KW |
1733 | * If the request conflicts with another write request in flight, the coroutine |
1734 | * is queued and will be reentered when the dependency has completed. | |
148da7ea KW |
1735 | * |
1736 | * Return 0 on success and -errno in error cases | |
45aba42f | 1737 | */ |
f4f0d391 | 1738 | int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, |
d46a0bb2 KW |
1739 | unsigned int *bytes, uint64_t *host_offset, |
1740 | QCowL2Meta **m) | |
45aba42f | 1741 | { |
ff99129a | 1742 | BDRVQcow2State *s = bs->opaque; |
710c2496 | 1743 | uint64_t start, remaining; |
250196f1 | 1744 | uint64_t cluster_offset; |
65eb2e35 | 1745 | uint64_t cur_bytes; |
710c2496 | 1746 | int ret; |
45aba42f | 1747 | |
d46a0bb2 | 1748 | trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes); |
710c2496 | 1749 | |
72424114 | 1750 | again: |
16f0587e | 1751 | start = offset; |
d46a0bb2 | 1752 | remaining = *bytes; |
c6d619cc KW |
1753 | cluster_offset = INV_OFFSET; |
1754 | *host_offset = INV_OFFSET; | |
ecdd5333 KW |
1755 | cur_bytes = 0; |
1756 | *m = NULL; | |
0af729ec | 1757 | |
2c3b32d2 | 1758 | while (true) { |
ecdd5333 | 1759 | |
c6d619cc | 1760 | if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) { |
ecdd5333 KW |
1761 | *host_offset = start_of_cluster(s, cluster_offset); |
1762 | } | |
1763 | ||
1764 | assert(remaining >= cur_bytes); | |
1765 | ||
1766 | start += cur_bytes; | |
1767 | remaining -= cur_bytes; | |
c6d619cc KW |
1768 | |
1769 | if (cluster_offset != INV_OFFSET) { | |
1770 | cluster_offset += cur_bytes; | |
1771 | } | |
ecdd5333 KW |
1772 | |
1773 | if (remaining == 0) { | |
1774 | break; | |
1775 | } | |
1776 | ||
1777 | cur_bytes = remaining; | |
1778 | ||
2c3b32d2 KW |
1779 | /* |
1780 | * Now start gathering as many contiguous clusters as possible: | |
1781 | * | |
1782 | * 1. Check for overlaps with in-flight allocations | |
1783 | * | |
1784 | * a) Overlap not in the first cluster -> shorten this request and | |
1785 | * let the caller handle the rest in its next loop iteration. | |
1786 | * | |
1787 | * b) Real overlaps of two requests. Yield and restart the search | |
1788 | * for contiguous clusters (the situation could have changed | |
1789 | * while we were sleeping) | |
1790 | * | |
1791 | * c) TODO: Request starts in the same cluster in which the in-flight | |
1792 | * allocation ends. Shorten the COW of the in-flight allocation, | |
1793 | * set cluster_offset to write to the same cluster and set up | |
1794 | * the right synchronisation between the in-flight request and | |
1795 | * the new one. | |
1796 | */ | |
ecdd5333 | 1797 | ret = handle_dependencies(bs, start, &cur_bytes, m); |
2c3b32d2 | 1798 | if (ret == -EAGAIN) { |
ecdd5333 KW |
1799 | /* Currently handle_dependencies() doesn't yield if we already had |
1800 | * an allocation. If it did, we would have to clean up the L2Meta | |
1801 | * structs before starting over. */ | |
1802 | assert(*m == NULL); | |
2c3b32d2 KW |
1803 | goto again; |
1804 | } else if (ret < 0) { | |
1805 | return ret; | |
ecdd5333 KW |
1806 | } else if (cur_bytes == 0) { |
1807 | break; | |
2c3b32d2 KW |
1808 | } else { |
1809 | /* handle_dependencies() may have decreased cur_bytes (shortened | |
1810 | * the allocations below) so that the next dependency is processed | |
1811 | * correctly during the next loop iteration. */ | |
0af729ec | 1812 | } |
710c2496 | 1813 | |
2c3b32d2 KW |
1814 | /* |
1815 | * 2. Count contiguous COPIED clusters. | |
1816 | */ | |
1817 | ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m); | |
1818 | if (ret < 0) { | |
1819 | return ret; | |
1820 | } else if (ret) { | |
ecdd5333 | 1821 | continue; |
2c3b32d2 KW |
1822 | } else if (cur_bytes == 0) { |
1823 | break; | |
1824 | } | |
060bee89 | 1825 | |
2c3b32d2 KW |
1826 | /* |
1827 | * 3. If the request still hasn't completed, allocate new clusters, | |
1828 | * considering any cluster_offset of steps 1c or 2. | |
1829 | */ | |
1830 | ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); | |
1831 | if (ret < 0) { | |
1832 | return ret; | |
1833 | } else if (ret) { | |
ecdd5333 | 1834 | continue; |
2c3b32d2 KW |
1835 | } else { |
1836 | assert(cur_bytes == 0); | |
1837 | break; | |
1838 | } | |
f5bc6350 | 1839 | } |
10f0ed8b | 1840 | |
d46a0bb2 KW |
1841 | *bytes -= remaining; |
1842 | assert(*bytes > 0); | |
c6d619cc | 1843 | assert(*host_offset != INV_OFFSET); |
45aba42f | 1844 | |
148da7ea | 1845 | return 0; |
45aba42f KW |
1846 | } |
1847 | ||
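qcow2_alloc_cluster_offset() gathers the request piece by piece: each iteration consumes cur_bytes and advances start until remaining reaches zero. The standalone sketch below (not part of this file) mimics only the shape of that loop; handle_piece() is a made-up stand-in for handle_copied()/handle_alloc() that consumes at most one hypothetical 64 KiB cluster per call.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-in: consume up to the end of the current 64 KiB cluster. */
    static uint64_t handle_piece(uint64_t start, uint64_t remaining)
    {
        const uint64_t cluster_size = 64 * 1024;
        uint64_t to_cluster_end = cluster_size - (start % cluster_size);
        return remaining < to_cluster_end ? remaining : to_cluster_end;
    }

    int main(void)
    {
        uint64_t start = 0x1f000;      /* unaligned guest offset */
        uint64_t remaining = 0x42000;  /* request length */

        while (remaining > 0) {
            uint64_t cur_bytes = handle_piece(start, remaining);
            printf("piece at 0x%" PRIx64 ", 0x%" PRIx64 " bytes\n", start, cur_bytes);
            start += cur_bytes;
            remaining -= cur_bytes;
        }
        return 0;
    }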
5ea929e3 KW |
1848 | /* |
1849 | * This discards as many clusters of nb_clusters as possible at once (i.e. | |
21ab3add | 1850 | * all clusters in the same L2 slice) and returns the number of discarded |
5ea929e3 KW |
1851 | * clusters. |
1852 | */ | |
21ab3add AG |
1853 | static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, |
1854 | uint64_t nb_clusters, | |
1855 | enum qcow2_discard_type type, bool full_discard) | |
5ea929e3 | 1856 | { |
ff99129a | 1857 | BDRVQcow2State *s = bs->opaque; |
21ab3add | 1858 | uint64_t *l2_slice; |
5ea929e3 KW |
1859 | int l2_index; |
1860 | int ret; | |
1861 | int i; | |
1862 | ||
21ab3add | 1863 | ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); |
5ea929e3 KW |
1864 | if (ret < 0) { |
1865 | return ret; | |
1866 | } | |
1867 | ||
21ab3add AG |
1868 | /* Limit nb_clusters to one L2 slice */ |
1869 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); | |
b6d36def | 1870 | assert(nb_clusters <= INT_MAX); |
5ea929e3 KW |
1871 | |
1872 | for (i = 0; i < nb_clusters; i++) { | |
a68cd703 AG |
1873 | uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i); |
1874 | uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); | |
1875 | uint64_t new_l2_entry = old_l2_entry; | |
1876 | uint64_t new_l2_bitmap = old_l2_bitmap; | |
1877 | QCow2ClusterType cluster_type = | |
1878 | qcow2_get_cluster_type(bs, old_l2_entry); | |
a71835a0 KW |
1879 | |
1880 | /* | |
a68cd703 AG |
1881 | * If full_discard is true, the cluster should not read back as zeroes, |
1882 | * but rather fall through to the backing file. | |
1883 | * | |
808c4b6f HR |
1884 | * If full_discard is false, make sure that a discarded area reads back |
1885 | * as zeroes for v3 images (we cannot do it for v2 without actually | |
1886 | * writing a zero-filled buffer). We can skip the operation if the | |
1887 | * cluster is already marked as zero, or if it's unallocated and we | |
1888 | * don't have a backing file. | |
a71835a0 | 1889 | * |
237d78f8 | 1890 | * TODO We might want to use bdrv_block_status(bs) here, but we're |
a71835a0 KW |
1891 | * holding s->lock, so that doesn't work today. |
1892 | */ | |
a68cd703 AG |
1893 | if (full_discard) { |
1894 | new_l2_entry = new_l2_bitmap = 0; | |
1895 | } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) { | |
1896 | if (has_subclusters(s)) { | |
1897 | new_l2_entry = 0; | |
1898 | new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES; | |
1899 | } else { | |
1900 | new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0; | |
bbd995d8 | 1901 | } |
a68cd703 | 1902 | } |
bbd995d8 | 1903 | |
a68cd703 AG |
1904 | if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) { |
1905 | continue; | |
5ea929e3 KW |
1906 | } |
1907 | ||
1908 | /* First remove L2 entries */ | |
21ab3add | 1909 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
a68cd703 AG |
1910 | set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry); |
1911 | if (has_subclusters(s)) { | |
1912 | set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap); | |
a71835a0 | 1913 | } |
5ea929e3 | 1914 | /* Then decrease the refcount */ |
c883db0d | 1915 | qcow2_free_any_clusters(bs, old_l2_entry, 1, type); |
5ea929e3 KW |
1916 | } |
1917 | ||
21ab3add | 1918 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
5ea929e3 KW |
1919 | |
1920 | return nb_clusters; | |
1921 | } | |
1922 | ||
d2cb36af EB |
1923 | int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, |
1924 | uint64_t bytes, enum qcow2_discard_type type, | |
1925 | bool full_discard) | |
5ea929e3 | 1926 | { |
ff99129a | 1927 | BDRVQcow2State *s = bs->opaque; |
d2cb36af | 1928 | uint64_t end_offset = offset + bytes; |
b6d36def | 1929 | uint64_t nb_clusters; |
d2cb36af | 1930 | int64_t cleared; |
5ea929e3 KW |
1931 | int ret; |
1932 | ||
f10ee139 | 1933 | /* Caller must pass aligned values, except at image end */ |
0c1bd469 | 1934 | assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); |
f10ee139 EB |
1935 | assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || |
1936 | end_offset == bs->total_sectors << BDRV_SECTOR_BITS); | |
5ea929e3 | 1937 | |
d2cb36af | 1938 | nb_clusters = size_to_clusters(s, bytes); |
5ea929e3 | 1939 | |
0b919fae KW |
1940 | s->cache_discards = true; |
1941 | ||
21ab3add | 1942 | /* Each L2 slice is handled by its own loop iteration */ |
5ea929e3 | 1943 | while (nb_clusters > 0) { |
21ab3add AG |
1944 | cleared = discard_in_l2_slice(bs, offset, nb_clusters, type, |
1945 | full_discard); | |
d2cb36af EB |
1946 | if (cleared < 0) { |
1947 | ret = cleared; | |
0b919fae | 1948 | goto fail; |
5ea929e3 KW |
1949 | } |
1950 | ||
d2cb36af EB |
1951 | nb_clusters -= cleared; |
1952 | offset += (cleared * s->cluster_size); | |
5ea929e3 KW |
1953 | } |
1954 | ||
0b919fae KW |
1955 | ret = 0; |
1956 | fail: | |
1957 | s->cache_discards = false; | |
1958 | qcow2_process_discards(bs, ret); | |
1959 | ||
1960 | return ret; | |
5ea929e3 | 1961 | } |
621f0589 KW |
1962 | |
1963 | /* | |
1964 | * This zeroes as many clusters of nb_clusters as possible at once (i.e. | |
a9a9f8f0 | 1965 | * all clusters in the same L2 slice) and returns the number of zeroed |
621f0589 KW |
1966 | * clusters. |
1967 | */ | |
a9a9f8f0 AG |
1968 | static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, |
1969 | uint64_t nb_clusters, int flags) | |
621f0589 | 1970 | { |
ff99129a | 1971 | BDRVQcow2State *s = bs->opaque; |
a9a9f8f0 | 1972 | uint64_t *l2_slice; |
621f0589 KW |
1973 | int l2_index; |
1974 | int ret; | |
1975 | int i; | |
1976 | ||
a9a9f8f0 | 1977 | ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); |
621f0589 KW |
1978 | if (ret < 0) { |
1979 | return ret; | |
1980 | } | |
1981 | ||
a9a9f8f0 AG |
1982 | /* Limit nb_clusters to one L2 slice */ |
1983 | nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); | |
b6d36def | 1984 | assert(nb_clusters <= INT_MAX); |
621f0589 KW |
1985 | |
1986 | for (i = 0; i < nb_clusters; i++) { | |
205fa507 AG |
1987 | uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i); |
1988 | uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); | |
1989 | QCow2ClusterType type = qcow2_get_cluster_type(bs, old_l2_entry); | |
1990 | bool unmap = (type == QCOW2_CLUSTER_COMPRESSED) || | |
1991 | ((flags & BDRV_REQ_MAY_UNMAP) && qcow2_cluster_is_allocated(type)); | |
1992 | uint64_t new_l2_entry = unmap ? 0 : old_l2_entry; | |
1993 | uint64_t new_l2_bitmap = old_l2_bitmap; | |
1994 | ||
1995 | if (has_subclusters(s)) { | |
1996 | new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES; | |
1997 | } else { | |
1998 | new_l2_entry |= QCOW_OFLAG_ZERO; | |
1999 | } | |
621f0589 | 2000 | |
205fa507 | 2001 | if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) { |
06cc5e2b EB |
2002 | continue; |
2003 | } | |
2004 | ||
a9a9f8f0 | 2005 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
205fa507 AG |
2006 | if (unmap) { |
2007 | qcow2_free_any_clusters(bs, old_l2_entry, 1, QCOW2_DISCARD_REQUEST); | |
2008 | } | |
2009 | set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry); | |
2010 | if (has_subclusters(s)) { | |
2011 | set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap); | |
621f0589 KW |
2012 | } |
2013 | } | |
2014 | ||
a9a9f8f0 | 2015 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
621f0589 KW |
2016 | |
2017 | return nb_clusters; | |
2018 | } | |
2019 | ||
a6841a2d AG |
2020 | static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset, |
2021 | unsigned nb_subclusters) | |
2022 | { | |
2023 | BDRVQcow2State *s = bs->opaque; | |
2024 | uint64_t *l2_slice; | |
2025 | uint64_t old_l2_bitmap, l2_bitmap; | |
2026 | int l2_index, ret, sc = offset_to_sc_index(s, offset); | |
2027 | ||
2028 | /* For full clusters use zero_in_l2_slice() instead */ | |
2029 | assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster); | |
2030 | assert(sc + nb_subclusters <= s->subclusters_per_cluster); | |
2031 | assert(offset_into_subcluster(s, offset) == 0); | |
2032 | ||
2033 | ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); | |
2034 | if (ret < 0) { | |
2035 | return ret; | |
2036 | } | |
2037 | ||
2038 | switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) { | |
2039 | case QCOW2_CLUSTER_COMPRESSED: | |
2040 | ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */ | |
2041 | goto out; | |
2042 | case QCOW2_CLUSTER_NORMAL: | |
2043 | case QCOW2_CLUSTER_UNALLOCATED: | |
2044 | break; | |
2045 | default: | |
2046 | g_assert_not_reached(); | |
2047 | } | |
2048 | ||
2049 | old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index); | |
2050 | ||
2051 | l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters); | |
2052 | l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters); | |
2053 | ||
2054 | if (old_l2_bitmap != l2_bitmap) { | |
2055 | set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap); | |
2056 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); | |
2057 | } | |
2058 | ||
2059 | ret = 0; | |
2060 | out: | |
2061 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); | |
2062 | ||
2063 | return ret; | |
2064 | } | |
2065 | ||
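zero_l2_subclusters() updates only the extended L2 bitmap: it sets the "reads as zero" bits and clears the "allocated" bits for the affected subcluster range. The standalone sketch below (not part of this file) shows the same bit-range manipulation; it assumes the usual extended-L2 layout with allocation bits in the low 32 bits and zero bits in the high 32 bits, and the concrete values are invented.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bits [from, to) of the low half (allocation status). */
    static uint64_t alloc_range(unsigned from, unsigned to)
    {
        return ((1ULL << (to - from)) - 1) << from;
    }

    /* Bits [from, to) of the high half (reads-as-zero status). */
    static uint64_t zero_range(unsigned from, unsigned to)
    {
        return alloc_range(from, to) << 32;
    }

    int main(void)
    {
        uint64_t bitmap = 0x00000000000000f0ULL;  /* subclusters 4-7 allocated */

        /* Zeroize subclusters 4..5: set their zero bits, clear their alloc bits */
        bitmap |= zero_range(4, 6);
        bitmap &= ~alloc_range(4, 6);

        printf("new bitmap: 0x%016" PRIx64 "\n", bitmap);
        return 0;
    }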
2066 | int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | |
2067 | uint64_t bytes, int flags) | |
621f0589 | 2068 | { |
ff99129a | 2069 | BDRVQcow2State *s = bs->opaque; |
d2cb36af | 2070 | uint64_t end_offset = offset + bytes; |
b6d36def | 2071 | uint64_t nb_clusters; |
a6841a2d | 2072 | unsigned head, tail; |
d2cb36af | 2073 | int64_t cleared; |
621f0589 KW |
2074 | int ret; |
2075 | ||
6c3944dc KW |
2076 | /* If we have to stay in sync with an external data file, zero out |
2077 | * s->data_file first. */ | |
2078 | if (data_file_is_raw(bs)) { | |
2079 | assert(has_data_file(bs)); | |
2080 | ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags); | |
2081 | if (ret < 0) { | |
2082 | return ret; | |
2083 | } | |
2084 | } | |
2085 | ||
f10ee139 | 2086 | /* Caller must pass aligned values, except at image end */ |
a6841a2d AG |
2087 | assert(offset_into_subcluster(s, offset) == 0); |
2088 | assert(offset_into_subcluster(s, end_offset) == 0 || | |
f01643fb | 2089 | end_offset >= bs->total_sectors << BDRV_SECTOR_BITS); |
f10ee139 | 2090 | |
61b30439 KW |
2091 | /* |
2092 | * The zero flag is only supported by version 3 and newer. However, if we | |
2093 | * have no backing file, we can resort to discard in version 2. | |
2094 | */ | |
621f0589 | 2095 | if (s->qcow_version < 3) { |
61b30439 KW |
2096 | if (!bs->backing) { |
2097 | return qcow2_cluster_discard(bs, offset, bytes, | |
2098 | QCOW2_DISCARD_REQUEST, false); | |
2099 | } | |
621f0589 KW |
2100 | return -ENOTSUP; |
2101 | } | |
2102 | ||
a6841a2d AG |
2103 | head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset; |
2104 | offset += head; | |
2105 | ||
2106 | tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 : | |
2107 | end_offset - MAX(offset, start_of_cluster(s, end_offset)); | |
2108 | end_offset -= tail; | |
621f0589 | 2109 | |
0b919fae KW |
2110 | s->cache_discards = true; |
2111 | ||
a6841a2d AG |
2112 | if (head) { |
2113 | ret = zero_l2_subclusters(bs, offset - head, | |
2114 | size_to_subclusters(s, head)); | |
2115 | if (ret < 0) { | |
2116 | goto fail; | |
2117 | } | |
2118 | } | |
2119 | ||
2120 | /* Each L2 slice is handled by its own loop iteration */ | |
2121 | nb_clusters = size_to_clusters(s, end_offset - offset); | |
2122 | ||
621f0589 | 2123 | while (nb_clusters > 0) { |
a9a9f8f0 | 2124 | cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags); |
d2cb36af EB |
2125 | if (cleared < 0) { |
2126 | ret = cleared; | |
0b919fae | 2127 | goto fail; |
621f0589 KW |
2128 | } |
2129 | ||
d2cb36af EB |
2130 | nb_clusters -= cleared; |
2131 | offset += (cleared * s->cluster_size); | |
621f0589 KW |
2132 | } |
2133 | ||
a6841a2d AG |
2134 | if (tail) { |
2135 | ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail)); | |
2136 | if (ret < 0) { | |
2137 | goto fail; | |
2138 | } | |
2139 | } | |
2140 | ||
0b919fae KW |
2141 | ret = 0; |
2142 | fail: | |
2143 | s->cache_discards = false; | |
2144 | qcow2_process_discards(bs, ret); | |
2145 | ||
2146 | return ret; | |
621f0589 | 2147 | } |
32b6444d HR |
2148 | |
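qcow2_subcluster_zeroize() splits a request that is subcluster-aligned but not cluster-aligned into an unaligned head, a run of whole clusters, and an unaligned tail. A standalone sketch of that split for the simple case, where the request covers at least one whole cluster and stays clear of the image end (not part of this file; the sizes are hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t round_up(uint64_t x, uint64_t align)
    {
        return (x + align - 1) / align * align;
    }

    int main(void)
    {
        const uint64_t cluster_size = 64 * 1024;   /* hypothetical */
        uint64_t offset     = 0x1c000;             /* subcluster aligned, mid-cluster */
        uint64_t end_offset = 0x38000;

        /* Simple case only: at least one whole cluster, not at the image end. */
        uint64_t head = round_up(offset, cluster_size) - offset;
        uint64_t tail = end_offset % cluster_size;

        offset += head;         /* whole-cluster part starts here */
        end_offset -= tail;     /* ...and ends here */

        printf("head=0x%" PRIx64 " whole=[0x%" PRIx64 ", 0x%" PRIx64 ") tail=0x%" PRIx64 "\n",
               head, offset, end_offset, tail);
        return 0;
    }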
2149 | /* | |
2150 | * Expands all zero clusters in a specific L1 table (or deallocates them, for | |
2151 | * non-backed non-pre-allocated zero clusters). | |
2152 | * | |
4057a2b2 HR |
2153 | * l1_entries and *visited_l1_entries are used to keep track of progress for |
2154 | * status_cb(). l1_entries contains the total number of L1 entries and | |
2155 | * *visited_l1_entries counts all visited L1 entries. | |
32b6444d HR |
2156 | */ |
2157 | static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, | |
ecf58777 | 2158 | int l1_size, int64_t *visited_l1_entries, |
4057a2b2 | 2159 | int64_t l1_entries, |
8b13976d HR |
2160 | BlockDriverAmendStatusCB *status_cb, |
2161 | void *cb_opaque) | |
32b6444d | 2162 | { |
ff99129a | 2163 | BDRVQcow2State *s = bs->opaque; |
32b6444d | 2164 | bool is_active_l1 = (l1_table == s->l1_table); |
415184f5 AG |
2165 | uint64_t *l2_slice = NULL; |
2166 | unsigned slice, slice_size2, n_slices; | |
32b6444d HR |
2167 | int ret; |
2168 | int i, j; | |
2169 | ||
7bbb5920 AG |
2170 | /* qcow2_downgrade() is not allowed in images with subclusters */ |
2171 | assert(!has_subclusters(s)); | |
2172 | ||
c8fd8554 | 2173 | slice_size2 = s->l2_slice_size * l2_entry_size(s); |
415184f5 AG |
2174 | n_slices = s->cluster_size / slice_size2; |
2175 | ||
32b6444d HR |
2176 | if (!is_active_l1) { |
2177 | /* inactive L2 tables need a buffer into which they can be loaded | |
2178 | * from disk */ | |
415184f5 AG |
2179 | l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2); |
2180 | if (l2_slice == NULL) { | |
de82815d KW |
2181 | return -ENOMEM; |
2182 | } | |
32b6444d HR |
2183 | } |
2184 | ||
2185 | for (i = 0; i < l1_size; i++) { | |
2186 | uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; | |
0e06528e | 2187 | uint64_t l2_refcount; |
32b6444d HR |
2188 | |
2189 | if (!l2_offset) { | |
2190 | /* unallocated */ | |
4057a2b2 HR |
2191 | (*visited_l1_entries)++; |
2192 | if (status_cb) { | |
8b13976d | 2193 | status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); |
4057a2b2 | 2194 | } |
32b6444d HR |
2195 | continue; |
2196 | } | |
2197 | ||
8dd93d93 HR |
2198 | if (offset_into_cluster(s, l2_offset)) { |
2199 | qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" | |
2200 | PRIx64 " unaligned (L1 index: %#x)", | |
2201 | l2_offset, i); | |
2202 | ret = -EIO; | |
2203 | goto fail; | |
2204 | } | |
2205 | ||
9b765486 AG |
2206 | ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, |
2207 | &l2_refcount); | |
2208 | if (ret < 0) { | |
2209 | goto fail; | |
2210 | } | |
2211 | ||
415184f5 AG |
2212 | for (slice = 0; slice < n_slices; slice++) { |
2213 | uint64_t slice_offset = l2_offset + slice * slice_size2; | |
2214 | bool l2_dirty = false; | |
226494ff AG |
2215 | if (is_active_l1) { |
2216 | /* get active L2 tables from cache */ | |
415184f5 AG |
2217 | ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset, |
2218 | (void **)&l2_slice); | |
226494ff AG |
2219 | } else { |
2220 | /* load inactive L2 tables from disk */ | |
415184f5 | 2221 | ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2); |
226494ff AG |
2222 | } |
2223 | if (ret < 0) { | |
2224 | goto fail; | |
32b6444d HR |
2225 | } |
2226 | ||
415184f5 | 2227 | for (j = 0; j < s->l2_slice_size; j++) { |
12c6aebe | 2228 | uint64_t l2_entry = get_l2_entry(s, l2_slice, j); |
226494ff AG |
2229 | int64_t offset = l2_entry & L2E_OFFSET_MASK; |
2230 | QCow2ClusterType cluster_type = | |
808c2bb4 | 2231 | qcow2_get_cluster_type(bs, l2_entry); |
226494ff AG |
2232 | |
2233 | if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN && | |
2234 | cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) { | |
32b6444d HR |
2235 | continue; |
2236 | } | |
2237 | ||
226494ff AG |
2238 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
2239 | if (!bs->backing) { | |
7bbb5920 AG |
2240 | /* |
2241 | * not backed; therefore we can simply deallocate the | |
2242 | * cluster. No need to call set_l2_bitmap(), this | |
2243 | * function doesn't support images with subclusters. | |
2244 | */ | |
12c6aebe | 2245 | set_l2_entry(s, l2_slice, j, 0); |
226494ff AG |
2246 | l2_dirty = true; |
2247 | continue; | |
2248 | } | |
2249 | ||
2250 | offset = qcow2_alloc_clusters(bs, s->cluster_size); | |
2251 | if (offset < 0) { | |
2252 | ret = offset; | |
2253 | goto fail; | |
2254 | } | |
ecf58777 | 2255 | |
3a75a870 AG |
2256 | /* The offset must fit in the offset field */ |
2257 | assert((offset & L2E_OFFSET_MASK) == offset); | |
2258 | ||
226494ff AG |
2259 | if (l2_refcount > 1) { |
2260 | /* For shared L2 tables, set the refcount accordingly | |
2261 | * (it is already 1 and needs to be l2_refcount) */ | |
2262 | ret = qcow2_update_cluster_refcount( | |
2263 | bs, offset >> s->cluster_bits, | |
2aabe7c7 | 2264 | refcount_diff(1, l2_refcount), false, |
ecf58777 | 2265 | QCOW2_DISCARD_OTHER); |
226494ff AG |
2266 | if (ret < 0) { |
2267 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
2268 | QCOW2_DISCARD_OTHER); | |
2269 | goto fail; | |
2270 | } | |
ecf58777 HR |
2271 | } |
2272 | } | |
32b6444d | 2273 | |
226494ff | 2274 | if (offset_into_cluster(s, offset)) { |
415184f5 | 2275 | int l2_index = slice * s->l2_slice_size + j; |
226494ff AG |
2276 | qcow2_signal_corruption( |
2277 | bs, true, -1, -1, | |
2278 | "Cluster allocation offset " | |
2279 | "%#" PRIx64 " unaligned (L2 offset: %#" | |
2280 | PRIx64 ", L2 index: %#x)", offset, | |
415184f5 | 2281 | l2_offset, l2_index); |
226494ff AG |
2282 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { |
2283 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
2284 | QCOW2_DISCARD_ALWAYS); | |
2285 | } | |
2286 | ret = -EIO; | |
2287 | goto fail; | |
8dd93d93 | 2288 | } |
8dd93d93 | 2289 | |
226494ff | 2290 | ret = qcow2_pre_write_overlap_check(bs, 0, offset, |
966b000f | 2291 | s->cluster_size, true); |
226494ff AG |
2292 | if (ret < 0) { |
2293 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { | |
2294 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
2295 | QCOW2_DISCARD_ALWAYS); | |
2296 | } | |
2297 | goto fail; | |
320c7066 | 2298 | } |
32b6444d | 2299 | |
966b000f KW |
2300 | ret = bdrv_pwrite_zeroes(s->data_file, offset, |
2301 | s->cluster_size, 0); | |
226494ff AG |
2302 | if (ret < 0) { |
2303 | if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { | |
2304 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
2305 | QCOW2_DISCARD_ALWAYS); | |
2306 | } | |
2307 | goto fail; | |
320c7066 | 2308 | } |
32b6444d | 2309 | |
226494ff | 2310 | if (l2_refcount == 1) { |
12c6aebe | 2311 | set_l2_entry(s, l2_slice, j, offset | QCOW_OFLAG_COPIED); |
226494ff | 2312 | } else { |
12c6aebe | 2313 | set_l2_entry(s, l2_slice, j, offset); |
226494ff | 2314 | } |
7bbb5920 AG |
2315 | /* |
2316 | * No need to call set_l2_bitmap() after set_l2_entry() because | |
2317 | * this function doesn't support images with subclusters. | |
2318 | */ | |
226494ff | 2319 | l2_dirty = true; |
e390cf5a | 2320 | } |
32b6444d | 2321 | |
226494ff AG |
2322 | if (is_active_l1) { |
2323 | if (l2_dirty) { | |
415184f5 | 2324 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); |
226494ff | 2325 | qcow2_cache_depends_on_flush(s->l2_table_cache); |
32b6444d | 2326 | } |
415184f5 | 2327 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
226494ff AG |
2328 | } else { |
2329 | if (l2_dirty) { | |
2330 | ret = qcow2_pre_write_overlap_check( | |
2331 | bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, | |
966b000f | 2332 | slice_offset, slice_size2, false); |
226494ff AG |
2333 | if (ret < 0) { |
2334 | goto fail; | |
2335 | } | |
32b6444d | 2336 | |
415184f5 AG |
2337 | ret = bdrv_pwrite(bs->file, slice_offset, |
2338 | l2_slice, slice_size2); | |
226494ff AG |
2339 | if (ret < 0) { |
2340 | goto fail; | |
2341 | } | |
32b6444d HR |
2342 | } |
2343 | } | |
2344 | } | |
4057a2b2 HR |
2345 | |
2346 | (*visited_l1_entries)++; | |
2347 | if (status_cb) { | |
8b13976d | 2348 | status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); |
4057a2b2 | 2349 | } |
32b6444d HR |
2350 | } |
2351 | ||
2352 | ret = 0; | |
2353 | ||
2354 | fail: | |
415184f5 | 2355 | if (l2_slice) { |
32b6444d | 2356 | if (!is_active_l1) { |
415184f5 | 2357 | qemu_vfree(l2_slice); |
32b6444d | 2358 | } else { |
415184f5 | 2359 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); |
32b6444d HR |
2360 | } |
2361 | } | |
2362 | return ret; | |
2363 | } | |
2364 | ||
2365 | /* | |
2366 | * For backed images, expands all zero clusters on the image. For non-backed | |
2367 | * images, deallocates all non-pre-allocated zero clusters (and claims the | |
2368 | * allocation for pre-allocated ones). This is important for downgrading to a | |
2369 | * qcow2 version which doesn't yet support metadata zero clusters. | |
2370 | */ | |
4057a2b2 | 2371 | int qcow2_expand_zero_clusters(BlockDriverState *bs, |
8b13976d HR |
2372 | BlockDriverAmendStatusCB *status_cb, |
2373 | void *cb_opaque) | |
32b6444d | 2374 | { |
ff99129a | 2375 | BDRVQcow2State *s = bs->opaque; |
32b6444d | 2376 | uint64_t *l1_table = NULL; |
4057a2b2 | 2377 | int64_t l1_entries = 0, visited_l1_entries = 0; |
32b6444d HR |
2378 | int ret; |
2379 | int i, j; | |
2380 | ||
4057a2b2 HR |
2381 | if (status_cb) { |
2382 | l1_entries = s->l1_size; | |
2383 | for (i = 0; i < s->nb_snapshots; i++) { | |
2384 | l1_entries += s->snapshots[i].l1_size; | |
2385 | } | |
2386 | } | |
2387 | ||
32b6444d | 2388 | ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, |
4057a2b2 | 2389 | &visited_l1_entries, l1_entries, |
8b13976d | 2390 | status_cb, cb_opaque); |
32b6444d HR |
2391 | if (ret < 0) { |
2392 | goto fail; | |
2393 | } | |
2394 | ||
2395 | /* Inactive L1 tables may point to active L2 tables - therefore it is | |
2396 | * necessary to flush the L2 table cache before trying to access the L2 | |
2397 | * tables pointed to by inactive L1 entries (else we might try to expand | |
2398 | * zero clusters that have already been expanded); furthermore, it is also | |
2399 | * necessary to empty the L2 table cache, since it may contain tables which | |
2400 | * are now going to be modified directly on disk, bypassing the cache. | |
2401 | * qcow2_cache_empty() does both for us. */ | |
2402 | ret = qcow2_cache_empty(bs, s->l2_table_cache); | |
2403 | if (ret < 0) { | |
2404 | goto fail; | |
2405 | } | |
2406 | ||
2407 | for (i = 0; i < s->nb_snapshots; i++) { | |
c9a442e4 AG |
2408 | int l1_size2; |
2409 | uint64_t *new_l1_table; | |
2410 | Error *local_err = NULL; | |
2411 | ||
2412 | ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset, | |
2413 | s->snapshots[i].l1_size, sizeof(uint64_t), | |
2414 | QCOW_MAX_L1_SIZE, "Snapshot L1 table", | |
2415 | &local_err); | |
2416 | if (ret < 0) { | |
2417 | error_report_err(local_err); | |
2418 | goto fail; | |
2419 | } | |
32b6444d | 2420 | |
c9a442e4 AG |
2421 | l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t); |
2422 | new_l1_table = g_try_realloc(l1_table, l1_size2); | |
de7269d2 AG |
2423 | |
2424 | if (!new_l1_table) { | |
2425 | ret = -ENOMEM; | |
2426 | goto fail; | |
2427 | } | |
2428 | ||
2429 | l1_table = new_l1_table; | |
32b6444d | 2430 | |
c9a442e4 AG |
2431 | ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, |
2432 | l1_table, l1_size2); | |
32b6444d HR |
2433 | if (ret < 0) { |
2434 | goto fail; | |
2435 | } | |
2436 | ||
2437 | for (j = 0; j < s->snapshots[i].l1_size; j++) { | |
2438 | be64_to_cpus(&l1_table[j]); | |
2439 | } | |
2440 | ||
2441 | ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, | |
4057a2b2 | 2442 | &visited_l1_entries, l1_entries, |
8b13976d | 2443 | status_cb, cb_opaque); |
32b6444d HR |
2444 | if (ret < 0) { |
2445 | goto fail; | |
2446 | } | |
2447 | } | |
2448 | ||
2449 | ret = 0; | |
2450 | ||
2451 | fail: | |
32b6444d HR |
2452 | g_free(l1_table); |
2453 | return ret; | |
2454 | } |
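qcow2_expand_zero_clusters() reports progress through the status callback purely in terms of visited versus total L1 entries (the counters set up above). As a purely hypothetical, standalone sketch (not part of this file), a callback could turn those two counters into a percentage like this:

    #include <stdint.h>
    #include <stdio.h>

    static void report_progress(int64_t visited_l1_entries, int64_t l1_entries)
    {
        double percent = l1_entries
            ? 100.0 * visited_l1_entries / l1_entries : 100.0;
        printf("expanded zero clusters: %.1f%% of L1 entries visited\n", percent);
    }

    int main(void)
    {
        report_progress(3, 12);   /* e.g. 3 of 12 L1 entries processed */
        return 0;
    }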