/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

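/*
 * qcow2_grow_l1_table
 *
 * Grows the L1 table to at least min_size entries (to exactly min_size if
 * exact_size is true), writes the enlarged table to newly allocated clusters
 * and updates the image header to point to it. The clusters of the old L1
 * table are freed afterwards.
 *
 * Returns 0 on success, -errno on failure.
 */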
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success (with *l2_table pointing to the loaded table), or
 * -errno if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table for the given L1 index. If the L1 entry already
 * points to an L2 table (i.e. we are doing a copy on write for the L2
 * table), the contents of the old L2 table are copied into the newly
 * allocated one. Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

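/*
 * Checks how many of the nb_clusters L2 entries beginning at index 'start'
 * point to clusters that are contiguous on disk with the cluster referenced
 * by the first entry of l2_table ('mask' is cleared from each entry before
 * the comparison). Returns the number of contiguous entries found, or 0 if
 * the first entry is unallocated.
 */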
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

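/*
 * Returns the number of leading unallocated (zero) entries in l2_table,
 * looking at no more than nb_clusters entries.
 */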
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

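/*
 * Reads nb_sectors sectors starting at sector_num into buf, cluster by
 * cluster: unallocated clusters are read from the backing file (or filled
 * with zeros), compressed clusters are decompressed and encrypted sectors
 * are decrypted. Returns 0 on success, a negative value on failure.
 */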
static int qcow2_read(BlockDriverState *bs, int64_t sector_num,
                      uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;
    struct iovec iov;
    QEMUIOVector qiov;

    while (nb_sectors > 0) {
        n = nb_sectors;

        ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
            &cluster_offset);
        if (ret < 0) {
            return ret;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                iov.iov_base = buf;
                iov.iov_len = n * 512;
                qemu_iovec_init_external(&qiov, &iov, 1);

                n1 = qcow2_backing_read1(bs->backing_hd, &qiov, sector_num, n);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING);
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(bs, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            BLKDBG_EVENT(bs->file, BLKDBG_READ);
            ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

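/*
 * Copies sectors n_start..n_end of the guest cluster starting at sector
 * start_sect into the host cluster at cluster_offset. The old data is read
 * through qcow2_read() (so backing files, compression and encryption are
 * handled) and re-encrypted if necessary before it is written back. Used
 * for the COW of partially written clusters.
 */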
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
    ret = qcow2_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start,
        s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Returns 0 if the offset is found, -errno otherwise.
 *
 */

int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno on failure.
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

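/*
 * Links the clusters described by m (as filled in by
 * qcow2_alloc_cluster_offset()) into the L2 table: performs the COW of the
 * head and tail sectors that were not covered by the guest write, updates
 * the L2 entries to point to the new clusters and frees any clusters that
 * these entries previously pointed to. Returns 0 on success, -errno on
 * failure.
 */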
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
    uint64_t cluster_offset = m->cluster_offset;
    bool cow = false;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its
         * cluster pointer and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in qcow2 file.
 * If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0,
 * m->depends_on is set to NULL and the other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

again:
    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;
        m->depends_on = NULL;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    assert(i <= nb_clusters);
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                goto again;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
    if (cluster_offset < 0) {
        ret = cluster_offset;
        goto fail;
    }

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto fail_put;
    }

    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
    m->cluster_offset = cluster_offset;

    *num = m->nb_available - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
fail_put:
    QLIST_REMOVE(m, next_in_flight);
    return ret;
}

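/*
 * Inflates buf_size bytes of raw deflate data from buf into out_buf.
 * Fails unless exactly out_buf_size bytes are produced.
 * Returns 0 on success, -1 on error.
 */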
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

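/*
 * Reads and decompresses the compressed cluster described by the L2 entry
 * cluster_offset into s->cluster_cache, unless that cluster is already
 * cached. Returns 0 on success, -errno on failure.
 */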
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many of the nb_clusters clusters as possible at once
 * (i.e. all clusters in the same L2 table) and returns the number of
 * discarded clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t l2_offset, *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

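/*
 * Discards the clusters that are fully contained in the nb_sectors sectors
 * starting at offset; partial clusters at the start and end of the range
 * are left untouched. Returns 0 on success, -errno on failure.
 */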
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}