/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qed.h"

/**
 * Count the number of contiguous data clusters
 *
 * @s: QED state
 * @table: L2 table
 * @index: First cluster index
 * @n: Maximum number of clusters
 * @offset: Set to first cluster offset
 *
 * This function scans tables for contiguous clusters. A contiguous run of
 * clusters may be allocated, unallocated, or zero.
 */
static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
                                                  QEDTable *table,
                                                  unsigned int index,
                                                  unsigned int n,
                                                  uint64_t *offset)
{
    unsigned int end = MIN(index + n, s->table_nelems);
    uint64_t last = table->offsets[index];
    unsigned int i;

    *offset = last;

    for (i = index + 1; i < end; i++) {
        if (qed_offset_is_unalloc_cluster(last)) {
            /* Counting unallocated clusters */
            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
                break;
            }
        } else if (qed_offset_is_zero_cluster(last)) {
            /* Counting zero clusters */
            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
                break;
            }
        } else {
            /* Counting allocated clusters */
            if (table->offsets[i] != last + s->header.cluster_size) {
                break;
            }
            last = table->offsets[i];
        }
    }
    return i - index;
}

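/*
 * Bookkeeping for an in-flight cluster lookup. Allocated in
 * qed_find_cluster() and freed in qed_find_cluster_cb().
 */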
typedef struct {
    BDRVQEDState *s;
    uint64_t pos;
    size_t len;

    QEDRequest *request;

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;
} QEDFindClusterCB;

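/*
 * Completion callback for the L2 table read issued by qed_find_cluster()
 *
 * Translates the request position into a cluster offset using the cached L2
 * table, classifies the result (found, zero, unallocated, or error), and
 * reports it through the user callback.
 */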
static void qed_find_cluster_cb(void *opaque, int ret)
{
    QEDFindClusterCB *find_cluster_cb = opaque;
    BDRVQEDState *s = find_cluster_cb->s;
    QEDRequest *request = find_cluster_cb->request;
    uint64_t offset = 0;
    size_t len = 0;
    unsigned int index;
    unsigned int n;

    qed_acquire(s);
    if (ret) {
        goto out;
    }

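    /* Determine which L2 entry the request starts at and the maximum number
     * of clusters it could span, then count how many are actually contiguous.
     */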
    index = qed_l2_index(s, find_cluster_cb->pos);
    n = qed_bytes_to_clusters(s,
                              qed_offset_into_cluster(s, find_cluster_cb->pos) +
                              find_cluster_cb->len);
    n = qed_count_contiguous_clusters(s, request->l2_table->table,
                                      index, n, &offset);

    if (qed_offset_is_unalloc_cluster(offset)) {
        ret = QED_CLUSTER_L2;
    } else if (qed_offset_is_zero_cluster(offset)) {
        ret = QED_CLUSTER_ZERO;
    } else if (qed_check_cluster_offset(s, offset)) {
        ret = QED_CLUSTER_FOUND;
    } else {
        ret = -EINVAL;
    }

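    /* Clamp the byte count to the contiguous run that was found, measured
     * from the request position.
     */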
    len = MIN(find_cluster_cb->len, n * s->header.cluster_size -
              qed_offset_into_cluster(s, find_cluster_cb->pos));

out:
    find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
    qed_release(s);
    g_free(find_cluster_cb);
}

/**
 * Find the offset of a data cluster
 *
 * @s: QED state
 * @request: L2 cache entry
 * @pos: Byte position in device
 * @len: Number of bytes
 * @cb: Completion function
 * @opaque: User data for completion function
 *
 * This function translates a position in the block device to an offset in the
 * image file. It invokes the cb completion callback to report back the
 * translated offset or unallocated range in the image file.
 *
 * If the L2 table exists, request->l2_table points to the L2 table cache entry
 * and the caller must free the reference when they are finished. The cache
 * entry is exposed in this way to avoid callers having to read the L2 table
 * again later during request processing. If request->l2_table is non-NULL it
 * will be unreferenced before taking on the new cache entry.
 */
void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                      size_t len, QEDFindClusterFunc *cb, void *opaque)
{
    QEDFindClusterCB *find_cluster_cb;
    uint64_t l2_offset;

    /* Limit length to L2 boundary. Requests are broken up at the L2 boundary
     * so that a request acts on one L2 table at a time.
     */
    len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);

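    /* Look up the L2 table offset for this position in the L1 table */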
    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    if (qed_offset_is_unalloc_cluster(l2_offset)) {
        cb(opaque, QED_CLUSTER_L1, 0, len);
        return;
    }
    if (!qed_check_table_offset(s, l2_offset)) {
        cb(opaque, -EINVAL, 0, 0);
        return;
    }

    find_cluster_cb = g_malloc(sizeof(*find_cluster_cb));
    find_cluster_cb->s = s;
    find_cluster_cb->pos = pos;
    find_cluster_cb->len = len;
    find_cluster_cb->cb = cb;
    find_cluster_cb->opaque = opaque;
    find_cluster_cb->request = request;

    qed_read_l2_table(s, request, l2_offset,
                      qed_find_cluster_cb, find_cluster_cb);
}
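
/*
 * Minimal usage sketch (illustrative only, kept out of the build): a caller
 * supplies a QEDFindClusterFunc that receives the classification in ret
 * together with the translated image file offset and byte count. The
 * callback parameter order is assumed to match the invocation in
 * qed_find_cluster_cb() above: (opaque, ret, offset, len).
 */
#if 0
static void example_find_cluster_cb(void *opaque, int ret,
                                     uint64_t offset, size_t len)
{
    switch (ret) {
    case QED_CLUSTER_FOUND:
        /* len bytes are stored contiguously at offset in the image file */
        break;
    case QED_CLUSTER_ZERO:
        /* len bytes read back as zeroes; no data cluster is allocated */
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        /* len bytes are unallocated at the L2 or L1 level */
        break;
    default:
        /* ret is a negative errno value, e.g. -EINVAL */
        break;
    }
}

static void example_lookup(BDRVQEDState *s, QEDRequest *request,
                           uint64_t pos, size_t len)
{
    /* The result arrives asynchronously via example_find_cluster_cb() */
    qed_find_cluster(s, request, pos, len, example_find_cluster_cb, NULL);
}
#endif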