]> git.proxmox.com Git - mirror_qemu.git/blame - block/qed-check.c
Merge tag 'pull-block-2023-06-05' of https://gitlab.com/hreitz/qemu into staging
[mirror_qemu.git] / block / qed-check.c
CommitLineData
01979a98
SH
1/*
2 * QEMU Enhanced Disk Format Consistency Check
3 *
4 * Copyright IBM, Corp. 2010
5 *
6 * Authors:
7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10 * See the COPYING.LIB file in the top-level directory.
11 *
12 */
13
80c71a24 14#include "qemu/osdep.h"
e2c1c34f 15#include "block/block-io.h"
01979a98
SH
16#include "qed.h"
17
18typedef struct {
19 BDRVQEDState *s;
20 BdrvCheckResult *result;
21 bool fix; /* whether to fix invalid offsets */
22
19dfc44a 23 uint64_t nclusters;
01979a98
SH
24 uint32_t *used_clusters; /* referenced cluster bitmap */
25
26 QEDRequest request;
27} QEDCheck;
28
/**
 * Test whether cluster @n is marked in the used-cluster bitmap.
 *
 * Note: use an unsigned literal for the mask — shifting a signed
 * "1" left by 31 (bit 31 of a word) is undefined behavior in C.
 */
static bool qed_test_bit(uint32_t *bitmap, uint64_t n)
{
    return !!(bitmap[n / 32] & (1u << (n % 32)));
}
32
/**
 * Mark cluster @n in the used-cluster bitmap.
 *
 * Note: use an unsigned literal for the mask — shifting a signed
 * "1" left by 31 (bit 31 of a word) is undefined behavior in C.
 */
static void qed_set_bit(uint32_t *bitmap, uint64_t n)
{
    bitmap[n / 32] |= 1u << (n % 32);
}
36
37/**
38 * Set bitmap bits for clusters
39 *
40 * @check: Check structure
41 * @offset: Starting offset in bytes
42 * @n: Number of clusters
43 */
44static bool qed_set_used_clusters(QEDCheck *check, uint64_t offset,
45 unsigned int n)
46{
47 uint64_t cluster = qed_bytes_to_clusters(check->s, offset);
48 unsigned int corruptions = 0;
49
50 while (n-- != 0) {
51 /* Clusters should only be referenced once */
52 if (qed_test_bit(check->used_clusters, cluster)) {
53 corruptions++;
54 }
55
56 qed_set_bit(check->used_clusters, cluster);
57 cluster++;
58 }
59
60 check->result->corruptions += corruptions;
61 return corruptions == 0;
62}
63
/**
 * Check an L2 table
 *
 * @check: Check structure
 * @table: L2 table to scan
 *
 * Returns the number of invalid cluster offsets found.  When check->fix is
 * set, invalid entries are zeroed in the in-memory table and counted as
 * fixed; the caller is responsible for writing the table back to disk.
 */
static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid = 0;
    uint64_t last_offset = 0; /* previous data cluster, for fragmentation stats */

    for (i = 0; i < s->table_nelems; i++) {
        uint64_t offset = table->offsets[i];

        /* Unallocated and zero clusters occupy no space on disk */
        if (qed_offset_is_unalloc_cluster(offset) ||
            qed_offset_is_zero_cluster(offset)) {
            continue;
        }

        /* Fragmentation statistics are updated before the offset is
         * validated, so an offset later found invalid still counts as an
         * allocated cluster and participates in the adjacency check.
         */
        check->result->bfi.allocated_clusters++;
        if (last_offset && (last_offset + s->header.cluster_size != offset)) {
            check->result->bfi.fragmented_clusters++;
        }
        last_offset = offset;

        /* Detect invalid cluster offset */
        if (!qed_check_cluster_offset(s, offset)) {
            if (check->fix) {
                /* Clear the entry; the data that cluster held is lost */
                table->offsets[i] = 0;
                check->result->corruptions_fixed++;
            } else {
                check->result->corruptions++;
            }

            num_invalid++;
            continue;
        }

        /* Mark the data cluster referenced; duplicate references are
         * counted as corruptions inside qed_set_used_clusters() */
        qed_set_used_clusters(check, offset, 1);
    }

    return num_invalid;
}
106
/**
 * Descend tables and check each cluster is referenced once only
 *
 * @check: Check structure
 * @table: L1 table to scan
 *
 * Returns 0 on success or the last negative errno value from a failed L2
 * table read or table write.  Invalid L1 entries are either cleared and
 * counted as fixed (check->fix) or counted as corruptions.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_check_l1_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid_l1 = 0;
    int ret, last_error = 0;

    /* Mark L1 table clusters used */
    qed_set_used_clusters(check, s->header.l1_table_offset,
                          s->header.table_size);

    for (i = 0; i < s->table_nelems; i++) {
        unsigned int num_invalid_l2;
        uint64_t offset = table->offsets[i];

        if (qed_offset_is_unalloc_cluster(offset)) {
            continue;
        }

        /* Detect invalid L2 offset */
        if (!qed_check_table_offset(s, offset)) {
            /* Clear invalid offset */
            if (check->fix) {
                table->offsets[i] = 0;
                check->result->corruptions_fixed++;
            } else {
                check->result->corruptions++;
            }

            num_invalid_l1++;
            continue;
        }

        /* A false return means some of the L2 table's clusters were
         * already referenced elsewhere (counted as corruptions inside);
         * don't descend into a table that overlaps other data */
        if (!qed_set_used_clusters(check, offset, s->header.table_size)) {
            continue; /* skip an invalid table */
        }

        ret = qed_read_l2_table_sync(s, &check->request, offset);
        if (ret) {
            /* Read failures are I/O errors, not corruptions */
            check->result->check_errors++;
            last_error = ret;
            continue;
        }

        num_invalid_l2 = qed_check_l2_table(check,
                                            check->request.l2_table->table);

        /* Write out fixed L2 table */
        if (num_invalid_l2 > 0 && check->fix) {
            ret = qed_write_l2_table_sync(s, &check->request, 0,
                                          s->table_nelems, false);
            if (ret) {
                check->result->check_errors++;
                last_error = ret;
                continue;
            }
        }
    }

    /* Drop reference to final table */
    qed_unref_l2_cache_entry(check->request.l2_table);
    check->request.l2_table = NULL;

    /* Write out fixed L1 table */
    if (num_invalid_l1 > 0 && check->fix) {
        ret = qed_write_l1_table_sync(s, 0, s->table_nelems);
        if (ret) {
            check->result->check_errors++;
            last_error = ret;
        }
    }

    return last_error;
}
184
185/**
186 * Check for unreferenced (leaked) clusters
187 */
188static void qed_check_for_leaks(QEDCheck *check)
189{
190 BDRVQEDState *s = check->s;
19dfc44a 191 uint64_t i;
01979a98
SH
192
193 for (i = s->header.header_size; i < check->nclusters; i++) {
194 if (!qed_test_bit(check->used_clusters, i)) {
195 check->result->leaks++;
196 }
197 }
198}
199
b10170ac
SH
200/**
201 * Mark an image clean once it passes check or has been repaired
202 */
203static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
204{
205 /* Skip if there were unfixable corruptions or I/O errors */
206 if (result->corruptions > 0 || result->check_errors > 0) {
207 return;
208 }
209
210 /* Skip if image is already marked clean */
211 if (!(s->header.features & QED_F_NEED_CHECK)) {
212 return;
213 }
214
215 /* Ensure fixes reach storage before clearing check bit */
216 bdrv_flush(s->bs);
217
218 s->header.features &= ~QED_F_NEED_CHECK;
219 qed_write_header_sync(s);
220}
221
2fd61638 222/* Called with table_lock held. */
54277a2a 223int coroutine_fn qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix)
01979a98
SH
224{
225 QEDCheck check = {
226 .s = s,
227 .result = result,
228 .nclusters = qed_bytes_to_clusters(s, s->file_size),
229 .request = { .l2_table = NULL },
230 .fix = fix,
231 };
232 int ret;
233
02c4f26b 234 check.used_clusters = g_try_new0(uint32_t, (check.nclusters + 31) / 32);
4f4896db
KW
235 if (check.nclusters && check.used_clusters == NULL) {
236 return -ENOMEM;
237 }
01979a98 238
11c9c615 239 check.result->bfi.total_clusters =
c41a73ff 240 DIV_ROUND_UP(s->header.image_size, s->header.cluster_size);
01979a98
SH
241 ret = qed_check_l1_table(&check, s->l1_table);
242 if (ret == 0) {
243 /* Only check for leaks if entire image was scanned successfully */
244 qed_check_for_leaks(&check);
b10170ac
SH
245
246 if (fix) {
247 qed_check_mark_clean(s, result);
248 }
01979a98
SH
249 }
250
7267c094 251 g_free(check.used_clusters);
01979a98
SH
252 return ret;
253}