]>
Commit | Line | Data |
---|---|---|
298800ca SH |
1 | /* |
2 | * QEMU Enhanced Disk Format Cluster functions | |
3 | * | |
4 | * Copyright IBM, Corp. 2010 | |
5 | * | |
6 | * Authors: | |
7 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> | |
8 | * Anthony Liguori <aliguori@us.ibm.com> | |
9 | * | |
10 | * This work is licensed under the terms of the GNU LGPL, version 2 or later. | |
11 | * See the COPYING.LIB file in the top-level directory. | |
12 | * | |
13 | */ | |
14 | ||
80c71a24 | 15 | #include "qemu/osdep.h" |
298800ca SH |
16 | #include "qed.h" |
17 | ||
18 | /** | |
19 | * Count the number of contiguous data clusters | |
20 | * | |
21 | * @s: QED state | |
22 | * @table: L2 table | |
23 | * @index: First cluster index | |
24 | * @n: Maximum number of clusters | |
25 | * @offset: Set to first cluster offset | |
26 | * | |
21df65b6 AL |
27 | * This function scans tables for contiguous clusters. A contiguous run of |
28 | * clusters may be allocated, unallocated, or zero. | |
298800ca SH |
29 | */ |
30 | static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s, | |
31 | QEDTable *table, | |
32 | unsigned int index, | |
33 | unsigned int n, | |
34 | uint64_t *offset) | |
35 | { | |
36 | unsigned int end = MIN(index + n, s->table_nelems); | |
37 | uint64_t last = table->offsets[index]; | |
38 | unsigned int i; | |
39 | ||
40 | *offset = last; | |
41 | ||
42 | for (i = index + 1; i < end; i++) { | |
21df65b6 AL |
43 | if (qed_offset_is_unalloc_cluster(last)) { |
44 | /* Counting unallocated clusters */ | |
45 | if (!qed_offset_is_unalloc_cluster(table->offsets[i])) { | |
46 | break; | |
47 | } | |
48 | } else if (qed_offset_is_zero_cluster(last)) { | |
49 | /* Counting zero clusters */ | |
50 | if (!qed_offset_is_zero_cluster(table->offsets[i])) { | |
298800ca SH |
51 | break; |
52 | } | |
53 | } else { | |
54 | /* Counting allocated clusters */ | |
55 | if (table->offsets[i] != last + s->header.cluster_size) { | |
56 | break; | |
57 | } | |
58 | last = table->offsets[i]; | |
59 | } | |
60 | } | |
61 | return i - index; | |
62 | } | |
63 | ||
/* State carried across the asynchronous L2 table read in qed_find_cluster() */
typedef struct {
    BDRVQEDState *s;        /* QED driver state the lookup runs against */
    uint64_t pos;           /* byte position in the device */
    size_t len;             /* number of bytes in the request */

    QEDRequest *request;    /* carries the L2 table cache entry reference */

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;
} QEDFindClusterCB;
75 | ||
76 | static void qed_find_cluster_cb(void *opaque, int ret) | |
77 | { | |
78 | QEDFindClusterCB *find_cluster_cb = opaque; | |
79 | BDRVQEDState *s = find_cluster_cb->s; | |
80 | QEDRequest *request = find_cluster_cb->request; | |
81 | uint64_t offset = 0; | |
82 | size_t len = 0; | |
83 | unsigned int index; | |
84 | unsigned int n; | |
85 | ||
86 | if (ret) { | |
87 | goto out; | |
88 | } | |
89 | ||
90 | index = qed_l2_index(s, find_cluster_cb->pos); | |
91 | n = qed_bytes_to_clusters(s, | |
92 | qed_offset_into_cluster(s, find_cluster_cb->pos) + | |
93 | find_cluster_cb->len); | |
94 | n = qed_count_contiguous_clusters(s, request->l2_table->table, | |
95 | index, n, &offset); | |
96 | ||
21df65b6 AL |
97 | if (qed_offset_is_unalloc_cluster(offset)) { |
98 | ret = QED_CLUSTER_L2; | |
99 | } else if (qed_offset_is_zero_cluster(offset)) { | |
100 | ret = QED_CLUSTER_ZERO; | |
101 | } else if (qed_check_cluster_offset(s, offset)) { | |
102 | ret = QED_CLUSTER_FOUND; | |
103 | } else { | |
298800ca SH |
104 | ret = -EINVAL; |
105 | } | |
106 | ||
21df65b6 AL |
107 | len = MIN(find_cluster_cb->len, n * s->header.cluster_size - |
108 | qed_offset_into_cluster(s, find_cluster_cb->pos)); | |
109 | ||
298800ca SH |
110 | out: |
111 | find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len); | |
7267c094 | 112 | g_free(find_cluster_cb); |
298800ca SH |
113 | } |
114 | ||
115 | /** | |
116 | * Find the offset of a data cluster | |
117 | * | |
118 | * @s: QED state | |
119 | * @request: L2 cache entry | |
120 | * @pos: Byte position in device | |
121 | * @len: Number of bytes | |
122 | * @cb: Completion function | |
123 | * @opaque: User data for completion function | |
124 | * | |
125 | * This function translates a position in the block device to an offset in the | |
126 | * image file. It invokes the cb completion callback to report back the | |
127 | * translated offset or unallocated range in the image file. | |
128 | * | |
129 | * If the L2 table exists, request->l2_table points to the L2 table cache entry | |
130 | * and the caller must free the reference when they are finished. The cache | |
131 | * entry is exposed in this way to avoid callers having to read the L2 table | |
132 | * again later during request processing. If request->l2_table is non-NULL it | |
133 | * will be unreferenced before taking on the new cache entry. | |
134 | */ | |
135 | void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos, | |
136 | size_t len, QEDFindClusterFunc *cb, void *opaque) | |
137 | { | |
138 | QEDFindClusterCB *find_cluster_cb; | |
139 | uint64_t l2_offset; | |
140 | ||
141 | /* Limit length to L2 boundary. Requests are broken up at the L2 boundary | |
142 | * so that a request acts on one L2 table at a time. | |
143 | */ | |
144 | len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos); | |
145 | ||
146 | l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)]; | |
21df65b6 | 147 | if (qed_offset_is_unalloc_cluster(l2_offset)) { |
298800ca SH |
148 | cb(opaque, QED_CLUSTER_L1, 0, len); |
149 | return; | |
150 | } | |
151 | if (!qed_check_table_offset(s, l2_offset)) { | |
152 | cb(opaque, -EINVAL, 0, 0); | |
153 | return; | |
154 | } | |
155 | ||
7267c094 | 156 | find_cluster_cb = g_malloc(sizeof(*find_cluster_cb)); |
298800ca SH |
157 | find_cluster_cb->s = s; |
158 | find_cluster_cb->pos = pos; | |
159 | find_cluster_cb->len = len; | |
160 | find_cluster_cb->cb = cb; | |
161 | find_cluster_cb->opaque = opaque; | |
162 | find_cluster_cb->request = request; | |
163 | ||
164 | qed_read_l2_table(s, request, l2_offset, | |
165 | qed_find_cluster_cb, find_cluster_cb); | |
166 | } |