/*
 * linux/fs/affs/bitmap.c
 *
 * (c) 1996 Hans-Joachim Widmaier
 *
 * bitmap.c contains the code that handles all bitmap related stuff -
 * block allocation, deallocation, calculation of free space.
 */

#include <linux/slab.h>
#include "affs.h"

u32
affs_count_free_blocks(struct super_block *sb)
{
        struct affs_bm_info *bm;
        u32 free;
        int i;

        pr_debug("%s()\n", __func__);

        if (sb->s_flags & MS_RDONLY)
                return 0;

        mutex_lock(&AFFS_SB(sb)->s_bmlock);

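        /* the per-bitmap-block free counts are kept up to date in memory,
         * so no bitmap blocks have to be read here */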
        bm = AFFS_SB(sb)->s_bitmap;
        free = 0;
        for (i = AFFS_SB(sb)->s_bmap_count; i > 0; bm++, i--)
                free += bm->bm_free;

        mutex_unlock(&AFFS_SB(sb)->s_bmlock);

        return free;
}

void
affs_free_block(struct super_block *sb, u32 block)
{
        struct affs_sb_info *sbi = AFFS_SB(sb);
        struct affs_bm_info *bm;
        struct buffer_head *bh;
        u32 blk, bmap, bit, mask, tmp;
        __be32 *data;

        pr_debug("%s(%u)\n", __func__, block);

        if (block > sbi->s_partition_size)
                goto err_range;

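        /* map the block number to a bitmap block index and a bit within it */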
        blk = block - sbi->s_reserved;
        bmap = blk / sbi->s_bmap_bits;
        bit = blk % sbi->s_bmap_bits;
        bm = &sbi->s_bitmap[bmap];

        mutex_lock(&sbi->s_bmlock);

        bh = sbi->s_bmap_bh;
        if (sbi->s_last_bmap != bmap) {
                affs_brelse(bh);
                bh = affs_bread(sb, bm->bm_key);
                if (!bh)
                        goto err_bh_read;
                sbi->s_bmap_bh = bh;
                sbi->s_last_bmap = bmap;
        }

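        /* word 0 of a bitmap block is its checksum, hence the '+ 1' below */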
        mask = 1 << (bit & 31);
        data = (__be32 *)bh->b_data + bit / 32 + 1;

        /* mark block free */
        tmp = be32_to_cpu(*data);
        if (tmp & mask)
                goto err_free;
        *data = cpu_to_be32(tmp | mask);

        /* fix checksum: the data word grew by mask, so the checksum
         * longword must shrink by the same amount
         */
        tmp = be32_to_cpu(*(__be32 *)bh->b_data);
        *(__be32 *)bh->b_data = cpu_to_be32(tmp - mask);

        mark_buffer_dirty(bh);
        affs_mark_sb_dirty(sb);
        bm->bm_free++;

        mutex_unlock(&sbi->s_bmlock);
        return;

err_free:
        affs_warning(sb, "affs_free_block",
                     "Trying to free block %u which is already free", block);
        mutex_unlock(&sbi->s_bmlock);
        return;

err_bh_read:
        affs_error(sb, "affs_free_block",
                   "Cannot read bitmap block %u", bm->bm_key);
        sbi->s_bmap_bh = NULL;
        sbi->s_last_bmap = ~0;
        mutex_unlock(&sbi->s_bmlock);
        return;

err_range:
        affs_error(sb, "affs_free_block", "Block %u outside partition", block);
}

/*
 * Allocate a block in the given allocation zone.
 * Since we have to byte-swap the bitmap on little-endian
 * machines, this is rather expensive. Therefore we will
 * preallocate the free blocks that follow in the same bitmap
 * word, if possible. We are not doing preallocations in the
 * header zone, though.
 */
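/*
 * Illustrative example (not part of the original source): if the
 * 32-bit bitmap word is 0x0000001e - bit 0 allocated, bits 1-4 free -
 * and the goal falls on bit 1, affs_alloc_block() below returns the
 * block for bit 1, finds bits 2-4 free as well and sets i_pa_cnt to 3,
 * so the next three allocations for this inode are served from the
 * preallocation window without touching the bitmap block again.
 */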

u32
affs_alloc_block(struct inode *inode, u32 goal)
{
        struct super_block *sb;
        struct affs_sb_info *sbi;
        struct affs_bm_info *bm;
        struct buffer_head *bh;
        __be32 *data, *enddata;
        u32 blk, bmap, bit, mask, mask2, tmp;
        int i;

        sb = inode->i_sb;
        sbi = AFFS_SB(sb);

        pr_debug("balloc(inode=%lu,goal=%u): ", inode->i_ino, goal);

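        /* serve the request from the inode's preallocation window, if any */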
        if (AFFS_I(inode)->i_pa_cnt) {
                pr_debug("%d\n", AFFS_I(inode)->i_lastalloc+1);
                AFFS_I(inode)->i_pa_cnt--;
                return ++AFFS_I(inode)->i_lastalloc;
        }

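        /* with no usable goal, start at the first non-reserved block */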
        if (!goal || goal > sbi->s_partition_size) {
                if (goal)
                        affs_warning(sb, "affs_balloc", "invalid goal %d", goal);
                //if (!AFFS_I(inode)->i_last_block)
                //      affs_warning(sb, "affs_balloc", "no last alloc block");
                goal = sbi->s_reserved;
        }

        blk = goal - sbi->s_reserved;
        bmap = blk / sbi->s_bmap_bits;
        bm = &sbi->s_bitmap[bmap];

        mutex_lock(&sbi->s_bmlock);

        if (bm->bm_free)
                goto find_bmap_bit;

find_bmap:
        /* search for the next bmap buffer with free bits */
        i = sbi->s_bmap_count;
        do {
                if (--i < 0)
                        goto err_full;
                bmap++;
                bm++;
                if (bmap < sbi->s_bmap_count)
                        continue;
                /* restart search at zero */
                bmap = 0;
                bm = sbi->s_bitmap;
        } while (!bm->bm_free);
        blk = bmap * sbi->s_bmap_bits;

find_bmap_bit:

        bh = sbi->s_bmap_bh;
        if (sbi->s_last_bmap != bmap) {
                affs_brelse(bh);
                bh = affs_bread(sb, bm->bm_key);
                if (!bh)
                        goto err_bh_read;
                sbi->s_bmap_bh = bh;
                sbi->s_last_bmap = bmap;
        }

        /* find an unused block in this bitmap block */
        bit = blk % sbi->s_bmap_bits;
        data = (__be32 *)bh->b_data + bit / 32 + 1;
        enddata = (__be32 *)((u8 *)bh->b_data + sb->s_blocksize);
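        /* restrict the first scanned word to bits at or after the goal
         * and align blk to that 32-bit word */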
        mask = ~0UL << (bit & 31);
        blk &= ~31UL;

        tmp = be32_to_cpu(*data);
        if (tmp & mask)
                goto find_bit;

        /* scan the rest of the buffer */
        do {
                blk += 32;
                if (++data >= enddata)
                        /* didn't find anything; this can only happen
                         * if the scan didn't start at 0, so try the
                         * next bmap
                         */
                        goto find_bmap;
        } while (!*data);
        tmp = be32_to_cpu(*data);
        mask = ~0;

find_bit:
        /* finally look for a free bit in the word */
        bit = ffs(tmp & mask) - 1;
        blk += bit + sbi->s_reserved;
        mask2 = mask = 1 << (bit & 31);
        AFFS_I(inode)->i_lastalloc = blk;

        /* prealloc as much as possible within this word */
        while ((mask2 <<= 1)) {
                if (!(tmp & mask2))
                        break;
                AFFS_I(inode)->i_pa_cnt++;
                mask |= mask2;
        }
        bm->bm_free -= AFFS_I(inode)->i_pa_cnt + 1;

        *data = cpu_to_be32(tmp & ~mask);

        /* fix checksum: the data word shrank by mask, so the checksum
         * longword must grow by the same amount
         */
        tmp = be32_to_cpu(*(__be32 *)bh->b_data);
        *(__be32 *)bh->b_data = cpu_to_be32(tmp + mask);

        mark_buffer_dirty(bh);
        affs_mark_sb_dirty(sb);

        mutex_unlock(&sbi->s_bmlock);

        pr_debug("%d\n", blk);
        return blk;

err_bh_read:
        affs_error(sb, "affs_alloc_block",
                   "Cannot read bitmap block %u", bm->bm_key);
        sbi->s_bmap_bh = NULL;
        sbi->s_last_bmap = ~0;
err_full:
        mutex_unlock(&sbi->s_bmlock);
        pr_debug("failed\n");
        return 0;
}

int affs_init_bitmap(struct super_block *sb, int *flags)
{
        struct affs_bm_info *bm;
        struct buffer_head *bmap_bh = NULL, *bh = NULL;
        __be32 *bmap_blk;
        u32 size, blk, end, offset, mask;
        int i, res = 0;
        struct affs_sb_info *sbi = AFFS_SB(sb);

        if (*flags & MS_RDONLY)
                return 0;

        if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
                pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
                *flags |= MS_RDONLY;
                return 0;
        }

        sbi->s_last_bmap = ~0;
        sbi->s_bmap_bh = NULL;
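        /* word 0 of each bitmap block is its checksum, so only
         * blocksize * 8 - 32 bits per block map disk blocks */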
        sbi->s_bmap_bits = sb->s_blocksize * 8 - 32;
        sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
                             sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
        size = sbi->s_bmap_count * sizeof(*bm);
        bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
        if (!sbi->s_bitmap) {
                pr_err("Bitmap allocation failed\n");
                return -ENOMEM;
        }

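        /* the root block tail holds 25 bitmap block pointers followed
         * by a pointer to the first bitmap extension block */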
        bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
        blk = sb->s_blocksize / 4 - 49;
        end = blk + 25;

        for (i = sbi->s_bmap_count; i > 0; bm++, i--) {
                affs_brelse(bh);

                bm->bm_key = be32_to_cpu(bmap_blk[blk]);
                bh = affs_bread(sb, bm->bm_key);
                if (!bh) {
                        pr_err("Cannot read bitmap\n");
                        res = -EIO;
                        goto out;
                }
                if (affs_checksum_block(sb, bh)) {
                        pr_warn("Bitmap %u invalid - mounting %s read only.\n",
                                bm->bm_key, sb->s_id);
                        *flags |= MS_RDONLY;
                        goto out;
                }
                pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
                bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);

                /* Don't try to read the extension if this is the last
                 * block, but we also need the right bm pointer below
                 */
                if (++blk < end || i == 1)
                        continue;
                if (bmap_bh)
                        affs_brelse(bmap_bh);
                bmap_bh = affs_bread(sb, be32_to_cpu(bmap_blk[blk]));
                if (!bmap_bh) {
                        pr_err("Cannot read bitmap extension\n");
                        res = -EIO;
                        goto out;
                }
                bmap_blk = (__be32 *)bmap_bh->b_data;
                blk = 0;
                end = sb->s_blocksize / 4 - 1;
        }

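        /* offset is the number of bits used in the last bitmap block;
         * mask selects the valid bits of its last 32-bit word */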
        offset = (sbi->s_partition_size - sbi->s_reserved) % sbi->s_bmap_bits;
        mask = ~(0xFFFFFFFFU << (offset & 31));
        pr_debug("last word: %d %d %d\n", offset, offset / 32 + 1, mask);
        offset = offset / 32 + 1;

        if (mask) {
                u32 old, new;

                /* Mark unused bits in the last word as allocated */
                old = be32_to_cpu(((__be32 *)bh->b_data)[offset]);
                new = old & mask;
                //if (old != new) {
                ((__be32 *)bh->b_data)[offset] = cpu_to_be32(new);
                /* fix checksum */
                //new -= old;
                //old = be32_to_cpu(*(__be32 *)bh->b_data);
                //*(__be32 *)bh->b_data = cpu_to_be32(old - new);
                //mark_buffer_dirty(bh);
                //}
                /* correct offset for the bitmap count below */
                //offset++;
        }
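        /* clear the words that map blocks past the end of the partition,
         * then recompute the block checksum (word 0 is zeroed first so
         * it does not enter the sum) */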
        while (++offset < sb->s_blocksize / 4)
                ((__be32 *)bh->b_data)[offset] = 0;
        ((__be32 *)bh->b_data)[0] = 0;
        ((__be32 *)bh->b_data)[0] = cpu_to_be32(-affs_checksum_block(sb, bh));
        mark_buffer_dirty(bh);

        /* recalculate bitmap count for last block */
        bm--;
        bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);

out:
        affs_brelse(bh);
        affs_brelse(bmap_bh);
        return res;
}

void affs_free_bitmap(struct super_block *sb)
{
        struct affs_sb_info *sbi = AFFS_SB(sb);

        if (!sbi->s_bitmap)
                return;

        affs_brelse(sbi->s_bmap_bh);
        sbi->s_bmap_bh = NULL;
        sbi->s_last_bmap = ~0;
        kfree(sbi->s_bitmap);
        sbi->s_bitmap = NULL;
}