Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/ext3/balloc.c | |
3 | * | |
4 | * Copyright (C) 1992, 1993, 1994, 1995 | |
5 | * Remy Card (card@masi.ibp.fr) | |
6 | * Laboratoire MASI - Institut Blaise Pascal | |
7 | * Universite Pierre et Marie Curie (Paris VI) | |
8 | * | |
9 | * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 | |
10 | * Big-endian to little-endian byte-swapping/bitmaps by | |
11 | * David S. Miller (davem@caip.rutgers.edu), 1995 | |
12 | */ | |
13 | ||
14 | #include <linux/config.h> | |
15 | #include <linux/time.h> | |
16f7e0fe | 16 | #include <linux/capability.h> |
1da177e4 LT |
17 | #include <linux/fs.h> |
18 | #include <linux/jbd.h> | |
19 | #include <linux/ext3_fs.h> | |
20 | #include <linux/ext3_jbd.h> | |
21 | #include <linux/quotaops.h> | |
22 | #include <linux/buffer_head.h> | |
23 | ||
24 | /* | |
25 | * balloc.c contains the block allocation and deallocation routines | |
26 | */ | |
27 | ||
28 | /* | |
29 | * The free blocks are managed by bitmaps. A file system contains several | |
30 | * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap | |
31 | * block for inodes, N blocks for the inode table and data blocks. | |
32 | * | |
33 | * The file system contains group descriptors which are located after the | |
34 | * super block. Each descriptor contains the number of the bitmap block and | |
35 | * the free blocks count in the block. The descriptors are loaded in memory | |
36 | * when a file system is mounted (see ext3_read_super). | |
37 | */ | |
38 | ||
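/*
 * A worked sketch of the mapping described above (illustrative only;
 * the numbers assume 8192 blocks per group and s_first_data_block == 1,
 * as on a 1KiB-block ext3):
 */
#if 0
static void example_block_to_group(struct super_block *sb)
{
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
	unsigned long block = 20000;	/* an arbitrary fs-wide block */
	unsigned long group, bit;

	/* the same arithmetic ext3_free_blocks_sb() uses below */
	group = (block - le32_to_cpu(es->s_first_data_block)) /
			EXT3_BLOCKS_PER_GROUP(sb);
	bit = (block - le32_to_cpu(es->s_first_data_block)) %
			EXT3_BLOCKS_PER_GROUP(sb);
	/* with the assumptions above: group == 2, bit == 3615 */
}
#endif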
39 | ||
40 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | |
41 | ||
42 | struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, | |
43 | unsigned int block_group, | |
44 | struct buffer_head ** bh) | |
45 | { | |
46 | unsigned long group_desc; | |
47 | unsigned long offset; | |
48 | struct ext3_group_desc * desc; | |
49 | struct ext3_sb_info *sbi = EXT3_SB(sb); | |
50 | ||
51 | if (block_group >= sbi->s_groups_count) { | |
52 | ext3_error (sb, "ext3_get_group_desc", | |
53 | "block_group >= groups_count - " | |
54 | "block_group = %d, groups_count = %lu", | |
55 | block_group, sbi->s_groups_count); | |
56 | ||
57 | return NULL; | |
58 | } | |
59 | smp_rmb(); | |
60 | ||
61 | group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb); | |
62 | offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1); | |
63 | if (!sbi->s_group_desc[group_desc]) { | |
64 | ext3_error (sb, "ext3_get_group_desc", | |
65 | "Group descriptor not loaded - " | |
66 | "block_group = %d, group_desc = %lu, desc = %lu", | |
67 | block_group, group_desc, offset); | |
68 | return NULL; | |
69 | } | |
70 | ||
71 | desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data; | |
72 | if (bh) | |
73 | *bh = sbi->s_group_desc[group_desc]; | |
74 | return desc + offset; | |
75 | } | |
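/*
 * Typical caller's use of ext3_get_group_desc(), a minimal sketch:
 * look up a group's descriptor and read its free-block count.
 */
#if 0
static int example_group_free_count(struct super_block *sb,
				    unsigned int group)
{
	struct ext3_group_desc *desc = ext3_get_group_desc(sb, group, NULL);

	if (!desc)
		return -1;
	return le16_to_cpu(desc->bg_free_blocks_count);
}
#endif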
76 | ||
77 | /* | |
78 | * Read the bitmap for a given block_group, reading into the specified | |
79 | * slot in the superblock's bitmap cache. | |
80 | * | |
81 | * Return buffer_head on success or NULL in case of failure. | |
82 | */ | |
83 | static struct buffer_head * | |
84 | read_block_bitmap(struct super_block *sb, unsigned int block_group) | |
85 | { | |
86 | struct ext3_group_desc * desc; | |
87 | struct buffer_head * bh = NULL; | |
88 | ||
89 | desc = ext3_get_group_desc (sb, block_group, NULL); | |
90 | if (!desc) | |
91 | goto error_out; | |
92 | bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); | |
93 | if (!bh) | |
94 | ext3_error (sb, "read_block_bitmap", | |
95 | "Cannot read block bitmap - " | |
96 | "block_group = %d, block_bitmap = %u", | |
97 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | |
98 | error_out: | |
99 | return bh; | |
100 | } | |
101 | /* | |
102 | * The reservation window structure operations | |
103 | * -------------------------------------------- | |
104 | * Operations include: | |
105 | * dump, find, add, remove, is_empty, find_next_reservable_window, etc. | |
106 | * | |
107 | * We keep the per-filesystem reservation windows sorted by start | |
108 | * block, in a red-black tree (rb_root/rb_node). | |
109 | * | |
110 | * Initially, we keep these small operations in abstract functions, | |
111 | * so that if we later need a different search structure than the | |
112 | * red-black tree, we could switch to it without changing too much | |
113 | * code. | |
114 | */ | |
115 | #if 0 | |
116 | static void __rsv_window_dump(struct rb_root *root, int verbose, | |
117 | const char *fn) | |
118 | { | |
119 | struct rb_node *n; | |
120 | struct ext3_reserve_window_node *rsv, *prev; | |
121 | int bad; | |
122 | ||
123 | restart: | |
124 | n = rb_first(root); | |
125 | bad = 0; | |
126 | prev = NULL; | |
127 | ||
128 | printk("Block Allocation Reservation Windows Map (%s):\n", fn); | |
129 | while (n) { | |
130 | rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); | |
131 | if (verbose) | |
132 | printk("reservation window 0x%p " | |
133 | "start: %d, end: %d\n", | |
134 | rsv, rsv->rsv_start, rsv->rsv_end); | |
135 | if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { | |
136 | printk("Bad reservation %p (start >= end)\n", | |
137 | rsv); | |
138 | bad = 1; | |
139 | } | |
140 | if (prev && prev->rsv_end >= rsv->rsv_start) { | |
141 | printk("Bad reservation %p (prev->end >= start)\n", | |
142 | rsv); | |
143 | bad = 1; | |
144 | } | |
145 | if (bad) { | |
146 | if (!verbose) { | |
147 | printk("Restarting reservation walk in verbose mode\n"); | |
148 | verbose = 1; | |
149 | goto restart; | |
150 | } | |
151 | } | |
152 | n = rb_next(n); | |
153 | prev = rsv; | |
154 | } | |
155 | printk("Window map complete.\n"); | |
156 | if (bad) | |
157 | BUG(); | |
158 | } | |
159 | #define rsv_window_dump(root, verbose) \ | |
160 | __rsv_window_dump((root), (verbose), __FUNCTION__) | |
161 | #else | |
162 | #define rsv_window_dump(root, verbose) do {} while (0) | |
163 | #endif | |
164 | ||
165 | static int | |
166 | goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal, | |
167 | unsigned int group, struct super_block * sb) | |
168 | { | |
169 | unsigned long group_first_block, group_last_block; | |
170 | ||
171 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
172 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
173 | group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | |
174 | ||
175 | if ((rsv->_rsv_start > group_last_block) || | |
176 | (rsv->_rsv_end < group_first_block)) | |
177 | return 0; | |
178 | if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start) | |
179 | || (goal + group_first_block > rsv->_rsv_end))) | |
180 | return 0; | |
181 | return 1; | |
182 | } | |
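/*
 * Example, with illustrative numbers (8192 blocks per group,
 * s_first_data_block == 1): group 2 spans fs-wide blocks
 * [16385, 24576].  A window [24570, 24600] overlaps the group, and a
 * group-relative goal of 8189 maps to fs block 24574, inside the
 * window, so this returns 1.
 */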
183 | ||
184 | /* | |
185 | * Find the reserved window which includes the goal, or the previous one | |
186 | * if the goal is not in any window. | |
187 | * Returns NULL if there are no windows or if all windows start after the goal. | |
188 | */ | |
189 | static struct ext3_reserve_window_node * | |
190 | search_reserve_window(struct rb_root *root, unsigned long goal) | |
191 | { | |
192 | struct rb_node *n = root->rb_node; | |
193 | struct ext3_reserve_window_node *rsv; | |
194 | ||
195 | if (!n) | |
196 | return NULL; | |
197 | ||
198 | do { | |
199 | rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); | |
200 | ||
201 | if (goal < rsv->rsv_start) | |
202 | n = n->rb_left; | |
203 | else if (goal > rsv->rsv_end) | |
204 | n = n->rb_right; | |
205 | else | |
206 | return rsv; | |
207 | } while (n); | |
208 | /* | |
209 | * We've fallen off the end of the tree: the goal wasn't inside | |
210 | * any particular node. OK, the previous node must be to one | |
211 | * side of the interval containing the goal. If it's the RHS, | |
212 | * we need to back up one. | |
213 | */ | |
214 | if (rsv->rsv_start > goal) { | |
215 | n = rb_prev(&rsv->rsv_node); | |
216 | rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); | |
217 | } | |
218 | return rsv; | |
219 | } | |
220 | ||
221 | void ext3_rsv_window_add(struct super_block *sb, | |
222 | struct ext3_reserve_window_node *rsv) | |
223 | { | |
224 | struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root; | |
225 | struct rb_node *node = &rsv->rsv_node; | |
226 | unsigned int start = rsv->rsv_start; | |
227 | ||
228 | struct rb_node ** p = &root->rb_node; | |
229 | struct rb_node * parent = NULL; | |
230 | struct ext3_reserve_window_node *this; | |
231 | ||
232 | while (*p) | |
233 | { | |
234 | parent = *p; | |
235 | this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node); | |
236 | ||
237 | if (start < this->rsv_start) | |
238 | p = &(*p)->rb_left; | |
239 | else if (start > this->rsv_end) | |
240 | p = &(*p)->rb_right; | |
241 | else | |
242 | BUG(); | |
243 | } | |
244 | ||
245 | rb_link_node(node, parent, p); | |
246 | rb_insert_color(node, root); | |
247 | } | |
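/*
 * The above is the standard Linux rbtree insertion idiom: walk down
 * from the root remembering the parent link, then rb_link_node() and
 * rb_insert_color() to splice in and rebalance.  Windows never
 * overlap, hence the BUG() when the new start falls inside an
 * existing window.
 */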
248 | ||
249 | static void rsv_window_remove(struct super_block *sb, | |
250 | struct ext3_reserve_window_node *rsv) | |
251 | { | |
252 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
253 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
254 | rsv->rsv_alloc_hit = 0; | |
255 | rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); | |
256 | } | |
257 | ||
258 | static inline int rsv_is_empty(struct ext3_reserve_window *rsv) | |
259 | { | |
260 | /* a valid reservation end block cannot be 0 */ | |
261 | return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED); | |
262 | } | |
263 | void ext3_init_block_alloc_info(struct inode *inode) | |
264 | { | |
265 | struct ext3_inode_info *ei = EXT3_I(inode); | |
266 | struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; | |
267 | struct super_block *sb = inode->i_sb; | |
268 | ||
269 | block_i = kmalloc(sizeof(*block_i), GFP_NOFS); | |
270 | if (block_i) { | |
271 | struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node; | |
272 | ||
273 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
274 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
275 | ||
276 | /* | |
277 | * if the filesystem is mounted with NORESERVATION, the goal | |
278 | * reservation window size is set to zero to indicate | |
279 | * block reservation is off | |
280 | */ | |
281 | if (!test_opt(sb, RESERVATION)) | |
282 | rsv->rsv_goal_size = 0; | |
283 | else | |
284 | rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS; | |
285 | rsv->rsv_alloc_hit = 0; | |
286 | block_i->last_alloc_logical_block = 0; | |
287 | block_i->last_alloc_physical_block = 0; | |
288 | } | |
289 | ei->i_block_alloc_info = block_i; | |
290 | } | |
291 | ||
292 | void ext3_discard_reservation(struct inode *inode) | |
293 | { | |
294 | struct ext3_inode_info *ei = EXT3_I(inode); | |
295 | struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; | |
296 | struct ext3_reserve_window_node *rsv; | |
297 | spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock; | |
298 | ||
299 | if (!block_i) | |
300 | return; | |
301 | ||
302 | rsv = &block_i->rsv_window_node; | |
303 | if (!rsv_is_empty(&rsv->rsv_window)) { | |
304 | spin_lock(rsv_lock); | |
305 | if (!rsv_is_empty(&rsv->rsv_window)) | |
306 | rsv_window_remove(inode->i_sb, rsv); | |
307 | spin_unlock(rsv_lock); | |
308 | } | |
309 | } | |
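/*
 * Note the check-lock-recheck pattern above: rsv_is_empty() is tested
 * once without s_rsv_window_lock to keep the common no-window case
 * cheap, then tested again under the lock because another task may
 * have emptied the window in between.
 */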
310 | ||
311 | /* Free given blocks, update quota and i_blocks field */ | |
312 | void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, | |
313 | unsigned long block, unsigned long count, | |
314 | int *pdquot_freed_blocks) | |
315 | { | |
316 | struct buffer_head *bitmap_bh = NULL; | |
317 | struct buffer_head *gd_bh; | |
318 | unsigned long block_group; | |
319 | unsigned long bit; | |
320 | unsigned long i; | |
321 | unsigned long overflow; | |
322 | struct ext3_group_desc * desc; | |
323 | struct ext3_super_block * es; | |
324 | struct ext3_sb_info *sbi; | |
325 | int err = 0, ret; | |
326 | unsigned group_freed; | |
327 | ||
328 | *pdquot_freed_blocks = 0; | |
329 | sbi = EXT3_SB(sb); | |
330 | es = sbi->s_es; | |
331 | if (block < le32_to_cpu(es->s_first_data_block) || | |
332 | block + count < block || | |
333 | block + count > le32_to_cpu(es->s_blocks_count)) { | |
334 | ext3_error (sb, "ext3_free_blocks", | |
335 | "Freeing blocks not in datazone - " | |
336 | "block = %lu, count = %lu", block, count); | |
337 | goto error_return; | |
338 | } | |
339 | ||
340 | ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1); | |
341 | ||
342 | do_more: | |
343 | overflow = 0; | |
344 | block_group = (block - le32_to_cpu(es->s_first_data_block)) / | |
345 | EXT3_BLOCKS_PER_GROUP(sb); | |
346 | bit = (block - le32_to_cpu(es->s_first_data_block)) % | |
347 | EXT3_BLOCKS_PER_GROUP(sb); | |
348 | /* | |
349 | * Check to see if we are freeing blocks across a group | |
350 | * boundary. | |
351 | */ | |
352 | if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) { | |
353 | overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb); | |
354 | count -= overflow; | |
355 | } | |
356 | brelse(bitmap_bh); | |
357 | bitmap_bh = read_block_bitmap(sb, block_group); | |
358 | if (!bitmap_bh) | |
359 | goto error_return; | |
360 | desc = ext3_get_group_desc (sb, block_group, &gd_bh); | |
361 | if (!desc) | |
362 | goto error_return; | |
363 | ||
364 | if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) || | |
365 | in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) || | |
366 | in_range (block, le32_to_cpu(desc->bg_inode_table), | |
367 | sbi->s_itb_per_group) || | |
368 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), | |
369 | sbi->s_itb_per_group)) | |
370 | ext3_error (sb, "ext3_free_blocks", | |
371 | "Freeing blocks in system zones - " | |
372 | "Block = %lu, count = %lu", | |
373 | block, count); | |
374 | ||
375 | /* | |
376 | * We are about to start releasing blocks in the bitmap, | |
377 | * so we need undo access. | |
378 | */ | |
379 | /* @@@ check errors */ | |
380 | BUFFER_TRACE(bitmap_bh, "getting undo access"); | |
381 | err = ext3_journal_get_undo_access(handle, bitmap_bh); | |
382 | if (err) | |
383 | goto error_return; | |
384 | ||
385 | /* | |
386 | * We are about to modify some metadata. Call the journal APIs | |
387 | * to unshare ->b_data if a currently-committing transaction is | |
388 | * using it | |
389 | */ | |
390 | BUFFER_TRACE(gd_bh, "get_write_access"); | |
391 | err = ext3_journal_get_write_access(handle, gd_bh); | |
392 | if (err) | |
393 | goto error_return; | |
394 | ||
395 | jbd_lock_bh_state(bitmap_bh); | |
396 | ||
397 | for (i = 0, group_freed = 0; i < count; i++) { | |
398 | /* | |
399 | * An HJ special. This is expensive... | |
400 | */ | |
401 | #ifdef CONFIG_JBD_DEBUG | |
402 | jbd_unlock_bh_state(bitmap_bh); | |
403 | { | |
404 | struct buffer_head *debug_bh; | |
405 | debug_bh = sb_find_get_block(sb, block + i); | |
406 | if (debug_bh) { | |
407 | BUFFER_TRACE(debug_bh, "Deleted!"); | |
408 | if (!bh2jh(bitmap_bh)->b_committed_data) | |
409 | BUFFER_TRACE(debug_bh, | |
410 | "No commited data in bitmap"); | |
411 | BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap"); | |
412 | __brelse(debug_bh); | |
413 | } | |
414 | } | |
415 | jbd_lock_bh_state(bitmap_bh); | |
416 | #endif | |
417 | if (need_resched()) { | |
418 | jbd_unlock_bh_state(bitmap_bh); | |
419 | cond_resched(); | |
420 | jbd_lock_bh_state(bitmap_bh); | |
421 | } | |
422 | /* @@@ This prevents newly-allocated data from being | |
423 | * freed and then reallocated within the same | |
424 | * transaction. | |
425 | * | |
426 | * Ideally we would want to allow that to happen, but to | |
427 | * do so requires making journal_forget() capable of | |
428 | * revoking the queued write of a data block, which | |
429 | * implies blocking on the journal lock. *forget() | |
430 | * cannot block due to truncate races. | |
431 | * | |
432 | * Eventually we can fix this by making journal_forget() | |
433 | * return a status indicating whether or not it was able | |
434 | * to revoke the buffer. On successful revoke, it is | |
435 | * safe not to set the allocation bit in the committed | |
436 | * bitmap, because we know that there is no outstanding | |
437 | * activity on the buffer any more and so it is safe to | |
438 | * reallocate it. | |
439 | */ | |
440 | BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); | |
441 | J_ASSERT_BH(bitmap_bh, | |
442 | bh2jh(bitmap_bh)->b_committed_data != NULL); | |
443 | ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i, | |
444 | bh2jh(bitmap_bh)->b_committed_data); | |
445 | ||
446 | /* | |
447 | * We clear the bit in the bitmap after setting the committed | |
448 | * data bit, because this is the reverse order to that which | |
449 | * the allocator uses. | |
450 | */ | |
451 | BUFFER_TRACE(bitmap_bh, "clear bit"); | |
452 | if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group), | |
453 | bit + i, bitmap_bh->b_data)) { | |
454 | jbd_unlock_bh_state(bitmap_bh); | |
455 | ext3_error(sb, __FUNCTION__, | |
456 | "bit already cleared for block %lu", block + i); | |
457 | jbd_lock_bh_state(bitmap_bh); | |
458 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | |
459 | } else { | |
460 | group_freed++; | |
461 | } | |
462 | } | |
463 | jbd_unlock_bh_state(bitmap_bh); | |
464 | ||
465 | spin_lock(sb_bgl_lock(sbi, block_group)); | |
466 | desc->bg_free_blocks_count = | |
467 | cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + | |
468 | group_freed); | |
469 | spin_unlock(sb_bgl_lock(sbi, block_group)); | |
470 | percpu_counter_mod(&sbi->s_freeblocks_counter, count); | |
471 | ||
472 | /* We dirtied the bitmap block */ | |
473 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | |
474 | err = ext3_journal_dirty_metadata(handle, bitmap_bh); | |
475 | ||
476 | /* And the group descriptor block */ | |
477 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | |
478 | ret = ext3_journal_dirty_metadata(handle, gd_bh); | |
479 | if (!err) err = ret; | |
480 | *pdquot_freed_blocks += group_freed; | |
481 | ||
482 | if (overflow && !err) { | |
483 | block += count; | |
484 | count = overflow; | |
485 | goto do_more; | |
486 | } | |
487 | sb->s_dirt = 1; | |
488 | error_return: | |
489 | brelse(bitmap_bh); | |
490 | ext3_std_error(sb, err); | |
491 | return; | |
492 | } | |
493 | ||
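/*
 * A worked example of the cross-group overflow path above
 * (illustrative numbers: 8192 blocks per group, s_first_data_block
 * == 1): freeing count == 100 from fs block 16380 gives bit == 8187
 * in group 1, so bit + count == 8287 exceeds the group.  The first
 * pass frees 5 blocks (overflow == 95), then do_more restarts at
 * block 16385 (the first block of group 2) with count == 95.
 */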
494 | /* Free given blocks, update quota and i_blocks field */ | |
495 | void ext3_free_blocks(handle_t *handle, struct inode *inode, | |
496 | unsigned long block, unsigned long count) | |
497 | { | |
498 | struct super_block * sb; | |
499 | int dquot_freed_blocks; | |
500 | ||
501 | sb = inode->i_sb; | |
502 | if (!sb) { | |
503 | printk ("ext3_free_blocks: nonexistent device"); | |
504 | return; | |
505 | } | |
506 | ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); | |
507 | if (dquot_freed_blocks) | |
508 | DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); | |
509 | return; | |
510 | } | |
511 | ||
512 | /* | |
513 | * For ext3 allocations, we must not reuse any blocks which are | |
514 | * allocated in the bitmap buffer's "last committed data" copy. This | |
515 | * prevents deletes from freeing up the page for reuse until we have | |
516 | * committed the delete transaction. | |
517 | * | |
518 | * If we didn't do this, then deleting something and reallocating it as | |
519 | * data would allow the old block to be overwritten before the | |
520 | * transaction committed (because we force data to disk before commit). | |
521 | * This would lead to corruption if we crashed between overwriting the | |
522 | * data and committing the delete. | |
523 | * | |
524 | * @@@ We may want to make this allocation behaviour conditional on | |
525 | * data-writes at some point, and disable it for metadata allocations or | |
526 | * sync-data inodes. | |
527 | */ | |
528 | static int ext3_test_allocatable(int nr, struct buffer_head *bh) | |
529 | { | |
530 | int ret; | |
531 | struct journal_head *jh = bh2jh(bh); | |
532 | ||
533 | if (ext3_test_bit(nr, bh->b_data)) | |
534 | return 0; | |
535 | ||
536 | jbd_lock_bh_state(bh); | |
537 | if (!jh->b_committed_data) | |
538 | ret = 1; | |
539 | else | |
540 | ret = !ext3_test_bit(nr, jh->b_committed_data); | |
541 | jbd_unlock_bh_state(bh); | |
542 | return ret; | |
543 | } | |
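/*
 * In effect, a bit is allocatable only when it is clear in both views:
 *
 *	live bit	committed bit	allocatable
 *	   1		    any		    no
 *	   0		  (no copy)	    yes
 *	   0		     1		    no
 *	   0		     0		    yes
 */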
544 | ||
545 | static int | |
546 | bitmap_search_next_usable_block(int start, struct buffer_head *bh, | |
547 | int maxblocks) | |
548 | { | |
549 | int next; | |
550 | struct journal_head *jh = bh2jh(bh); | |
551 | ||
552 | /* | |
553 | * The bitmap search --- search forward alternately through the actual | |
554 | * bitmap and the last-committed copy until we find a bit free in | |
555 | * both | |
556 | */ | |
557 | while (start < maxblocks) { | |
558 | next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start); | |
559 | if (next >= maxblocks) | |
560 | return -1; | |
561 | if (ext3_test_allocatable(next, bh)) | |
562 | return next; | |
563 | jbd_lock_bh_state(bh); | |
564 | if (jh->b_committed_data) | |
565 | start = ext3_find_next_zero_bit(jh->b_committed_data, | |
566 | maxblocks, next); | |
567 | jbd_unlock_bh_state(bh); | |
568 | } | |
569 | return -1; | |
570 | } | |
571 | ||
572 | /* | |
573 | * Find an allocatable block in a bitmap. We honour both the bitmap and | |
574 | * its last-committed copy (if that exists), and perform the "most | |
575 | * appropriate allocation" algorithm of looking for a free block near | |
576 | * the initial goal; then for a free byte somewhere in the bitmap; then | |
577 | * for any free bit in the bitmap. | |
578 | */ | |
579 | static int | |
580 | find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) | |
581 | { | |
582 | int here, next; | |
583 | char *p, *r; | |
584 | ||
585 | if (start > 0) { | |
586 | /* | |
587 | * The goal was occupied; search forward for a free | |
588 | * block within the next XX blocks. | |
589 | * | |
590 | * end_goal is more or less random, but it has to be | |
591 | * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the | |
592 | * next 64-bit boundary is simple.. | |
593 | */ | |
594 | int end_goal = (start + 63) & ~63; | |
595 | if (end_goal > maxblocks) | |
596 | end_goal = maxblocks; | |
597 | here = ext3_find_next_zero_bit(bh->b_data, end_goal, start); | |
598 | if (here < end_goal && ext3_test_allocatable(here, bh)) | |
599 | return here; | |
600 | ext3_debug("Bit not found near goal\n"); | |
601 | } | |
602 | ||
603 | here = start; | |
604 | if (here < 0) | |
605 | here = 0; | |
606 | ||
607 | p = ((char *)bh->b_data) + (here >> 3); | |
608 | r = memscan(p, 0, (maxblocks - here + 7) >> 3); | |
609 | next = (r - ((char *)bh->b_data)) << 3; | |
610 | ||
611 | if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh)) | |
612 | return next; | |
613 | ||
614 | /* | |
615 | * The bitmap search --- search forward alternately through the actual | |
616 | * bitmap and the last-committed copy until we find a bit free in | |
617 | * both | |
618 | */ | |
619 | here = bitmap_search_next_usable_block(here, bh, maxblocks); | |
620 | return here; | |
621 | } | |
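/*
 * The memscan() step above hunts for an entirely free byte, i.e.
 * eight clear bits in a row.  For example, if b_data starts
 * ff ff 00 ..., memscan() returns the address of offset 2 and
 * next becomes 2 << 3 == 16, the first bit of that free byte.
 */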
622 | ||
623 | /* | |
624 | * We think we can allocate this block in this bitmap. Try to set the bit. | |
625 | * If that succeeds then check that nobody has allocated and then freed the | |
626 | * block since we saw that it was not marked in b_committed_data. If it _was_ | |
627 | * allocated and freed then clear the bit in the bitmap again and return | |
628 | * zero (failure). | |
629 | */ | |
630 | static inline int | |
631 | claim_block(spinlock_t *lock, int block, struct buffer_head *bh) | |
632 | { | |
633 | struct journal_head *jh = bh2jh(bh); | |
634 | int ret; | |
635 | ||
636 | if (ext3_set_bit_atomic(lock, block, bh->b_data)) | |
637 | return 0; | |
638 | jbd_lock_bh_state(bh); | |
639 | if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) { | |
640 | ext3_clear_bit_atomic(lock, block, bh->b_data); | |
641 | ret = 0; | |
642 | } else { | |
643 | ret = 1; | |
644 | } | |
645 | jbd_unlock_bh_state(bh); | |
646 | return ret; | |
647 | } | |
648 | ||
649 | /* | |
650 | * If we failed to allocate the desired block then we may end up crossing to a | |
651 | * new bitmap. In that case we must release write access to the old one via | |
652 | * ext3_journal_release_buffer(), else we'll run out of credits. | |
653 | */ | |
654 | static int | |
655 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, | |
b54e41ec MC |
656 | struct buffer_head *bitmap_bh, int goal, |
657 | unsigned long *count, struct ext3_reserve_window *my_rsv) | |
1da177e4 LT |
658 | { |
659 | int group_first_block, start, end; | |
b54e41ec | 660 | unsigned long num = 0; |
1da177e4 LT |
661 | |
662 | /* we do allocation within the reservation window if we have a window */ | |
663 | if (my_rsv) { | |
664 | group_first_block = | |
665 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
666 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
667 | if (my_rsv->_rsv_start >= group_first_block) | |
668 | start = my_rsv->_rsv_start - group_first_block; | |
669 | else | |
670 | /* reservation window cross group boundary */ | |
671 | start = 0; | |
672 | end = my_rsv->_rsv_end - group_first_block + 1; | |
673 | if (end > EXT3_BLOCKS_PER_GROUP(sb)) | |
674 | /* reservation window crosses group boundary */ | |
675 | end = EXT3_BLOCKS_PER_GROUP(sb); | |
676 | if ((start <= goal) && (goal < end)) | |
677 | start = goal; | |
678 | else | |
679 | goal = -1; | |
680 | } else { | |
681 | if (goal > 0) | |
682 | start = goal; | |
683 | else | |
684 | start = 0; | |
685 | end = EXT3_BLOCKS_PER_GROUP(sb); | |
686 | } | |
687 | ||
688 | BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb)); | |
689 | ||
690 | repeat: | |
691 | if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) { | |
692 | goal = find_next_usable_block(start, bitmap_bh, end); | |
693 | if (goal < 0) | |
694 | goto fail_access; | |
695 | if (!my_rsv) { | |
696 | int i; | |
697 | ||
698 | for (i = 0; i < 7 && goal > start && | |
699 | ext3_test_allocatable(goal - 1, | |
700 | bitmap_bh); | |
701 | i++, goal--) | |
702 | ; | |
703 | } | |
704 | } | |
705 | start = goal; | |
706 | ||
707 | if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { | |
708 | /* | |
709 | * The block was allocated by another thread, or it was | |
710 | * allocated and then freed by another thread | |
711 | */ | |
712 | start++; | |
713 | goal++; | |
714 | if (start >= end) | |
715 | goto fail_access; | |
716 | goto repeat; | |
717 | } | |
b54e41ec MC |
718 | num++; |
719 | goal++; | |
720 | while (num < *count && goal < end | |
721 | && ext3_test_allocatable(goal, bitmap_bh) | |
722 | && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { | |
723 | num++; | |
724 | goal++; | |
725 | } | |
726 | *count = num; | |
727 | return goal - num; | |
1da177e4 | 728 | fail_access: |
b54e41ec | 729 | *count = num; |
1da177e4 LT |
730 | return -1; |
731 | } | |
732 | ||
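/*
 * Window clamping in ext3_try_to_allocate(), with the same
 * illustrative geometry as earlier: for group 2 (fs blocks
 * [16385, 24576]) and a reservation [24570, 24600], start becomes
 * 24570 - 16385 == 8185 and end is clamped from 8216 down to
 * EXT3_BLOCKS_PER_GROUP == 8192, so only the part of the window that
 * lies inside this group is searched.
 */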
733 | /** | |
734 | * find_next_reservable_window(): | |
735 | * find a reservable space within the given range. | |
736 | * It does not allocate the reservation window for now: | |
737 | * alloc_new_reservation() will do the work later. | |
738 | * | |
739 | * @search_head: the head of the searching list; | |
740 | * This is not necessarily the list head of the whole filesystem | |
741 | * | |
742 | * We have both head and start_block to assist the search | |
743 | * for the reservable space. The list starts from head, | |
744 | * but we will shift to the place where start_block is, | |
745 | * then start from there, when looking for a reservable space. | |
746 | * | |
747 | * @size: the target new reservation window size | |
748 | * | |
749 | * @group_first_block: the first block we consider to start | |
750 | * the real search from | |
751 | * | |
752 | * @last_block: | |
753 | * the maximum block number that our goal reservable space | |
754 | * could start from. This is normally the last block in this | |
755 | * group. The search ends when it finds that the start of the | |
756 | * next possible reservable space is beyond this boundary. | |
757 | * This handles reservation window requests that cross the | |
758 | * group boundary. | |
759 | * | |
760 | * Basically we search the given range (start_block, last_block), | |
761 | * rather than the whole reservation tree, | |
762 | * to find a free region that is of the requested size and has not | |
763 | * been reserved. | |
764 | * | |
1da177e4 | 765 | */ |
21fe3471 | 766 | static int find_next_reservable_window( |
1da177e4 | 767 | struct ext3_reserve_window_node *search_head, |
21fe3471 MC |
768 | struct ext3_reserve_window_node *my_rsv, |
769 | struct super_block * sb, int start_block, | |
1da177e4 LT |
770 | int last_block) |
771 | { | |
772 | struct rb_node *next; | |
773 | struct ext3_reserve_window_node *rsv, *prev; | |
774 | int cur; | |
21fe3471 | 775 | int size = my_rsv->rsv_goal_size; |
1da177e4 LT |
776 | |
777 | /* TODO: make the start of the reservation window byte-aligned */ | |
778 | /* cur = *start_block & ~7;*/ | |
21fe3471 | 779 | cur = start_block; |
1da177e4 LT |
780 | rsv = search_head; |
781 | if (!rsv) | |
21fe3471 | 782 | return -1; |
1da177e4 LT |
783 | |
784 | while (1) { | |
785 | if (cur <= rsv->rsv_end) | |
786 | cur = rsv->rsv_end + 1; | |
787 | ||
788 | /* TODO? | |
789 | * if we cannot find a reservable space of the expected size, | |
790 | * we could, during the re-search, remember | |
791 | * the largest reservable space we have seen | |
792 | * and return that one. | |
793 | * | |
794 | * For now it fails if we cannot find a reservable | |
795 | * space of the expected size (or more)... | |
796 | */ | |
797 | if (cur > last_block) | |
21fe3471 | 798 | return -1; /* fail */ |
1da177e4 LT |
799 | |
800 | prev = rsv; | |
801 | next = rb_next(&rsv->rsv_node); | |
21fe3471 | 802 | rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); |
1da177e4 LT |
803 | |
804 | /* | |
805 | * Reached the last reservation, we can just append to the | |
806 | * previous one. | |
807 | */ | |
808 | if (!next) | |
809 | break; | |
810 | ||
811 | if (cur + size <= rsv->rsv_start) { | |
812 | /* | |
813 | * Found a reservable space big enough. We could | |
814 | * have a reservation across the group boundary here | |
815 | */ | |
816 | break; | |
817 | } | |
818 | } | |
819 | /* | |
820 | * We get here in one of two cases: | |
821 | * either we reached the end of the whole list | |
822 | * and there is reservable space after its last entry, | |
823 | * in which case we append to the end of the list; | |
824 | * | |
825 | * or we found a reservable space in the middle of the list, | |
826 | * and return the reservation window that we could append to. | |
827 | * Either way, we succeed. | |
828 | */ | |
21fe3471 MC |
829 | |
830 | if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window))) | |
831 | rsv_window_remove(sb, my_rsv); | |
832 | ||
833 | /* | |
834 | * Let's book the whole available window for now. We will check the | |
835 | * disk bitmap later and then, if there are free blocks, we adjust | |
836 | * the window size if it's larger than requested. | |
837 | * Otherwise, we will remove this node from the tree the next time | |
838 | * we call find_next_reservable_window. | |
839 | */ | |
840 | my_rsv->rsv_start = cur; | |
841 | my_rsv->rsv_end = cur + size - 1; | |
842 | my_rsv->rsv_alloc_hit = 0; | |
843 | ||
844 | if (prev != my_rsv) | |
845 | ext3_rsv_window_add(sb, my_rsv); | |
846 | ||
847 | return 0; | |
1da177e4 LT |
848 | } |
849 | ||
850 | /** | |
851 | * alloc_new_reservation()--allocate a new reservation window | |
852 | * | |
853 | * To make a new reservation, we search part of the filesystem | |
854 | * reservation list (the part that falls inside the group). We try to | |
855 | * allocate a new reservation window near the allocation goal, | |
856 | * or the beginning of the group, if there is no goal. | |
857 | * | |
858 | * We first find a reservable space after the goal, then from | |
859 | * there, we check the bitmap for the first free block after | |
860 | * it. If there is no free block until the end of the group, the | |
861 | * whole group is full and we fail. Otherwise, we check whether the | |
862 | * free block is inside the expected reservable space; if so, we | |
863 | * succeed. | |
864 | * If the first free block is outside the reservable space, we | |
865 | * restart the search for the next available space from that | |
866 | * free block, and go on. | |
867 | * | |
868 | * On success, a new reservation is found and inserted into the list. | |
869 | * It contains at least one free block, and it does not overlap with other | |
870 | * reservation windows. | |
871 | * | |
872 | * On failure, we could not find a reservation window in this group. | |
873 | * | |
874 | * @rsv: the reservation | |
875 | * | |
876 | * @goal: The goal (group-relative). It is where the search for a | |
877 | * free reservable space should start from. | |
878 | * If we have a goal (goal >= 0), we start from there; | |
879 | * with no goal (goal == -1), we start from the first block | |
880 | * of the group. | |
881 | * | |
882 | * @sb: the super block | |
883 | * @group: the group we are trying to allocate in | |
884 | * @bitmap_bh: the block group block bitmap | |
21fe3471 | 885 | * |
1da177e4 LT |
886 | */ |
887 | static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, | |
888 | int goal, struct super_block *sb, | |
889 | unsigned int group, struct buffer_head *bitmap_bh) | |
890 | { | |
891 | struct ext3_reserve_window_node *search_head; | |
892 | int group_first_block, group_end_block, start_block; | |
893 | int first_free_block; | |
1da177e4 LT |
894 | struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root; |
895 | unsigned long size; | |
21fe3471 MC |
896 | int ret; |
897 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | |
1da177e4 LT |
898 | |
899 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
900 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
901 | group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | |
902 | ||
903 | if (goal < 0) | |
904 | start_block = group_first_block; | |
905 | else | |
906 | start_block = goal + group_first_block; | |
907 | ||
908 | size = my_rsv->rsv_goal_size; | |
21fe3471 | 909 | |
1da177e4 LT |
910 | if (!rsv_is_empty(&my_rsv->rsv_window)) { |
911 | /* | |
912 | * if the old reservation crosses a group boundary | |
913 | * and the goal is inside the old reservation window, | |
914 | * we come here when we have just failed to allocate from | |
915 | * the first part of the window. We still have another part | |
916 | * that belongs to the next group. In this case, there is no | |
917 | * point discarding our window and trying to allocate a new one | |
918 | * in this group (which would fail); we should | |
919 | * keep the reservation window and simply move on. | |
920 | * | |
921 | * Maybe we could shift the start block of the reservation | |
922 | * window to the first block of the next group. | |
923 | */ | |
924 | ||
925 | if ((my_rsv->rsv_start <= group_end_block) && | |
926 | (my_rsv->rsv_end > group_end_block) && | |
927 | (start_block >= my_rsv->rsv_start)) | |
928 | return -1; | |
929 | ||
930 | if ((my_rsv->rsv_alloc_hit > | |
931 | (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { | |
932 | /* | |
933 | * if the previous allocation hit ratio is greater than half, | |
934 | * we double the size of the reservation window next time; | |
935 | * otherwise we keep it the same | |
936 | */ | |
937 | size = size * 2; | |
938 | if (size > EXT3_MAX_RESERVE_BLOCKS) | |
939 | size = EXT3_MAX_RESERVE_BLOCKS; | |
940 | my_rsv->rsv_goal_size= size; | |
941 | } | |
942 | } | |
21fe3471 MC |
943 | |
944 | spin_lock(rsv_lock); | |
1da177e4 LT |
945 | /* |
946 | * shift the search start to the window near the goal block | |
947 | */ | |
948 | search_head = search_reserve_window(fs_rsv_root, start_block); | |
949 | ||
950 | /* | |
951 | * find_next_reservable_window() simply finds a reservable window | |
952 | * inside the given range(start_block, group_end_block). | |
953 | * | |
954 | * To make sure the reservation window has a free bit inside it, we | |
955 | * need to check the bitmap after we found a reservable window. | |
956 | */ | |
957 | retry: | |
21fe3471 MC |
958 | ret = find_next_reservable_window(search_head, my_rsv, sb, |
959 | start_block, group_end_block); | |
960 | ||
961 | if (ret == -1) { | |
962 | if (!rsv_is_empty(&my_rsv->rsv_window)) | |
963 | rsv_window_remove(sb, my_rsv); | |
964 | spin_unlock(rsv_lock); | |
965 | return -1; | |
966 | } | |
967 | ||
1da177e4 LT |
968 | /* |
969 | * On success, find_next_reservable_window() returns the | |
970 | * reservation window where there is a reservable space after it. | |
971 | * Before we reserve this reservable space, we need | |
972 | * to make sure there is at least one free block inside this region. | |
973 | * | |
974 | * Search the block bitmap and the copy of the last committed bitmap | |
975 | * alternately for the first free bit, until we find an allocatable | |
976 | * block. The search starts from the start block of the reservable | |
977 | * space we just found. | |
978 | */ | |
21fe3471 | 979 | spin_unlock(rsv_lock); |
1da177e4 | 980 | first_free_block = bitmap_search_next_usable_block( |
21fe3471 | 981 | my_rsv->rsv_start - group_first_block, |
1da177e4 LT |
982 | bitmap_bh, group_end_block - group_first_block + 1); |
983 | ||
984 | if (first_free_block < 0) { | |
985 | /* | |
986 | * no free block left on the bitmap, so there is no point | |
987 | * in reserving the space; return failure. | |
988 | */ | |
21fe3471 MC |
989 | spin_lock(rsv_lock); |
990 | if (!rsv_is_empty(&my_rsv->rsv_window)) | |
991 | rsv_window_remove(sb, my_rsv); | |
992 | spin_unlock(rsv_lock); | |
993 | return -1; /* failed */ | |
1da177e4 | 994 | } |
21fe3471 | 995 | |
1da177e4 LT |
996 | start_block = first_free_block + group_first_block; |
997 | /* | |
998 | * check if the first free block is within the | |
21fe3471 | 999 | * free space we just reserved |
1da177e4 | 1000 | */ |
21fe3471 MC |
1001 | if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) |
1002 | return 0; /* success */ | |
1da177e4 LT |
1003 | /* |
1004 | * if the first free bit we found is out of the reservable space | |
21fe3471 | 1005 | * continue search for next reservable space, |
1da177e4 LT |
1006 | * start from where the free block is, |
1007 | * we also shift the list head to where we stopped last time | |
1008 | */ | |
21fe3471 MC |
1009 | search_head = my_rsv; |
1010 | spin_lock(rsv_lock); | |
1da177e4 | 1011 | goto retry; |
1da177e4 LT |
1012 | } |
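/*
 * The retry loop above alternates between the tree and the bitmap:
 * find_next_reservable_window() books an unreserved range
 * [cur, cur + size - 1], then the bitmap is consulted; if the first
 * free bit lands beyond that range, the search resumes from the free
 * block with search_head == my_rsv, since my_rsv was just (re)inserted
 * at the point where the last scan stopped.
 */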
1013 | ||
d48589bf MC |
1014 | static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, |
1015 | struct super_block *sb, int size) | |
1016 | { | |
1017 | struct ext3_reserve_window_node *next_rsv; | |
1018 | struct rb_node *next; | |
1019 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | |
1020 | ||
1021 | if (!spin_trylock(rsv_lock)) | |
1022 | return; | |
1023 | ||
1024 | next = rb_next(&my_rsv->rsv_node); | |
1025 | ||
1026 | if (!next) | |
1027 | my_rsv->rsv_end += size; | |
1028 | else { | |
1029 | next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); | |
1030 | ||
1031 | if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) | |
1032 | my_rsv->rsv_end += size; | |
1033 | else | |
1034 | my_rsv->rsv_end = next_rsv->rsv_start - 1; | |
1035 | } | |
1036 | spin_unlock(rsv_lock); | |
1037 | } | |
1038 | ||
1da177e4 LT |
1039 | /* |
1040 | * This is the main function used to allocate a new block and its reservation | |
1041 | * window. | |
1042 | * | |
1043 | * Each time a new block allocation is needed, we first try to allocate | |
1044 | * from the inode's own reservation. If it does not have a reservation | |
1045 | * window, then instead of first looking for a free bit in the bitmap and | |
1046 | * then checking the reservation list to see whether that bit falls inside | |
1047 | * somebody else's reservation window, we try to allocate a reservation | |
1048 | * window for the inode starting from the goal, then allocate within it. | |
1049 | * | |
1050 | * This avoids searching the reservation list again and | |
5b116879 | 1051 | * again when somebody is looking for a free block (without |
1da177e4 LT |
1052 | * reservation), and there are lots of free blocks, but they are all |
1053 | * being reserved. | |
1054 | * | |
1055 | * We keep the per-filesystem reservation windows in a sorted red-black | |
1056 | * tree. The insert, remove and find-a-free-space (non-reserved) | |
1057 | * operations on this sorted tree should be fast. | |
1058 | * | |
1059 | */ | |
1060 | static int | |
1061 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |
1062 | unsigned int group, struct buffer_head *bitmap_bh, | |
1063 | int goal, struct ext3_reserve_window_node * my_rsv, | |
b54e41ec | 1064 | unsigned long *count, int *errp) |
1da177e4 | 1065 | { |
1da177e4 LT |
1066 | unsigned long group_first_block; |
1067 | int ret = 0; | |
1068 | int fatal; | |
b54e41ec | 1069 | unsigned long num = *count; |
1da177e4 LT |
1070 | |
1071 | *errp = 0; | |
1072 | ||
1073 | /* | |
1074 | * Make sure we use undo access for the bitmap, because it is critical | |
1075 | * that we do the frozen_data COW on bitmap buffers in all cases even | |
1076 | * if the buffer is in BJ_Forget state in the committing transaction. | |
1077 | */ | |
1078 | BUFFER_TRACE(bitmap_bh, "get undo access for new block"); | |
1079 | fatal = ext3_journal_get_undo_access(handle, bitmap_bh); | |
1080 | if (fatal) { | |
1081 | *errp = fatal; | |
1082 | return -1; | |
1083 | } | |
1084 | ||
1085 | /* | |
1086 | * we don't deal with reservations when | |
1087 | * the filesystem is mounted without reservations, | |
1088 | * or the file is not a regular file, | |
1089 | * or the last attempt to allocate a block with reservations on failed | |
1090 | */ | |
1091 | if (my_rsv == NULL ) { | |
b54e41ec MC |
1092 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, |
1093 | goal, count, NULL); | |
1da177e4 LT |
1094 | goto out; |
1095 | } | |
1da177e4 LT |
1096 | /* |
1097 | * goal is a group relative block number (if there is a goal) | |
1098 | * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb) | |
1099 | * first block is a filesystem wide block number | |
1100 | * first block is the block number of the first block in this group | |
1101 | */ | |
1102 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
1103 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
1104 | ||
1105 | /* | |
1106 | * Basically we will allocate a new block from inode's reservation | |
1107 | * window. | |
1108 | * | |
1109 | * We need to allocate a new reservation window, if: | |
1110 | * a) inode does not have a reservation window; or | |
1111 | * b) last attempt to allocate a block from existing reservation | |
1112 | * failed; or | |
1113 | * c) we come here with a goal that is not inside our window | |
1114 | * | |
1115 | * We do not need to allocate a new reservation window if we come here | |
1116 | * at the beginning with a goal and the goal is inside the window, or | |
1117 | * if we don't have a goal but already have a reservation window; | |
1118 | * in those cases we can allocate from the reservation window directly. | |
1119 | */ | |
1120 | while (1) { | |
21fe3471 MC |
1121 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || |
1122 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { | |
d48589bf MC |
1123 | if (my_rsv->rsv_goal_size < *count) |
1124 | my_rsv->rsv_goal_size = *count; | |
1da177e4 LT |
1125 | ret = alloc_new_reservation(my_rsv, goal, sb, |
1126 | group, bitmap_bh); | |
1da177e4 LT |
1127 | if (ret < 0) |
1128 | break; /* failed */ | |
1129 | ||
21fe3471 | 1130 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) |
1da177e4 | 1131 | goal = -1; |
d48589bf MC |
1132 | } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count) |
1133 | try_to_extend_reservation(my_rsv, sb, | |
1134 | *count-my_rsv->rsv_end + goal - 1); | |
1135 | ||
21fe3471 MC |
1136 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) |
1137 | || (my_rsv->rsv_end < group_first_block)) | |
1da177e4 LT |
1138 | BUG(); |
1139 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, | |
b54e41ec | 1140 | &num, &my_rsv->rsv_window); |
1da177e4 | 1141 | if (ret >= 0) { |
b54e41ec MC |
1142 | my_rsv->rsv_alloc_hit += num; |
1143 | *count = num; | |
1da177e4 LT |
1144 | break; /* succeed */ |
1145 | } | |
b54e41ec | 1146 | num = *count; |
1da177e4 LT |
1147 | } |
1148 | out: | |
1149 | if (ret >= 0) { | |
1150 | BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for " | |
1151 | "bitmap block"); | |
1152 | fatal = ext3_journal_dirty_metadata(handle, bitmap_bh); | |
1153 | if (fatal) { | |
1154 | *errp = fatal; | |
1155 | return -1; | |
1156 | } | |
1157 | return ret; | |
1158 | } | |
1159 | ||
1160 | BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); | |
1161 | ext3_journal_release_buffer(handle, bitmap_bh); | |
1162 | return ret; | |
1163 | } | |
1164 | ||
1165 | static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | |
1166 | { | |
1167 | int free_blocks, root_blocks; | |
1168 | ||
1169 | free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); | |
1170 | root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); | |
1171 | if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && | |
1172 | sbi->s_resuid != current->fsuid && | |
1173 | (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { | |
1174 | return 0; | |
1175 | } | |
1176 | return 1; | |
1177 | } | |
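/*
 * Example: with s_r_blocks_count == 100, an allocator that is neither
 * the reserved uid/gid nor CAP_SYS_RESOURCE-capable is refused once
 * fewer than 101 free blocks remain; privileged callers may still dip
 * into the reserve.
 */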
1178 | ||
1179 | /* | |
1180 | * ext3_should_retry_alloc() is called when ENOSPC is returned, and if | |
1181 | * it is profitable to retry the operation, this function will wait | |
1182 | * for the current or committing transaction to complete, and then | |
1183 | * return TRUE. | |
1184 | */ | |
1185 | int ext3_should_retry_alloc(struct super_block *sb, int *retries) | |
1186 | { | |
1187 | if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3) | |
1188 | return 0; | |
1189 | ||
1190 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); | |
1191 | ||
1192 | return journal_force_commit_nested(EXT3_SB(sb)->s_journal); | |
1193 | } | |
1194 | ||
1195 | /* | |
1196 | * ext3_new_block uses a goal block to assist allocation. If the goal is | |
1197 | * free, or there is a free block within 32 blocks of the goal, that block | |
1198 | * is allocated. Otherwise a forward search is made for a free block; within | |
1199 | * each block group the search first looks for an entire free byte in the block | |
1200 | * bitmap, and then for any free bit if that fails. | |
1201 | * This function also updates quota and i_blocks field. | |
1202 | */ | |
b54e41ec MC |
1203 | int ext3_new_blocks(handle_t *handle, struct inode *inode, |
1204 | unsigned long goal, unsigned long *count, int *errp) | |
1da177e4 LT |
1205 | { |
1206 | struct buffer_head *bitmap_bh = NULL; | |
1207 | struct buffer_head *gdp_bh; | |
1208 | int group_no; | |
1209 | int goal_group; | |
1210 | int ret_block; | |
1211 | int bgi; /* blockgroup iteration index */ | |
1212 | int target_block; | |
1213 | int fatal = 0, err; | |
1214 | int performed_allocation = 0; | |
1215 | int free_blocks; | |
1216 | struct super_block *sb; | |
1217 | struct ext3_group_desc *gdp; | |
1218 | struct ext3_super_block *es; | |
1219 | struct ext3_sb_info *sbi; | |
1220 | struct ext3_reserve_window_node *my_rsv = NULL; | |
1221 | struct ext3_block_alloc_info *block_i; | |
1222 | unsigned short windowsz = 0; | |
1223 | #ifdef EXT3FS_DEBUG | |
1224 | static int goal_hits, goal_attempts; | |
1225 | #endif | |
1226 | unsigned long ngroups; | |
b54e41ec | 1227 | unsigned long num = *count; |
1da177e4 LT |
1228 | |
1229 | *errp = -ENOSPC; | |
1230 | sb = inode->i_sb; | |
1231 | if (!sb) { | |
1232 | printk("ext3_new_block: nonexistent device"); | |
1233 | return 0; | |
1234 | } | |
1235 | ||
1236 | /* | |
1237 | * Check quota for allocation of this block. | |
1238 | */ | |
faa56976 | 1239 | if (DQUOT_ALLOC_BLOCK(inode, num)) { |
1da177e4 LT |
1240 | *errp = -EDQUOT; |
1241 | return 0; | |
1242 | } | |
1243 | ||
1244 | sbi = EXT3_SB(sb); | |
1245 | es = EXT3_SB(sb)->s_es; | |
1246 | ext3_debug("goal=%lu.\n", goal); | |
1247 | /* | |
1248 | * Allocate a block from the reservation only when | |
1249 | * the filesystem is mounted with reservations (default, -o reservation), and | |
1250 | * it's a regular file, and | |
1251 | * the desired window size is greater than 0 (One could use ioctl | |
1252 | * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off | |
1253 | * reservation on that particular file) | |
1254 | */ | |
1255 | block_i = EXT3_I(inode)->i_block_alloc_info; | |
1256 | if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) | |
1257 | my_rsv = &block_i->rsv_window_node; | |
1258 | ||
1259 | if (!ext3_has_free_blocks(sbi)) { | |
1260 | *errp = -ENOSPC; | |
1261 | goto out; | |
1262 | } | |
1263 | ||
1264 | /* | |
1265 | * First, test whether the goal block is free. | |
1266 | */ | |
1267 | if (goal < le32_to_cpu(es->s_first_data_block) || | |
1268 | goal >= le32_to_cpu(es->s_blocks_count)) | |
1269 | goal = le32_to_cpu(es->s_first_data_block); | |
1270 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / | |
1271 | EXT3_BLOCKS_PER_GROUP(sb); | |
1272 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | |
1273 | if (!gdp) | |
1274 | goto io_error; | |
1275 | ||
1276 | goal_group = group_no; | |
1277 | retry: | |
1278 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | |
1279 | /* | |
1280 | * if there are not enough free blocks to make a new reservation, | |
1281 | * turn off reservations for this allocation | |
1282 | */ | |
1283 | if (my_rsv && (free_blocks < windowsz) | |
1284 | && (rsv_is_empty(&my_rsv->rsv_window))) | |
1285 | my_rsv = NULL; | |
1286 | ||
1287 | if (free_blocks > 0) { | |
1288 | ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) % | |
1289 | EXT3_BLOCKS_PER_GROUP(sb)); | |
1290 | bitmap_bh = read_block_bitmap(sb, group_no); | |
1291 | if (!bitmap_bh) | |
1292 | goto io_error; | |
1293 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | |
b54e41ec | 1294 | bitmap_bh, ret_block, my_rsv, &num, &fatal); |
1da177e4 LT |
1295 | if (fatal) |
1296 | goto out; | |
1297 | if (ret_block >= 0) | |
1298 | goto allocated; | |
1299 | } | |
1300 | ||
1301 | ngroups = EXT3_SB(sb)->s_groups_count; | |
1302 | smp_rmb(); | |
1303 | ||
1304 | /* | |
1305 | * Now search the rest of the groups. We assume that | |
1306 | * group_no and gdp correctly point to the last group visited. | |
1307 | */ | |
1308 | for (bgi = 0; bgi < ngroups; bgi++) { | |
1309 | group_no++; | |
1310 | if (group_no >= ngroups) | |
1311 | group_no = 0; | |
1312 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | |
1313 | if (!gdp) { | |
1314 | *errp = -EIO; | |
1315 | goto out; | |
1316 | } | |
1317 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | |
1318 | /* | |
1319 | * skip this group if the number of | |
1320 | * free blocks is less than half of the reservation | |
1321 | * window size. | |
1322 | */ | |
1323 | if (free_blocks <= (windowsz/2)) | |
1324 | continue; | |
1325 | ||
1326 | brelse(bitmap_bh); | |
1327 | bitmap_bh = read_block_bitmap(sb, group_no); | |
1328 | if (!bitmap_bh) | |
1329 | goto io_error; | |
1330 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | |
b54e41ec | 1331 | bitmap_bh, -1, my_rsv, &num, &fatal); |
1da177e4 LT |
1332 | if (fatal) |
1333 | goto out; | |
1334 | if (ret_block >= 0) | |
1335 | goto allocated; | |
1336 | } | |
1337 | /* | |
1338 | * We may have ended up with a bogus ENOSPC error above because | |
1339 | * the filesystem is "full" of reservations, while | |
1340 | * there may indeed be free blocks available on disk. | |
1341 | * In this case, we just forget about the reservations and | |
1342 | * do the block allocation as if without reservations. | |
1343 | */ | |
1344 | if (my_rsv) { | |
1345 | my_rsv = NULL; | |
1346 | group_no = goal_group; | |
1347 | goto retry; | |
1348 | } | |
1349 | /* No space left on the device */ | |
1350 | *errp = -ENOSPC; | |
1351 | goto out; | |
1352 | ||
1353 | allocated: | |
1354 | ||
1355 | ext3_debug("using block group %d(%d)\n", | |
1356 | group_no, gdp->bg_free_blocks_count); | |
1357 | ||
1358 | BUFFER_TRACE(gdp_bh, "get_write_access"); | |
1359 | fatal = ext3_journal_get_write_access(handle, gdp_bh); | |
1360 | if (fatal) | |
1361 | goto out; | |
1362 | ||
1363 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) | |
1364 | + le32_to_cpu(es->s_first_data_block); | |
1365 | ||
faa56976 MC |
1366 | if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) || |
1367 | in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) || | |
1da177e4 | 1368 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), |
faa56976 MC |
1369 | EXT3_SB(sb)->s_itb_per_group) || |
1370 | in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | |
1da177e4 LT |
1371 | EXT3_SB(sb)->s_itb_per_group)) |
1372 | ext3_error(sb, "ext3_new_block", | |
1373 | "Allocating block in system zone - " | |
faa56976 | 1374 | "blocks from %u, length %lu", target_block, num); |
1da177e4 LT |
1375 | |
1376 | performed_allocation = 1; | |
1377 | ||
1378 | #ifdef CONFIG_JBD_DEBUG | |
1379 | { | |
1380 | struct buffer_head *debug_bh; | |
1381 | ||
1382 | /* Record bitmap buffer state in the newly allocated block */ | |
1383 | debug_bh = sb_find_get_block(sb, target_block); | |
1384 | if (debug_bh) { | |
1385 | BUFFER_TRACE(debug_bh, "state when allocated"); | |
1386 | BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); | |
1387 | brelse(debug_bh); | |
1388 | } | |
1389 | } | |
1390 | jbd_lock_bh_state(bitmap_bh); | |
1391 | spin_lock(sb_bgl_lock(sbi, group_no)); | |
1392 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { | |
faa56976 MC |
1393 | int i; |
1394 | ||
1395 | for (i = 0; i < num; i++) { | |
1396 | if (ext3_test_bit(ret_block, | |
1397 | bh2jh(bitmap_bh)->b_committed_data)) { | |
1398 | printk("%s: block was unexpectedly set in " | |
1399 | "b_committed_data\n", __FUNCTION__); | |
1400 | } | |
1da177e4 LT |
1401 | } |
1402 | } | |
1403 | ext3_debug("found bit %d\n", ret_block); | |
1404 | spin_unlock(sb_bgl_lock(sbi, group_no)); | |
1405 | jbd_unlock_bh_state(bitmap_bh); | |
1406 | #endif | |
1407 | ||
1408 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ | |
1409 | ret_block = target_block; | |
1410 | ||
faa56976 | 1411 | if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { |
1da177e4 LT |
1412 | ext3_error(sb, "ext3_new_block", |
1413 | "block(%d) >= blocks count(%d) - " | |
1414 | "block_group = %d, es == %p ", ret_block, | |
1415 | le32_to_cpu(es->s_blocks_count), group_no, es); | |
1416 | goto out; | |
1417 | } | |
1418 | ||
1419 | /* | |
1420 | * It is up to the caller to add the new buffer to a journal | |
1421 | * list of some description. We don't know in advance whether | |
1422 | * the caller wants to use it as metadata or data. | |
1423 | */ | |
1424 | ext3_debug("allocating block %d. Goal hits %d of %d.\n", | |
1425 | ret_block, goal_hits, goal_attempts); | |
1426 | ||
1427 | spin_lock(sb_bgl_lock(sbi, group_no)); | |
1428 | gdp->bg_free_blocks_count = | |
faa56976 | 1429 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); |
1da177e4 | 1430 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
faa56976 | 1431 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); |
1da177e4 LT |
1432 | |
1433 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | |
1434 | err = ext3_journal_dirty_metadata(handle, gdp_bh); | |
1435 | if (!fatal) | |
1436 | fatal = err; | |
1437 | ||
1438 | sb->s_dirt = 1; | |
1439 | if (fatal) | |
1440 | goto out; | |
1441 | ||
1442 | *errp = 0; | |
1443 | brelse(bitmap_bh); | |
faa56976 | 1444 | DQUOT_FREE_BLOCK(inode, *count-num); |
b54e41ec | 1445 | *count = num; |
1da177e4 LT |
1446 | return ret_block; |
1447 | ||
1448 | io_error: | |
1449 | *errp = -EIO; | |
1450 | out: | |
1451 | if (fatal) { | |
1452 | *errp = fatal; | |
1453 | ext3_std_error(sb, fatal); | |
1454 | } | |
1455 | /* | |
1456 | * Undo the block allocation | |
1457 | */ | |
1458 | if (!performed_allocation) | |
faa56976 | 1459 | DQUOT_FREE_BLOCK(inode, *count); |
1da177e4 LT |
1460 | brelse(bitmap_bh); |
1461 | return 0; | |
1462 | } | |
1463 | ||
b54e41ec MC |
1464 | int ext3_new_block(handle_t *handle, struct inode *inode, |
1465 | unsigned long goal, int *errp) | |
1466 | { | |
1467 | unsigned long count = 1; | |
1468 | ||
1469 | return ext3_new_blocks(handle, inode, goal, &count, errp); | |
1470 | } | |
1471 | ||
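/*
 * A caller's-eye sketch of the two entry points (illustrative only,
 * error handling abbreviated):
 */
#if 0
static void example_alloc(handle_t *handle, struct inode *inode)
{
	unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
	int err;
	int block;

	block = ext3_new_blocks(handle, inode, 0, &count, &err);
	if (!block)
		return;		/* err holds -ENOSPC, -EIO, -EDQUOT, ... */
	/* on success, blocks [block, block + count - 1] belong to us */
}
#endif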
1da177e4 LT |
1472 | unsigned long ext3_count_free_blocks(struct super_block *sb) |
1473 | { | |
1474 | unsigned long desc_count; | |
1475 | struct ext3_group_desc *gdp; | |
1476 | int i; | |
8bdac5d1 | 1477 | unsigned long ngroups = EXT3_SB(sb)->s_groups_count; |
1da177e4 LT |
1478 | #ifdef EXT3FS_DEBUG |
1479 | struct ext3_super_block *es; | |
1480 | unsigned long bitmap_count, x; | |
1481 | struct buffer_head *bitmap_bh = NULL; | |
1482 | ||
1da177e4 LT |
1483 | es = EXT3_SB(sb)->s_es; |
1484 | desc_count = 0; | |
1485 | bitmap_count = 0; | |
1486 | gdp = NULL; | |
8bdac5d1 | 1487 | |
5b116879 | 1488 | smp_rmb(); |
8bdac5d1 | 1489 | for (i = 0; i < ngroups; i++) { |
1da177e4 LT |
1490 | gdp = ext3_get_group_desc(sb, i, NULL); |
1491 | if (!gdp) | |
1492 | continue; | |
1493 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | |
1494 | brelse(bitmap_bh); | |
1495 | bitmap_bh = read_block_bitmap(sb, i); | |
1496 | if (bitmap_bh == NULL) | |
1497 | continue; | |
1498 | ||
1499 | x = ext3_count_free(bitmap_bh, sb->s_blocksize); | |
1500 | printk("group %d: stored = %d, counted = %lu\n", | |
1501 | i, le16_to_cpu(gdp->bg_free_blocks_count), x); | |
1502 | bitmap_count += x; | |
1503 | } | |
1504 | brelse(bitmap_bh); | |
1505 | printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n", | |
1506 | le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); | |
1da177e4 LT |
1507 | return bitmap_count; |
1508 | #else | |
1509 | desc_count = 0; | |
1da177e4 LT |
1510 | smp_rmb(); |
1511 | for (i = 0; i < ngroups; i++) { | |
1512 | gdp = ext3_get_group_desc(sb, i, NULL); | |
1513 | if (!gdp) | |
1514 | continue; | |
1515 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | |
1516 | } | |
1517 | ||
1518 | return desc_count; | |
1519 | #endif | |
1520 | } | |
1521 | ||
1522 | static inline int | |
1523 | block_in_use(unsigned long block, struct super_block *sb, unsigned char *map) | |
1524 | { | |
1525 | return ext3_test_bit ((block - | |
1526 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) % | |
1527 | EXT3_BLOCKS_PER_GROUP(sb), map); | |
1528 | } | |
1529 | ||
1530 | static inline int test_root(int a, int b) | |
1531 | { | |
1532 | int num = b; | |
1533 | ||
1534 | while (a > num) | |
1535 | num *= b; | |
1536 | return num == a; | |
1537 | } | |
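/*
 * e.g. test_root(49, 7): num goes 7 -> 49, the loop stops, and
 * 49 == 49 so the result is 1; test_root(50, 7) overshoots to 343
 * and returns 0.
 */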
1538 | ||
1539 | static int ext3_group_sparse(int group) | |
1540 | { | |
1541 | if (group <= 1) | |
1542 | return 1; | |
1543 | if (!(group & 1)) | |
1544 | return 0; | |
1545 | return (test_root(group, 7) || test_root(group, 5) || | |
1546 | test_root(group, 3)); | |
1547 | } | |
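/*
 * So the groups carrying superblock/descriptor backups are 0, 1 and
 * the powers of 3, 5 and 7: 3, 5, 7, 9, 25, 27, 49, 81, ...
 */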
1548 | ||
1549 | /** | |
1550 | * ext3_bg_has_super - number of blocks used by the superblock in group | |
1551 | * @sb: superblock for filesystem | |
1552 | * @group: group number to check | |
1553 | * | |
1554 | * Return the number of blocks used by the superblock (primary or backup) | |
1555 | * in this group. Currently this will be only 0 or 1. | |
1556 | */ | |
1557 | int ext3_bg_has_super(struct super_block *sb, int group) | |
1558 | { | |
b5a7c4f5 GOC |
1559 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb, |
1560 | EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && | |
1561 | !ext3_group_sparse(group)) | |
1da177e4 LT |
1562 | return 0; |
1563 | return 1; | |
1564 | } | |
1565 | ||
b5a7c4f5 GOC |
1566 | static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group) |
1567 | { | |
1568 | unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); | |
1569 | unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb); | |
1570 | unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1; | |
1571 | ||
1572 | if (group == first || group == first + 1 || group == last) | |
1573 | return 1; | |
1574 | return 0; | |
1575 | } | |
1576 | ||
1577 | static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) | |
1578 | { | |
1579 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb, | |
1580 | EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && | |
1581 | !ext3_group_sparse(group)) | |
1582 | return 0; | |
1583 | return EXT3_SB(sb)->s_gdb_count; | |
1584 | } | |
1585 | ||
1da177e4 LT |
1586 | /** |
1587 | * ext3_bg_num_gdb - number of blocks used by the group table in group | |
1588 | * @sb: superblock for filesystem | |
1589 | * @group: group number to check | |
1590 | * | |
1591 | * Return the number of blocks used by the group descriptor table | |
1592 | * (primary or backup) in this group. In the future there may be a | |
1593 | * different number of descriptor blocks in each group. | |
1594 | */ | |
1595 | unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) | |
1596 | { | |
b5a7c4f5 GOC |
1597 | unsigned long first_meta_bg = |
1598 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg); | |
1599 | unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); | |
1600 | ||
1601 | if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) || | |
1602 | metagroup < first_meta_bg) | |
1603 | return ext3_bg_num_gdb_nometa(sb,group); | |
1da177e4 | 1604 | |
b5a7c4f5 GOC |
1605 | return ext3_bg_num_gdb_meta(sb,group); |
1606 | ||
1607 | } |