1 /*
2 * linux/fs/ext4/balloc.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10 * Big-endian to little-endian byte-swapping/bitmaps by
11 * David S. Miller (davem@caip.rutgers.edu), 1995
12 */
13
14 #include <linux/time.h>
15 #include <linux/capability.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/quotaops.h>
19 #include <linux/buffer_head.h>
20 #include "ext4.h"
21 #include "ext4_jbd2.h"
22 #include "mballoc.h"
23
24 #include <trace/events/ext4.h>
25
26 static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
27 ext4_group_t block_group);
28 /*
29 * balloc.c contains the block allocation and deallocation routines
30 */
31
32 /*
33 * Calculate block group number for a given block number
34 */
35 ext4_group_t ext4_get_group_number(struct super_block *sb,
36 ext4_fsblk_t block)
37 {
38 ext4_group_t group;
39
40 if (test_opt2(sb, STD_GROUP_SIZE))
41 group = (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
42 block) >>
43 (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
44 else
45 ext4_get_group_no_and_offset(sb, block, &group, NULL);
46 return group;
47 }
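/*
 * Example (assuming a 4 KiB block size and no bigalloc, so
 * EXT4_BLOCK_SIZE_BITS() is 12 and EXT4_CLUSTER_BITS() is 0): the shift
 * above becomes 15, i.e. 32768 blocks per group, and block 100000 on a
 * filesystem with s_first_data_block == 0 lands in group
 * 100000 >> 15 == 3.
 */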
48
49 /*
50 * Calculate the block group number and offset into the block/cluster
51 * allocation bitmap, given a block number
52 */
53 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
54 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
55 {
56 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
57 ext4_grpblk_t offset;
58
59 blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
60 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
61 EXT4_SB(sb)->s_cluster_bits;
62 if (offsetp)
63 *offsetp = offset;
64 if (blockgrpp)
65 *blockgrpp = blocknr;
66
67 }
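/*
 * Example (same 4 KiB, non-bigalloc assumptions as above): for
 * blocknr == 100000, do_div(blocknr, 32768) returns the remainder 1696
 * and leaves the quotient 3 in blocknr, so *blockgrpp = 3 and
 * *offsetp = 1696.
 */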
68
69 /*
70 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
71 * and 0 otherwise.
72 */
73 static inline int ext4_block_in_group(struct super_block *sb,
74 ext4_fsblk_t block,
75 ext4_group_t block_group)
76 {
77 ext4_group_t actual_group;
78
79 actual_group = ext4_get_group_number(sb, block);
80 return (actual_group == block_group) ? 1 : 0;
81 }
82
83 /* Return the number of clusters used for file system metadata; this
84 * represents the overhead needed by the file system.
85 */
86 unsigned ext4_num_overhead_clusters(struct super_block *sb,
87 ext4_group_t block_group,
88 struct ext4_group_desc *gdp)
89 {
90 unsigned num_clusters;
91 int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
92 ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
93 ext4_fsblk_t itbl_blk;
94 struct ext4_sb_info *sbi = EXT4_SB(sb);
95
96 /* This is the number of clusters used by the superblock,
97 * block group descriptors, and reserved block group
98 * descriptor blocks */
99 num_clusters = ext4_num_base_meta_clusters(sb, block_group);
100
101 /*
102 * For the allocation bitmaps and inode table, we first need
103 * to check to see if the block is in the block group. If it
104 * is, then check to see if the cluster is already accounted
105 * for in the clusters used for the base metadata cluster, or
106 * if we can increment the base metadata cluster to include
107 * that block. Otherwise, we will have to track the cluster
108 * used for the allocation bitmap or inode table explicitly.
109 * Normally all of these blocks are contiguous, so the special
110 * case handling shouldn't be necessary except for *very*
111 * unusual file system layouts.
112 */
113 if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
114 block_cluster = EXT4_B2C(sbi,
115 ext4_block_bitmap(sb, gdp) - start);
116 if (block_cluster < num_clusters)
117 block_cluster = -1;
118 else if (block_cluster == num_clusters) {
119 num_clusters++;
120 block_cluster = -1;
121 }
122 }
123
124 if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
125 inode_cluster = EXT4_B2C(sbi,
126 ext4_inode_bitmap(sb, gdp) - start);
127 if (inode_cluster < num_clusters)
128 inode_cluster = -1;
129 else if (inode_cluster == num_clusters) {
130 num_clusters++;
131 inode_cluster = -1;
132 }
133 }
134
135 itbl_blk = ext4_inode_table(sb, gdp);
136 for (i = 0; i < sbi->s_itb_per_group; i++) {
137 if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
138 c = EXT4_B2C(sbi, itbl_blk + i - start);
139 if ((c < num_clusters) || (c == inode_cluster) ||
140 (c == block_cluster) || (c == itbl_cluster))
141 continue;
142 if (c == num_clusters) {
143 num_clusters++;
144 continue;
145 }
146 num_clusters++;
147 itbl_cluster = c;
148 }
149 }
150
151 if (block_cluster != -1)
152 num_clusters++;
153 if (inode_cluster != -1)
154 num_clusters++;
155
156 return num_clusters;
157 }
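/*
 * In the common case (no flex_bg, everything contiguous) each bitmap and
 * inode table block simply extends num_clusters by one, so on a
 * non-bigalloc filesystem the result is roughly
 *
 *	ext4_num_base_meta_clusters() + 2 + sbi->s_itb_per_group
 *
 * (one cluster each for the block and inode bitmaps plus the inode
 * table blocks); only unusual layouts hit the special cases above.
 */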
158
159 static unsigned int num_clusters_in_group(struct super_block *sb,
160 ext4_group_t block_group)
161 {
162 unsigned int blocks;
163
164 if (block_group == ext4_get_groups_count(sb) - 1) {
165 /*
166 * Even though mke2fs always initializes the first and
167 * last group, just in case some other tool was used,
168 * we need to make sure we calculate the right free
169 * blocks.
170 */
171 blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
172 ext4_group_first_block_no(sb, block_group);
173 } else
174 blocks = EXT4_BLOCKS_PER_GROUP(sb);
175 return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
176 }
177
178 /* Initializes an uninitialized block bitmap */
179 void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
180 ext4_group_t block_group,
181 struct ext4_group_desc *gdp)
182 {
183 unsigned int bit, bit_max;
184 struct ext4_sb_info *sbi = EXT4_SB(sb);
185 ext4_fsblk_t start, tmp;
186 int flex_bg = 0;
187
188 J_ASSERT_BH(bh, buffer_locked(bh));
189
190 /* If the checksum is bad, mark all blocks used to prevent allocation,
191 * essentially implementing a per-group read-only flag. */
192 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
193 ext4_error(sb, "Checksum bad for group %u", block_group);
194 ext4_free_group_clusters_set(sb, gdp, 0);
195 ext4_free_inodes_set(sb, gdp, 0);
196 ext4_itable_unused_set(sb, gdp, 0);
197 memset(bh->b_data, 0xff, sb->s_blocksize);
198 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
199 return;
200 }
201 memset(bh->b_data, 0, sb->s_blocksize);
202
203 bit_max = ext4_num_base_meta_clusters(sb, block_group);
204 for (bit = 0; bit < bit_max; bit++)
205 ext4_set_bit(bit, bh->b_data);
206
207 start = ext4_group_first_block_no(sb, block_group);
208
209 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
210 flex_bg = 1;
211
212 /* Set bits for block and inode bitmaps, and inode table */
213 tmp = ext4_block_bitmap(sb, gdp);
214 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
215 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
216
217 tmp = ext4_inode_bitmap(sb, gdp);
218 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
219 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
220
221 tmp = ext4_inode_table(sb, gdp);
222 for (; tmp < ext4_inode_table(sb, gdp) +
223 sbi->s_itb_per_group; tmp++) {
224 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
225 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
226 }
227
228 /*
229 * Also, if the number of blocks within the group is less than
230 * blocksize * 8 (which is the size of the bitmap), set the rest
231 * of the block bitmap to 1.
232 */
233 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
234 sb->s_blocksize * 8, bh->b_data);
235 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
236 ext4_group_desc_csum_set(sb, block_group, gdp);
237 }
238
239 /* Return the number of free clusters in a block group. It is used when
240 * the block bitmap is uninitialized, so we can't just count the bits
241 * in the bitmap. */
242 unsigned ext4_free_clusters_after_init(struct super_block *sb,
243 ext4_group_t block_group,
244 struct ext4_group_desc *gdp)
245 {
246 return num_clusters_in_group(sb, block_group) -
247 ext4_num_overhead_clusters(sb, block_group, gdp);
248 }
249
250 /*
251 * The free blocks are managed by bitmaps. A file system contains several
252 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
253 * block for inodes, N blocks for the inode table and data blocks.
254 *
255 * The file system contains group descriptors which are located after the
256 * super block. Each descriptor contains the number of the bitmap block and
257 * the free block count of the group. The descriptors are loaded in memory
258 * when a file system is mounted (see ext4_fill_super).
259 */
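/*
 * A typical block group therefore looks roughly like this (groups
 * without a superblock backup start directly at the block bitmap, and
 * with FLEX_BG the bitmaps and inode table may live in another group):
 *
 *	superblock backup | group descriptors | reserved GDT blocks |
 *	block bitmap | inode bitmap | inode table | data blocks
 */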
260
261 /**
262 * ext4_get_group_desc() -- load group descriptor from disk
263 * @sb: super block
264 * @block_group: given block group
265 * @bh: pointer to the buffer head to store the block
266 * group descriptor
267 */
268 struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
269 ext4_group_t block_group,
270 struct buffer_head **bh)
271 {
272 unsigned int group_desc;
273 unsigned int offset;
274 ext4_group_t ngroups = ext4_get_groups_count(sb);
275 struct ext4_group_desc *desc;
276 struct ext4_sb_info *sbi = EXT4_SB(sb);
277
278 if (block_group >= ngroups) {
279 ext4_error(sb, "block_group >= groups_count - block_group = %u,"
280 " groups_count = %u", block_group, ngroups);
281
282 return NULL;
283 }
284
285 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
286 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
287 if (!sbi->s_group_desc[group_desc]) {
288 ext4_error(sb, "Group descriptor not loaded - "
289 "block_group = %u, group_desc = %u, desc = %u",
290 block_group, group_desc, offset);
291 return NULL;
292 }
293
294 desc = (struct ext4_group_desc *)(
295 (__u8 *)sbi->s_group_desc[group_desc]->b_data +
296 offset * EXT4_DESC_SIZE(sb));
297 if (bh)
298 *bh = sbi->s_group_desc[group_desc];
299 return desc;
300 }
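/*
 * Example (assuming 4 KiB blocks and 32-byte descriptors, i.e. no 64bit
 * feature): EXT4_DESC_PER_BLOCK() is 128, so block_group 300 is found
 * in descriptor block 300 >> 7 == 2, at slot 300 & 127 == 44 of that
 * block.
 */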
301
302 /*
303 * Return the block number which was discovered to be invalid, or 0 if
304 * the block bitmap is valid.
305 */
306 static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
307 struct ext4_group_desc *desc,
308 unsigned int block_group,
309 struct buffer_head *bh)
310 {
311 ext4_grpblk_t offset;
312 ext4_grpblk_t next_zero_bit;
313 ext4_fsblk_t blk;
314 ext4_fsblk_t group_first_block;
315
316 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
317 /* with FLEX_BG, the inode/block bitmaps and itable
318 * blocks may not be in the group at all, so the bitmap
319 * validation is skipped for those groups; verifying them
320 * would require also reading the block group where the
321 * bitmaps are actually located.
322 */
323 return 0;
324 }
325 group_first_block = ext4_group_first_block_no(sb, block_group);
326
327 /* check whether block bitmap block number is set */
328 blk = ext4_block_bitmap(sb, desc);
329 offset = blk - group_first_block;
330 if (!ext4_test_bit(offset, bh->b_data))
331 /* bad block bitmap */
332 return blk;
333
334 /* check whether the inode bitmap block number is set */
335 blk = ext4_inode_bitmap(sb, desc);
336 offset = blk - group_first_block;
337 if (!ext4_test_bit(offset, bh->b_data))
338 /* bad block bitmap */
339 return blk;
340
341 /* check whether the inode table block number is set */
342 blk = ext4_inode_table(sb, desc);
343 offset = blk - group_first_block;
344 next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
345 offset + EXT4_SB(sb)->s_itb_per_group,
346 offset);
347 if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
348 /* bad bitmap for inode tables */
349 return blk;
350 return 0;
351 }
352
353 void ext4_validate_block_bitmap(struct super_block *sb,
354 struct ext4_group_desc *desc,
355 unsigned int block_group,
356 struct buffer_head *bh)
357 {
358 ext4_fsblk_t blk;
359
360 if (buffer_verified(bh))
361 return;
362
363 ext4_lock_group(sb, block_group);
364 blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
365 if (unlikely(blk != 0)) {
366 ext4_unlock_group(sb, block_group);
367 ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
368 block_group, blk);
369 return;
370 }
371 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
372 desc, bh))) {
373 ext4_unlock_group(sb, block_group);
374 ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
375 return;
376 }
377 set_buffer_verified(bh);
378 ext4_unlock_group(sb, block_group);
379 }
380
381 /**
382 * ext4_read_block_bitmap_nowait()
383 * @sb: super block
384 * @block_group: given block group
385 *
386 * Read the bitmap for a given block_group, and validate that the
387 * bits for the block/inode bitmaps and inode table are set in the bitmap.
388 *
389 * Return buffer_head on success or NULL in case of failure.
390 */
391 struct buffer_head *
392 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
393 {
394 struct ext4_group_desc *desc;
395 struct buffer_head *bh;
396 ext4_fsblk_t bitmap_blk;
397
398 desc = ext4_get_group_desc(sb, block_group, NULL);
399 if (!desc)
400 return NULL;
401 bitmap_blk = ext4_block_bitmap(sb, desc);
402 bh = sb_getblk(sb, bitmap_blk);
403 if (unlikely(!bh)) {
404 ext4_error(sb, "Cannot get buffer for block bitmap - "
405 "block_group = %u, block_bitmap = %llu",
406 block_group, bitmap_blk);
407 return NULL;
408 }
409
410 if (bitmap_uptodate(bh))
411 goto verify;
412
413 lock_buffer(bh);
414 if (bitmap_uptodate(bh)) {
415 unlock_buffer(bh);
416 goto verify;
417 }
418 ext4_lock_group(sb, block_group);
419 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
420 ext4_init_block_bitmap(sb, bh, block_group, desc);
421 set_bitmap_uptodate(bh);
422 set_buffer_uptodate(bh);
423 ext4_unlock_group(sb, block_group);
424 unlock_buffer(bh);
425 return bh;
426 }
427 ext4_unlock_group(sb, block_group);
428 if (buffer_uptodate(bh)) {
429 /*
430 * if the group is not uninit and bh is uptodate,
431 * the bitmap is also uptodate
432 */
433 set_bitmap_uptodate(bh);
434 unlock_buffer(bh);
435 goto verify;
436 }
437 /*
438 * submit the buffer_head for reading
439 */
440 set_buffer_new(bh);
441 trace_ext4_read_block_bitmap_load(sb, block_group);
442 bh->b_end_io = ext4_end_bitmap_read;
443 get_bh(bh);
444 submit_bh(READ | REQ_META | REQ_PRIO, bh);
445 return bh;
446 verify:
447 ext4_validate_block_bitmap(sb, desc, block_group, bh);
448 return bh;
449 }
450
451 /* Returns 0 on success, 1 on error */
452 int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
453 struct buffer_head *bh)
454 {
455 struct ext4_group_desc *desc;
456
457 if (!buffer_new(bh))
458 return 0;
459 desc = ext4_get_group_desc(sb, block_group, NULL);
460 if (!desc)
461 return 1;
462 wait_on_buffer(bh);
463 if (!buffer_uptodate(bh)) {
464 ext4_error(sb, "Cannot read block bitmap - "
465 "block_group = %u, block_bitmap = %llu",
466 block_group, (unsigned long long) bh->b_blocknr);
467 return 1;
468 }
469 clear_buffer_new(bh);
470 /* Panic or remount fs read-only if block bitmap is invalid */
471 ext4_validate_block_bitmap(sb, desc, block_group, bh);
472 return 0;
473 }
474
475 struct buffer_head *
476 ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
477 {
478 struct buffer_head *bh;
479
480 bh = ext4_read_block_bitmap_nowait(sb, block_group);
481 if (!bh)
482 return NULL;
483 if (ext4_wait_block_bitmap(sb, block_group, bh)) {
484 put_bh(bh);
485 return NULL;
486 }
487 return bh;
488 }
489
490 /**
491 * ext4_has_free_clusters()
492 * @sbi: in-core super block structure.
493 * @nclusters: number of needed clusters
494 * @flags: flags from ext4_mb_new_blocks()
495 *
496 * Check if filesystem has nclusters free & available for allocation.
497 * On success return 1, return 0 on failure.
498 */
499 static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
500 s64 nclusters, unsigned int flags)
501 {
502 s64 free_clusters, dirty_clusters, rsv, resv_clusters;
503 struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
504 struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;
505
506 free_clusters = percpu_counter_read_positive(fcc);
507 dirty_clusters = percpu_counter_read_positive(dcc);
508 resv_clusters = atomic64_read(&sbi->s_resv_clusters);
509
510 /*
511 * r_blocks_count should always be a multiple of the cluster ratio so
512 * we are safe to do a plain bit shift only.
513 */
514 rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
515 resv_clusters;
516
517 if (free_clusters - (nclusters + rsv + dirty_clusters) <
518 EXT4_FREECLUSTERS_WATERMARK) {
519 free_clusters = percpu_counter_sum_positive(fcc);
520 dirty_clusters = percpu_counter_sum_positive(dcc);
521 }
522 /* Check whether we have space after accounting for current
523 * dirty clusters & root reserved clusters.
524 */
525 if (free_clusters >= (rsv + nclusters + dirty_clusters))
526 return 1;
527
528 /* Hm, nope. Are (enough) root reserved clusters available? */
529 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
530 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
531 capable(CAP_SYS_RESOURCE) ||
532 (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
533
534 if (free_clusters >= (nclusters + dirty_clusters +
535 resv_clusters))
536 return 1;
537 }
538 /* No free blocks. Let's see if we can dip into reserved pool */
539 if (flags & EXT4_MB_USE_RESERVED) {
540 if (free_clusters >= (nclusters + dirty_clusters))
541 return 1;
542 }
543
544 return 0;
545 }
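/*
 * Note that percpu_counter_read_positive() is only approximate (each CPU
 * may be holding up to a batch of unfolded deltas), which is why the
 * function above falls back to the exact but more expensive
 * percpu_counter_sum_positive() once the estimate comes within
 * EXT4_FREECLUSTERS_WATERMARK of the limit.
 */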
546
547 int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
548 s64 nclusters, unsigned int flags)
549 {
550 if (ext4_has_free_clusters(sbi, nclusters, flags)) {
551 percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
552 return 0;
553 } else
554 return -ENOSPC;
555 }
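/*
 * A typical caller (sketch, not taken from this file) reserves clusters
 * for delayed allocation and, if they are never actually allocated,
 * gives the reservation back by subtracting from s_dirtyclusters_counter:
 *
 *	if (ext4_claim_free_clusters(sbi, 1, 0))
 *		return -ENOSPC;
 *	...
 *	percpu_counter_sub(&sbi->s_dirtyclusters_counter, 1);
 */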
556
557 /**
558 * ext4_should_retry_alloc()
559 * @sb: super block
560 * @retries: number of attempts that have been made
561 *
562 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
563 * it is profitable to retry the operation, this function will wait
564 * for the current or committing transaction to complete, and then
565 * return TRUE.
566 *
567 * If the total number of retries exceeds three, return FALSE.
568 */
569 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
570 {
571 if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
572 (*retries)++ > 3 ||
573 !EXT4_SB(sb)->s_journal)
574 return 0;
575
576 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
577
578 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
579 }
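/*
 * A typical retry loop (sketch; do_the_allocation() is a placeholder for
 * whatever operation hit ENOSPC) looks like:
 *
 *	int retries = 0;
 * retry:
 *	err = do_the_allocation(...);
 *	if (err == -ENOSPC &&
 *	    ext4_should_retry_alloc(inode->i_sb, &retries))
 *		goto retry;
 */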
580
581 /*
582 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
583 *
584 * @handle: handle to this transaction
585 * @inode: file inode
586 * @goal: given target block (filesystem wide)
587 * @count: pointer to total number of clusters needed
588 * @errp: error code
589 *
590 * Return the first allocated block number on success; *count stores the
591 * total number of allocated blocks, and the error code is stored in *errp.
592 */
593 ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
594 ext4_fsblk_t goal, unsigned int flags,
595 unsigned long *count, int *errp)
596 {
597 struct ext4_allocation_request ar;
598 ext4_fsblk_t ret;
599
600 memset(&ar, 0, sizeof(ar));
601 /* Fill with neighbour allocated blocks */
602 ar.inode = inode;
603 ar.goal = goal;
604 ar.len = count ? *count : 1;
605 ar.flags = flags;
606
607 ret = ext4_mb_new_blocks(handle, &ar, errp);
608 if (count)
609 *count = ar.len;
610 /*
611 * Account for the allocated meta blocks. We will never
612 * fail EDQUOT for metadata, but we do account for it.
613 */
614 if (!(*errp) &&
615 ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
616 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
617 EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
618 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
619 dquot_alloc_block_nofail(inode,
620 EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
621 }
622 return ret;
623 }
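/*
 * Most callers (sketch, not from this file) want a single metadata block
 * near a goal and pass count == NULL, e.g.
 *
 *	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
 *					NULL, &err);
 */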
624
625 /**
626 * ext4_count_free_clusters() -- count filesystem free clusters
627 * @sb: superblock
628 *
629 * Adds up the number of free clusters from each block group.
630 */
631 ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
632 {
633 ext4_fsblk_t desc_count;
634 struct ext4_group_desc *gdp;
635 ext4_group_t i;
636 ext4_group_t ngroups = ext4_get_groups_count(sb);
637 #ifdef EXT4FS_DEBUG
638 struct ext4_super_block *es;
639 ext4_fsblk_t bitmap_count;
640 unsigned int x;
641 struct buffer_head *bitmap_bh = NULL;
642
643 es = EXT4_SB(sb)->s_es;
644 desc_count = 0;
645 bitmap_count = 0;
646 gdp = NULL;
647
648 for (i = 0; i < ngroups; i++) {
649 gdp = ext4_get_group_desc(sb, i, NULL);
650 if (!gdp)
651 continue;
652 desc_count += ext4_free_group_clusters(sb, gdp);
653 brelse(bitmap_bh);
654 bitmap_bh = ext4_read_block_bitmap(sb, i);
655 if (bitmap_bh == NULL)
656 continue;
657
658 x = ext4_count_free(bitmap_bh->b_data,
659 EXT4_BLOCKS_PER_GROUP(sb) / 8);
660 printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
661 i, ext4_free_group_clusters(sb, gdp), x);
662 bitmap_count += x;
663 }
664 brelse(bitmap_bh);
665 printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
666 ", computed = %llu, %llu\n",
667 EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
668 desc_count, bitmap_count);
669 return bitmap_count;
670 #else
671 desc_count = 0;
672 for (i = 0; i < ngroups; i++) {
673 gdp = ext4_get_group_desc(sb, i, NULL);
674 if (!gdp)
675 continue;
676 desc_count += ext4_free_group_clusters(sb, gdp);
677 }
678
679 return desc_count;
680 #endif
681 }
682
683 static inline int test_root(ext4_group_t a, int b)
684 {
685 int num = b;
686
687 while (a > num)
688 num *= b;
689 return num == a;
690 }
691
692 static int ext4_group_sparse(ext4_group_t group)
693 {
694 if (group <= 1)
695 return 1;
696 if (!(group & 1))
697 return 0;
698 return (test_root(group, 7) || test_root(group, 5) ||
699 test_root(group, 3));
700 }
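/*
 * With sparse_super, only groups 0, 1 and powers of 3, 5 and 7 carry
 * superblock/descriptor backups, i.e. groups 0, 1, 3, 5, 7, 9, 25, 27,
 * 49, 81, 125, 243, 343, ...
 */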
701
702 /**
703 * ext4_bg_has_super - number of blocks used by the superblock in group
704 * @sb: superblock for filesystem
705 * @group: group number to check
706 *
707 * Return the number of blocks used by the superblock (primary or backup)
708 * in this group. Currently this will be only 0 or 1.
709 */
710 int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
711 {
712 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
713 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
714 !ext4_group_sparse(group))
715 return 0;
716 return 1;
717 }
718
719 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
720 ext4_group_t group)
721 {
722 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
723 ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
724 ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
725
726 if (group == first || group == first + 1 || group == last)
727 return 1;
728 return 0;
729 }
730
731 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
732 ext4_group_t group)
733 {
734 if (!ext4_bg_has_super(sb, group))
735 return 0;
736
737 if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG))
738 return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
739 else
740 return EXT4_SB(sb)->s_gdb_count;
741 }
742
743 /**
744 * ext4_bg_num_gdb - number of blocks used by the group table in group
745 * @sb: superblock for filesystem
746 * @group: group number to check
747 *
748 * Return the number of blocks used by the group descriptor table
749 * (primary or backup) in this group. In the future there may be a
750 * different number of descriptor blocks in each group.
751 */
752 unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
753 {
754 unsigned long first_meta_bg =
755 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
756 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
757
758 if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
759 metagroup < first_meta_bg)
760 return ext4_bg_num_gdb_nometa(sb, group);
761
762 return ext4_bg_num_gdb_meta(sb,group);
763
764 }
765
766 /*
767 * This function returns the number of file system metadata clusters at
768 * the beginning of a block group, including the reserved gdt blocks.
769 */
770 static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
771 ext4_group_t block_group)
772 {
773 struct ext4_sb_info *sbi = EXT4_SB(sb);
774 unsigned num;
775
776 /* Check for superblock and gdt backups in this group */
777 num = ext4_bg_has_super(sb, block_group);
778
779 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
780 block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
781 sbi->s_desc_per_block) {
782 if (num) {
783 num += ext4_bg_num_gdb(sb, block_group);
784 num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
785 }
786 } else { /* For META_BG_BLOCK_GROUPS */
787 num += ext4_bg_num_gdb(sb, block_group);
788 }
789 return EXT4_NUM_B2C(sbi, num);
790 }
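/*
 * Example: for group 0 of a non-meta_bg, non-bigalloc filesystem this is
 * 1 (the superblock) + the group descriptor blocks + s_reserved_gdt_blocks;
 * a group without a superblock backup contributes 0.
 */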
791 /**
792 * ext4_inode_to_goal_block - return a hint for block allocation
793 * @inode: inode for block allocation
794 *
795 * Return the ideal location to start allocating blocks for a
796 * newly created inode.
797 */
798 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
799 {
800 struct ext4_inode_info *ei = EXT4_I(inode);
801 ext4_group_t block_group;
802 ext4_grpblk_t colour;
803 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
804 ext4_fsblk_t bg_start;
805 ext4_fsblk_t last_block;
806
807 block_group = ei->i_block_group;
808 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
809 /*
810 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
811 * block groups per flexgroup, reserve the first block
812 * group for directories and special files. Regular
813 * files will start at the second block group. This
814 * tends to speed up directory access and improves
815 * fsck times.
816 */
817 block_group &= ~(flex_size-1);
818 if (S_ISREG(inode->i_mode))
819 block_group++;
820 }
821 bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
822 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
823
824 /*
825 * If we are doing delayed allocation, we don't need to take
826 * colour into account.
827 */
828 if (test_opt(inode->i_sb, DELALLOC))
829 return bg_start;
830
831 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
832 colour = (current->pid % 16) *
833 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
834 else
835 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
836 return bg_start + colour;
837 }
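/*
 * Example: with 32768 blocks per group, a process with pid % 16 == 5
 * gets a goal 5 * 2048 == 10240 blocks into its block group, spreading
 * concurrent allocators across the group.
 */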
838