// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */

/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
/*
 * TODO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - reservation for superuser
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 */
/*
 * The allocation request involves a request for multiple blocks near the
 * specified goal (block) value.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * to use the group preallocation. The default value of s_mb_stream_request
 * is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
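 *
 * For example (illustrative numbers, not additional tunables): with the
 * default s_mb_stream_request of 16 blocks, a write that would leave a file
 * at 12 blocks selects group (locality group) preallocation, while one that
 * would leave it at 64 blocks selects inode preallocation.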
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * First stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, stored in the inode as:
 *
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
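 *
 * For example, with 4k pages and a 1k block size, blocks_per_page is 4 and
 * groups_per_page is 2, so page 0 of the buddy cache holds the bitmap and
 * buddy blocks of groups 0 and 1; with a 4k block size each page holds
 * either the bitmap block or the buddy block of a single group.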
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
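 *
 * For example (illustrative): with mb_group_prealloc at its default of 512
 * blocks and a stripe of 384 blocks, the group prealloc request is rounded
 * up to 768 blocks, the smallest multiple of 384 greater than 512.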
 *
 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 * structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos of
 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
 *
 *    Locking: sbi->s_mb_rb_lock (rwlock)
 *
 *    This is a red black tree consisting of group infos and the tree is sorted
 *    by average fragment sizes (which is calculated as ext4_group_info->bb_free
 *    / ext4_group_info->bb_fragments).
 *
 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 * structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have the largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in the increasing order of
 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where average fragment size > request
 * size. So, we lookup a group which has average fragment size just above or
 * equal to request size using our rb tree (data structure 2) in O(log N) time.
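 *
 * For example, a CR = 0 request for 16 clusters (order 4) first checks
 * sbi->s_mb_largest_free_orders[4]; if that list is empty it moves on to
 * index 5, 6, ... up to MB_NUM_ORDERS(sb) - 1 before bumping the cr level.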
 *
 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * be traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before consulting
 * the above data structures for more efficient lookups. For non-rotational
 * devices, this value defaults to 0 and for rotational devices this is set to
 * MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both the prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent request.
 */
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
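 *
 * worked example (illustrative numbers): creating an inode PA of N = 8
 * marks 8 blocks used in the buddy (buddy += 8; PA = 8). using 3 of them
 * sets 3 bits on disk (on-disk += 3; PA -= 3, leaving PA = 5). discarding
 * the PA then returns the 5 never-used blocks to the buddy.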
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponding
 *     block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced and while it is no discard is possible
 *  - PA is referenced until block isn't marked in on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used PA to emptiness. no need to modify buddy
 * in this case, but we should care about concurrent init
 */
/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);
/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks, i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and when freed blocks
 * found were 0, that is when we sample the percpu seq counter for all cpus
 * using the function ext4_get_discard_pa_seq_sum() below. This happens after
 * making sure that all the PAs on grp->bb_prealloc_list got freed or if it's
 * empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}
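
/*
 * Illustrative sketch (not part of mballoc proper): how a caller would use
 * the seq counter to detect a racing discard/free between a failed
 * allocation attempt and its retry.  The real logic lives in
 * ext4_mb_new_blocks() and ext4_mb_discard_preallocations_should_retry();
 * the helper name here is hypothetical.
 */
static inline bool mb_example_should_retry(u64 seq_sampled_before)
{
	/* retry only if some PA was discarded or blocks were freed since */
	return ext4_get_discard_pa_seq_sum() != seq_sampled_before;
}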
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
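
/*
 * Example: on a 64-bit machine, mb_correct_addr_and_bit(&bit, base + 5)
 * (with base 8-byte aligned) rounds the address down to base and
 * compensates by adding 5 << 3 == 40 to the bit offset, so the caller
 * still addresses the same bit but through an aligned pointer.
 */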
static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}
static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
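
/*
 * Example: with 1k blocks (bd_blkbits == 10), order 0 covers the 8192
 * per-cluster bits in bd_bitmap itself; order 1 starts at byte 0 of the
 * buddy block with 4096 bits, order 2 after it with 2048 bits, and so on,
 * which is exactly what s_mb_offsets[]/s_mb_maxs[] encode (filled in at
 * mount time).
 */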
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				  int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}
#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
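
/*
 * Worked example (illustrative numbers): first = 6, len = 6.  In the first
 * pass ffs() limits the chunk to order 1 (6 is only 2-aligned), so blocks
 * 6-7 are recorded as one order-1 buddy (bb_counters[1]++).  The next pass
 * starts at first = 8 with len = 4 and records blocks 8-11 as a single
 * order-2 buddy (bb_counters[2]++).
 */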

static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
			int (*cmp)(struct rb_node *, struct rb_node *))
{
	struct rb_node **iter = &root->rb_node, *parent = NULL;

	while (*iter) {
		parent = *iter;
		if (cmp(new, *iter) > 0)
			iter = &((*iter)->rb_left);
		else
			iter = &((*iter)->rb_right);
	}

	rb_link_node(new, parent, iter);
	rb_insert_color(new, root);
}

static int
ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
{
	struct ext4_group_info *grp1 = rb_entry(rb1,
						struct ext4_group_info,
						bb_avg_fragment_size_rb);
	struct ext4_group_info *grp2 = rb_entry(rb2,
						struct ext4_group_info,
						bb_avg_fragment_size_rb);
	int num_frags_1, num_frags_2;

	num_frags_1 = grp1->bb_fragments ?
		grp1->bb_free / grp1->bb_fragments : 0;
	num_frags_2 = grp2->bb_fragments ?
		grp2->bb_free / grp2->bb_fragments : 0;

	return (num_frags_2 - num_frags_1);
}

/*
 * Reinsert grpinfo into the avg_fragment_size tree with new average
 * fragment size.
 */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	write_lock(&sbi->s_mb_rb_lock);
	if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
		rb_erase(&grp->bb_avg_fragment_size_rb,
				&sbi->s_mb_avg_fragment_size_root);
		RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
	}

	ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
		&grp->bb_avg_fragment_size_rb,
		ext4_mb_avg_fragment_size_cmp);
	write_unlock(&sbi->s_mb_rb_lock);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_last_optimal_group = *group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}

/*
 * Choose next group by traversing average fragment size tree. Updates *new_cr
 * if cr level needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
 * the linear search should continue for one iteration since there's lock
 * contention on the rb tree lock.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int avg_fragment_size, best_so_far;
	struct rb_node *node, *found;
	struct ext4_group_info *grp;

	/*
	 * If there is contention on the lock, instead of waiting for the lock
	 * to become available, just continue searching linearly. We'll resume
	 * our rb tree search later starting at ac->ac_last_optimal_group.
	 */
	if (!read_trylock(&sbi->s_mb_rb_lock)) {
		ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
		return;
	}

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
		/* We have found something at CR 1 in the past */
		grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
		for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
		     found = rb_next(found)) {
			grp = rb_entry(found, struct ext4_group_info,
				       bb_avg_fragment_size_rb);
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
				break;
		}
		goto done;
	}

	node = sbi->s_mb_avg_fragment_size_root.rb_node;
	best_so_far = 0;
	found = NULL;

	while (node) {
		grp = rb_entry(node, struct ext4_group_info,
			       bb_avg_fragment_size_rb);
		avg_fragment_size = 0;
		if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
			avg_fragment_size = grp->bb_fragments ?
				grp->bb_free / grp->bb_fragments : 0;
			if (!best_so_far || avg_fragment_size < best_so_far) {
				best_so_far = avg_fragment_size;
				found = node;
			}
		}
		if (avg_fragment_size > ac->ac_g_ex.fe_len)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

done:
	if (found) {
		grp = rb_entry(found, struct ext4_group_info,
			       bb_avg_fragment_size_rb);
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}

	read_unlock(&sbi->s_mb_rb_lock);
	ac->ac_last_optimal_group = *group;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
		ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at current CR level, this field is updated to indicate
 *            the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used as
 *            determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == 0) {
		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
	} else if (*new_cr == 1) {
		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
		 */
		WARN_ON(1);
	}
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int i;

	if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_del_init(&grp->bb_largest_free_order_node);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
	grp->bb_largest_free_order = -1; /* uninit */

	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
	if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
	    grp->bb_largest_free_order >= 0 && grp->bb_free) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_add_tail(&grp->bb_largest_free_order_node,
		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	atomic_inc(&sbi->s_mb_buddies_generated);
	atomic64_add(period, &sbi->s_mb_generation_time);
	mb_update_avg_fragment_size(sb, grp);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, stored in the inode as:
 *
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	mb_debug(sb, "init page %lu\n", page->index);

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(sb, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carry information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (MB_NUM_ORDERS(sb)));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		put_page(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		put_page(e4b->bd_buddy_page);
	}
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{
	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(sb, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which map to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned buddy page to page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(sb, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks page
	 * what we'd like to avoid in fast path ... */
	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize the same. So
			 * wait for it to initialize.
			 */
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL, gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);

	return 0;

err:
	if (page)
		put_page(page);
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
}

static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1, max;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	while (order <= e4b->bd_blkbits + 1) {
		bb = mb_find_buddy(e4b, order, &max);
		if (!mb_test_bit(block >> order, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

/* clear bits in given range
 * will return first found zero bit if any, -1 otherwise
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	}
	else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6].
		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
		 * mark [6] free, increase bb_counters and shrink range to
		 * [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */

		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int left_is_free = 0;
	int right_is_free = 0;
	int block;
	int last = first + count - 1;
	struct super_block *sb = e4b->bd_sb;

	if (WARN_ON(count == 0))
		return;
	BUG_ON(last >= (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	/* Don't bother if the block group is corrupt. */
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return;

	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	this_cpu_inc(discard_pa_seq);
	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* access memory sequentially: check left neighbour,
	 * clear range and then check right neighbour
	 */
	if (first != 0)
		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

	if (unlikely(block != -1)) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		ext4_fsblk_t blocknr;

		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
		blocknr += EXT4_C2B(sbi, block);
		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block (bit %u); block bitmap corrupt.",
					      block);
			ext4_mark_group_bitmap_corrupted(
				sb, e4b->bd_group,
				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		goto done;
	}

	/* let's maintain fragments counter */
	if (left_is_free && right_is_free)
		e4b->bd_info->bb_fragments--;
	else if (!left_is_free && !right_is_free)
		e4b->bd_info->bb_fragments++;

	/* buddy[0] == bd_bitmap is a special case, so handle
	 * it right away and let mb_buddy_mark_free stay free of
	 * zero order checks.
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_update_avg_fragment_size(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

static int mb_find_extent(struct ext4_buddy *e4b, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
		/* Should never happen! (but apparently sometimes does?!?) */
		WARN_ON(1);
		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
			"corruption or bug in mb_find_extent "
			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
			block, order, needed, ex->fe_group, ex->fe_start,
			ex->fe_len, ex->fe_logical);
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
	}
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	this_cpu_inc(discard_pa_seq);
	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
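
/*
 * Worked example (illustrative numbers): allocating len = 3 starting at a
 * free order-3 chunk at block 8.  8 is 8-aligned but len < 8, so the
 * order-3 buddy is split: its bit is set, and the two order-2 halves
 * (blocks 8-11, 12-15) are marked free.  The same happens at order 2,
 * leaving order-1 halves 8-9 and 10-11 free.  Blocks 8-9 are then taken
 * whole at order 1, and block 10 is taken after one more split, leaving
 * block 11 free at order 0.
 */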

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get a ext4_mb_init_cache_call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
	/*
	 * As we've just preallocated more space than
	 * user requested originally, we store allocated
	 * space in a special descriptor.
	 */
	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
		ext4_mb_new_preallocation(ac);
}

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}
/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}
static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}
static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
		ext4_mb_unload_buddy(e4b);
		return 0;
	}

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);
	ex.fe_logical = 0xDEADFA11; /* debug value */

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}
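/*
 * Illustrative sketch (not part of mballoc proper): the s_stripe case
 * above accepts the goal extent only when its first block is
 * stripe-aligned in whole-device block space.  The kernel's do_div()
 * divides in place and returns the 64-bit remainder, so the test is
 * effectively "start % s_stripe == 0".  A hypothetical stand-alone
 * rendering of that check:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdint.h>
#include <stdbool.h>

static bool demo_stripe_aligned(uint64_t first_group_block,
				uint64_t fe_start, uint64_t stripe)
{
	uint64_t start = first_group_block + fe_start;

	return (start % stripe) == 0;	/* what do_div()'s remainder tests */
}
#endif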
/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		if (k >= max) {
			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
				"%d free clusters of order %d. But found 0",
				grp->bb_counters[i], i);
			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
					 e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			break;
		}
		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}
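/*
 * Illustrative sketch (not part of mballoc proper): the simple scan walks
 * bb_counters[] upward from ac_2order; the first order with a non-zero
 * counter must contain a free chunk, and clear bit k of that order's
 * buddy bitmap corresponds to cluster k << i inside the group.  A
 * hypothetical stand-alone version of the order-selection step:
 */
#if 0	/* example only, never compiled into the kernel */
/* hypothetical helper: first usable order at or above "wanted" */
static int demo_pick_order(const int *bb_counters, int wanted, int nr_orders)
{
	int i;

	for (i = wanted; i < nr_orders; i++)
		if (bb_counters[i] != 0)
			return i;	/* fe_len = 1 << i, fe_start = k << i */
	return -1;			/* no chunk of that size here */
}
#endif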
/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	if (WARN_ON(free <= 0))
		return;

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			break;
		}

		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
		if (WARN_ON(ex.fe_len <= 0))
			break;
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			/*
			 * The number of free blocks differs. This most
			 * likely indicates that the bitmap is corrupt,
			 * so exit without claiming the space.
			 */
			break;
		}
		ex.fe_logical = 0xDEADC0DE; /* debug value */
		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}
/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ex.fe_logical = 0xDEADF00D; /* debug value */
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}
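/*
 * Illustrative sketch (not part of mballoc proper): the scan above starts
 * at the first stripe multiple at or after the group's first block, i.e. a
 * round-up division followed by a multiply, converted back to a
 * group-relative offset.  A hypothetical stand-alone rendering:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdint.h>

static uint64_t demo_first_aligned_offset(uint64_t first_group_block,
					  uint64_t stripe)
{
	uint64_t a = (first_group_block + stripe - 1) / stripe; /* round up */

	return a * stripe - first_group_block; /* offset inside the group */
}
#endif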
/*
 * This is also called BEFORE we load the buddy bitmap.
 * Returns either 1 or 0 indicating that the group is either suitable
 * for the allocation or not.
 */
static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	ext4_grpblk_t free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		return false;

	free = grp->bb_free;
	if (free == 0)
		return false;

	fragments = grp->bb_fragments;
	if (fragments == 0)
		return false;

	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return false;

		if (free < ac->ac_g_ex.fe_len)
			return false;

		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
			return true;

		if (grp->bb_largest_free_order < ac->ac_2order)
			return false;

		return true;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return true;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return true;
		break;
	case 3:
		return true;
	default:
		BUG();
	}

	return false;
}
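/*
 * Illustrative sketch (not part of mballoc proper): the four cr levels
 * above amount to progressively weaker requirements.  A hypothetical
 * summary of the non-cr0 checks on (free, fragments, goal length):
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdbool.h>

static bool demo_group_ok(int cr, long free, long fragments, long goal_len)
{
	switch (cr) {
	case 1: /* the average free fragment is already large enough */
		return fragments && (free / fragments) >= goal_len;
	case 2: /* enough free clusters in total, fragmented or not */
		return free >= goal_len;
	case 3: /* anything with free space will do */
		return free > 0;
	default:
		return false;	/* cr0 needs the buddy order data, see above */
	}
}
#endif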
/*
 * This could return negative error code if something goes wrong
 * during ext4_mb_init_group(). This should not be called with
 * ext4_lock_group() held.
 *
 * Note: because we are conditionally operating with the group lock in
 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
 * function using __acquire and __release.  This means we need to be
 * super careful before messing with the error path handling via "goto
 * out"!
 */
static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
				     ext4_group_t group, int cr)
{
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
	ext4_grpblk_t free;
	int ret = 0;

	if (sbi->s_mb_stats)
		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
	if (should_lock) {
		ext4_lock_group(sb, group);
		__release(ext4_group_lock_ptr(sb, group));
	}
	free = grp->bb_free;
	if (free == 0)
		goto out;
	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
		goto out;
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		goto out;
	if (should_lock) {
		__acquire(ext4_group_lock_ptr(sb, group));
		ext4_unlock_group(sb, group);
	}

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		struct ext4_group_desc *gdp =
			ext4_get_group_desc(sb, group, NULL);
		int ret;

		/* cr=0/1 is a very optimistic search to find large
		 * good chunks almost for free.  If buddy data is not
		 * ready, then this optimization makes no sense.  But
		 * we never skip the first block group in a flex_bg,
		 * since this gets used for metadata block allocation,
		 * and we want to make sure we locate metadata blocks
		 * in the first block group in the flex_bg if possible.
		 */
		if (cr < 2 &&
		    (!sbi->s_log_groups_per_flex ||
		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
		    !(ext4_has_group_desc_csum(sb) &&
		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
			return 0;
		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
		if (ret)
			return ret;
	}

	if (should_lock) {
		ext4_lock_group(sb, group);
		__release(ext4_group_lock_ptr(sb, group));
	}
	ret = ext4_mb_good_group(ac, group, cr);
out:
	if (should_lock) {
		__acquire(ext4_group_lock_ptr(sb, group));
		ext4_unlock_group(sb, group);
	}
	return ret;
}
/*
 * Start prefetching @nr block bitmaps starting at @group.
 * Return the next group which needs to be prefetched.
 */
ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
			      unsigned int nr, int *cnt)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct buffer_head *bh;
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (nr-- > 0) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
								  NULL);
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		/*
		 * Prefetch block groups with free blocks; but don't
		 * bother if it is marked uninitialized on disk, since
		 * it won't require I/O to read.  Also only try to
		 * prefetch once, so we avoid getblk() call, which can
		 * be expensive.
		 */
		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
		    EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0 &&
		    !(ext4_has_group_desc_csum(sb) &&
		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
			bh = ext4_read_block_bitmap_nowait(sb, group, true);
			if (bh && !IS_ERR(bh)) {
				if (!buffer_uptodate(bh) && cnt)
					(*cnt)++;
				brelse(bh);
			}
		}
		if (++group >= ngroups)
			group = 0;
	}
	blk_finish_plug(&plug);
	return group;
}
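/*
 * Illustrative sketch (not part of mballoc proper): with flex_bg, the
 * caller in ext4_mb_regular_allocator() clips the prefetch window so it
 * never crosses a flex group boundary.  Since the flex group size is a
 * power of two, "group & (nr - 1)" is the offset within the current flex
 * group.  A hypothetical stand-alone version of that computation:
 */
#if 0	/* example only, never compiled into the kernel */
static unsigned int demo_prefetch_window(unsigned int group,
					 unsigned int groups_per_flex,
					 unsigned int prefetch_max)
{
	unsigned int nr = groups_per_flex;	/* must be a power of two */

	nr -= group & (nr - 1);		/* groups left in this flex group */
	return nr < prefetch_max ? nr : prefetch_max;
}
#endif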
/*
 * Prefetching reads the block bitmap into the buffer cache; but we
 * need to make sure that the buddy bitmap in the page cache has been
 * initialized.  Note that ext4_mb_init_group() will block if the I/O
 * is not yet completed, or indeed if the I/O was not initiated by
 * ext4_mb_prefetch in the first place.
 *
 * TODO: We should actually kick off the buddy bitmap setup in a work
 * queue when the buffer I/O is completed, so that we don't block
 * waiting for the block allocation bitmap read to finish when
 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
 */
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
			   unsigned int nr)
{
	while (nr-- > 0) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
								  NULL);
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		if (!group)
			group = ext4_get_groups_count(sb);
		group--;
		grp = ext4_get_group_info(sb, group);

		if (EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0 &&
		    !(ext4_has_group_desc_csum(sb) &&
		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
			if (ext4_mb_init_group(sb, group, GFP_NOFS))
				break;
		}
	}
}
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t prefetch_grp = 0, ngroups, group, i;
	int cr = -1;
	int err = 0, first_err = 0;
	unsigned int nr = 0, prefetch_ios = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;
	int lost;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac_2order is set only if the fe_len is a power of 2
	 * if ac->ac_2order is set we also set criteria to 0 so that we
	 * try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than or equal to sbi->s_mb_order2_reqs.
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 * We also support searching for power-of-two requests only for
	 * requests up to the maximum buddy size we have constructed.
	 */
	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
		/*
		 * This should tell if fe_len is exactly power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = array_index_nospec(i - 1,
							   MB_NUM_ORDERS(sb));
	}

	/* if stream allocation is enabled, use global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be hot point */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more or less suitable blocks */
	cr = ac->ac_2order ? 0 : 1;
	/*
	 * cr == 0 try to get exact allocation,
	 * cr == 3 try to get anything
	 */
repeat:
	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
		ac->ac_criteria = cr;
		/*
		 * searching for the right group start
		 * from the goal value specified
		 */
		group = ac->ac_g_ex.fe_group;
		ac->ac_last_optimal_group = group;
		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
		prefetch_grp = group;

		for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
			     i++) {
			int ret = 0, new_cr;

			cond_resched();

			ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
			if (new_cr != cr) {
				cr = new_cr;
				goto repeat;
			}

			/*
			 * Batch reads of the block allocation bitmaps
			 * to get multiple READs in flight; limit
			 * prefetching at cr=0/1, otherwise mballoc can
			 * spend a lot of time loading imperfect groups
			 */
			if ((prefetch_grp == group) &&
			    (cr > 1 ||
			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
				unsigned int curr_ios = prefetch_ios;

				nr = sbi->s_mb_prefetch;
				if (ext4_has_feature_flex_bg(sb)) {
					nr = 1 << sbi->s_log_groups_per_flex;
					nr -= group & (nr - 1);
					nr = min(nr, sbi->s_mb_prefetch);
				}
				prefetch_grp = ext4_mb_prefetch(sb, group,
							nr, &prefetch_ios);
				if (prefetch_ios == curr_ios)
					nr = 0;
			}

			/* This now checks without needing the buddy page */
			ret = ext4_mb_good_group_nolock(ac, group, cr);
			if (ret <= 0) {
				if (!first_err)
					first_err = ret;
				continue;
			}

			err = ext4_mb_load_buddy(sb, group, &e4b);
			if (err)
				goto out;

			ext4_lock_group(sb, group);

			/*
			 * We need to check again after locking the
			 * block group
			 */
			ret = ext4_mb_good_group(ac, group, cr);
			if (ret == 0) {
				ext4_unlock_group(sb, group);
				ext4_mb_unload_buddy(&e4b);
				continue;
			}

			ac->ac_groups_scanned++;
			if (cr == 0)
				ext4_mb_simple_scan_group(ac, &e4b);
			else if (cr == 1 && sbi->s_stripe &&
					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
				ext4_mb_scan_aligned(ac, &e4b);
			else
				ext4_mb_complex_scan_group(ac, &e4b);

			ext4_unlock_group(sb, group);
			ext4_mb_unload_buddy(&e4b);

			if (ac->ac_status != AC_STATUS_CONTINUE)
				break;
		}
		/* Processed all groups and haven't found blocks */
		if (sbi->s_mb_stats && i == ngroups)
			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
	}

	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		/*
		 * We've been searching too long. Let's try to allocate
		 * the best chunk we've found so far
		 */
		ext4_mb_try_best_found(ac, &e4b);
		if (ac->ac_status != AC_STATUS_FOUND) {
			/*
			 * Someone more lucky has already allocated it.
			 * The only thing we can do is just take first
			 * found block(s)
			 */
			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
				 ac->ac_b_ex.fe_len, lost);

			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			ac->ac_flags |= EXT4_MB_HINT_FIRST;
			cr = 3;
			goto repeat;
		}
	}

	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
out:
	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
		err = first_err;

	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
		 ac->ac_flags, cr, err);

	if (nr)
		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);

	return err;
}
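/*
 * Illustrative sketch (not part of mballoc proper): the cr=0 fast path
 * above only fires for power-of-two request sizes.  With i = fls(len),
 * clearing bit i-1 must leave nothing behind iff len is exactly 2^(i-1).
 * A hypothetical stand-alone rendering of that test:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdbool.h>

static bool demo_is_power_of_two(unsigned int len, int fls_len)
{
	/* same test as (fe_len & ~(1 << (i - 1))) == 0 in the allocator */
	return len != 0 && (len & ~(1U << (fls_len - 1))) == 0;
}
#endif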
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i;
	int err, buddy_loaded = 0;
	struct ext4_buddy e4b;
	struct ext4_group_info *grinfo;
	unsigned char blocksize_bits = min_t(unsigned char,
					     sb->s_blocksize_bits,
					     EXT4_MAX_BLOCK_LOG_SIZE);
	struct sg {
		struct ext4_group_info info;
		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
	} sg;

	group--;
	if (group == 0)
		seq_puts(seq, "#group: free  frags first ["
			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");

	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
		sizeof(struct ext4_group_info);

	grinfo = ext4_get_group_info(sb, group);
	/* Load the group info in memory only if not already loaded. */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			seq_printf(seq, "#%-5u: I/O error\n", group);
			return 0;
		}
		buddy_loaded = 1;
	}

	memcpy(&sg, ext4_get_group_info(sb, group), i);

	if (buddy_loaded)
		ext4_mb_unload_buddy(&e4b);

	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
			sg.info.bb_fragments, sg.info.bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
				sg.info.bb_counters[i] : 0);
	seq_puts(seq, " ]\n");

	return 0;
}

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}
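/*
 * For reference, a hypothetical line of /proc/fs/ext4/<dev>/mb_groups as
 * produced by the show routine above (the values are invented purely for
 * illustration):
 *
 *   #group: free  frags first [ 2^0   2^1   2^2  ...  2^13  ]
 *   #0    : 24543 13    252   [ 1     1     1    ...  2     ]
 */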
const struct seq_operations ext4_mb_seq_groups_ops = {
	.start  = ext4_mb_seq_groups_start,
	.next   = ext4_mb_seq_groups_next,
	.stop   = ext4_mb_seq_groups_stop,
	.show   = ext4_mb_seq_groups_show,
};
int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = (struct super_block *)seq->private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	seq_puts(seq, "mballoc:\n");
	if (!sbi->s_mb_stats) {
		seq_puts(seq, "\tmb stats collection turned off.\n");
		seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
		return 0;
	}
	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));

	seq_printf(seq, "\tgroups_scanned: %u\n",
		   atomic_read(&sbi->s_bal_groups_scanned));

	seq_puts(seq, "\tcr0_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[0]));
	seq_printf(seq, "\t\tbad_suggestions: %u\n",
		   atomic_read(&sbi->s_bal_cr0_bad_suggestions));

	seq_puts(seq, "\tcr1_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[1]));
	seq_printf(seq, "\t\tbad_suggestions: %u\n",
		   atomic_read(&sbi->s_bal_cr1_bad_suggestions));

	seq_puts(seq, "\tcr2_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[2]));

	seq_puts(seq, "\tcr3_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[3]));
	seq_printf(seq, "\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_ex_scanned));
	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));

	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
		   atomic_read(&sbi->s_mb_buddies_generated),
		   ext4_get_groups_count(sb));
	seq_printf(seq, "\tbuddies_time_used: %llu\n",
		   atomic64_read(&sbi->s_mb_generation_time));
	seq_printf(seq, "\tpreallocated: %u\n",
		   atomic_read(&sbi->s_mb_preallocated));
	seq_printf(seq, "\tdiscarded: %u\n",
		   atomic_read(&sbi->s_mb_discarded));
	return 0;
}
static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	unsigned long position;

	read_lock(&EXT4_SB(sb)->s_mb_rb_lock);

	if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
		return NULL;
	position = *pos + 1;
	return (void *) ((unsigned long) position);
}

static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	unsigned long position;

	++*pos;
	if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
		return NULL;
	position = *pos + 1;
	return (void *) ((unsigned long) position);
}

static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long position = ((unsigned long) v);
	struct ext4_group_info *grp;
	struct rb_node *n;
	unsigned int count, min, max;

	position--;
	if (position >= MB_NUM_ORDERS(sb)) {
		seq_puts(seq, "fragment_size_tree:\n");
		n = rb_first(&sbi->s_mb_avg_fragment_size_root);
		if (!n) {
			seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
			return 0;
		}
		grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
		min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
		count = 1;
		while (rb_next(n)) {
			count++;
			n = rb_next(n);
		}
		grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
		max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;

		seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
			   min, max, count);
		return 0;
	}

	if (position == 0) {
		seq_printf(seq, "optimize_scan: %d\n",
			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
		seq_puts(seq, "max_free_order_lists:\n");
	}
	count = 0;
	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
			    bb_largest_free_order_node)
		count++;
	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
		   (unsigned int)position, count);

	return 0;
}

static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
__releases(&EXT4_SB(sb)->s_mb_rb_lock)
{
	struct super_block *sb = PDE_DATA(file_inode(seq->file));

	read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
}

const struct seq_operations ext4_mb_seq_structs_summary_ops = {
	.start  = ext4_mb_seq_structs_summary_start,
	.next   = ext4_mb_seq_structs_summary_next,
	.stop   = ext4_mb_seq_structs_summary_stop,
	.show   = ext4_mb_seq_structs_summary_show,
};
static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
{
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];

	BUG_ON(!cachep);
	return cachep;
}
/*
 * Allocate the top-level s_group_info array for the specified number
 * of groups
 */
int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned size;
	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;

	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
		EXT4_DESC_PER_BLOCK_BITS(sb);
	if (size <= sbi->s_group_info_size)
		return 0;

	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
	new_groupinfo = kvzalloc(size, GFP_KERNEL);
	if (!new_groupinfo) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
		return -ENOMEM;
	}
	rcu_read_lock();
	old_groupinfo = rcu_dereference(sbi->s_group_info);
	if (old_groupinfo)
		memcpy(new_groupinfo, old_groupinfo,
		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
	if (old_groupinfo)
		ext4_kvfree_array_rcu(old_groupinfo);
	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
		   sbi->s_group_info_size);
	return 0;
}
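/*
 * Illustrative sketch (not part of mballoc proper): the top-level array
 * holds one pointer block per EXT4_DESC_PER_BLOCK(sb) groups, and its byte
 * size is rounded up to a power of two so later resizes can reuse the
 * allocation.  Hedged stand-alone arithmetic with hypothetical names:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stddef.h>

static size_t demo_groupinfo_array_bytes(size_t ngroups,
					 size_t desc_per_block,
					 size_t ptr_size)
{
	size_t nmeta = (ngroups + desc_per_block - 1) / desc_per_block;
	size_t bytes = nmeta * ptr_size;
	size_t p = 1;

	/* round up to the next power of two, as roundup_pow_of_two() does */
	while (p < bytes)
		p <<= 1;
	return p;
}
#endif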
/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i;
	int metalen = 0;
	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_NOFS);
		if (meta_group_info == NULL) {
			ext4_msg(sb, KERN_ERR, "can't allocate mem "
				 "for a buddy group");
			goto exit_meta_group_info;
		}
		rcu_read_lock();
		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
		rcu_read_unlock();
	}

	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
	if (meta_group_info[i] == NULL) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
		goto exit_group_info;
	}
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		meta_group_info[i]->bb_free =
			ext4_free_clusters_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_group_clusters(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
	RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
	meta_group_info[i]->bb_group = group;

	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		struct ext4_group_info ***group_info;

		rcu_read_lock();
		group_info = rcu_dereference(sbi->s_group_info);
		kfree(group_info[idx]);
		group_info[idx] = NULL;
		rcu_read_unlock();
	}
exit_meta_group_info:
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */
static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;
	struct ext4_group_desc *desc;
	struct ext4_group_info ***group_info;
	struct kmem_cache *cachep;

	err = ext4_mb_alloc_groupinfo(sb, ngroups);
	if (err)
		return err;

	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		ext4_msg(sb, KERN_ERR, "can't get new inode");
		goto err_freesgi;
	}
	/* To avoid potentially colliding with a valid on-disk inode number,
	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
	 * not in the inode hash, so it should never be found by iget(), but
	 * this will avoid confusion if it ever shows up during debugging. */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		cond_resched();
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	if (ext4_has_feature_flex_bg(sb)) {
		/* a single flex group is supposed to be read by a single IO.
		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
		 * unsigned integer, so the maximum shift is 32.
		 */
		if (sbi->s_es->s_log_groups_per_flex >= 32) {
			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
			goto err_freebuddy;
		}
		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
	} else {
		sbi->s_mb_prefetch = 32;
	}
	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
	/* how many real IOs to prefetch within a single allocation at cr=0.
	 * Given cr=0 is a CPU-related optimization we shouldn't try to
	 * load too many groups; at some point we should start to use what
	 * we've got in memory.
	 * With an average random access time of 5ms, it'd take a second to
	 * get 200 groups (* N with flex_bg), so let's make this limit 4x.
	 */
	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);

	return 0;

err_freebuddy:
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0)
		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
	i = sbi->s_group_info_size;
	rcu_read_lock();
	group_info = rcu_dereference(sbi->s_group_info);
	while (i-- > 0)
		kfree(group_info[i]);
	rcu_read_unlock();
	iput(sbi->s_buddy_cache);
err_freesgi:
	rcu_read_lock();
	kvfree(rcu_dereference(sbi->s_group_info));
	rcu_read_unlock();
	return -ENOMEM;
}
static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	slab_size = offsetof(struct ext4_group_info,
				bb_counters[blocksize_bits + 2]);

	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
					NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
	if (!cachep) {
		printk(KERN_EMERG
		       "EXT4-fs: no memory for groupinfo slab cache\n");
		return -ENOMEM;
	}

	return 0;
}
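/*
 * Illustrative sketch (not part of mballoc proper): bb_counters[] is a
 * flexible trailing array with one slot per buddy order plus two, so the
 * slab object size above is computed with offsetof() rather than sizeof().
 * A hypothetical stand-alone rendering of the same idea:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stddef.h>

struct demo_group_info {
	long fixed_part;
	unsigned int bb_counters[];	/* flexible array member */
};

static size_t demo_slab_size(int blocksize_bits)
{
	return offsetof(struct demo_group_info,
			bb_counters[blocksize_bits + 2]);
}
#endif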
static void ext4_discard_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work,
			struct ext4_sb_info, s_discard_work);
	struct super_block *sb = sbi->s_sb;
	struct ext4_free_data *fd, *nfd;
	struct ext4_buddy e4b;
	struct list_head discard_list;
	ext4_group_t grp, load_grp;
	int err = 0;

	INIT_LIST_HEAD(&discard_list);
	spin_lock(&sbi->s_md_lock);
	list_splice_init(&sbi->s_discard_list, &discard_list);
	spin_unlock(&sbi->s_md_lock);

	load_grp = UINT_MAX;
	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
		/*
		 * If filesystem is umounting or no memory or suffering
		 * from no space, give up the discard
		 */
		if ((sb->s_flags & SB_ACTIVE) && !err &&
		    !atomic_read(&sbi->s_retry_alloc_pending)) {
			grp = fd->efd_group;
			if (grp != load_grp) {
				if (load_grp != UINT_MAX)
					ext4_mb_unload_buddy(&e4b);

				err = ext4_mb_load_buddy(sb, grp, &e4b);
				if (err) {
					kmem_cache_free(ext4_free_data_cachep, fd);
					load_grp = UINT_MAX;
					continue;
				}
				load_grp = grp;
			}

			ext4_lock_group(sb, grp);
			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
					fd->efd_start_cluster + fd->efd_count - 1, 1);
			ext4_unlock_group(sb, grp);
		}
		kmem_cache_free(ext4_free_data_cachep, fd);
	}

	if (load_grp != UINT_MAX)
		ext4_mb_unload_buddy(&e4b);
}
int ext4_mb_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset, offset_incr;
	unsigned max;
	int ret;

	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
	if (ret < 0)
		goto out;

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	i = 1;
	offset = 0;
	offset_incr = 1 << (sb->s_blocksize_bits - 1);
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += offset_incr;
		offset_incr = offset_incr >> 1;
		max = max >> 1;
		i++;
	} while (i < MB_NUM_ORDERS(sb));

	sbi->s_mb_avg_fragment_size_root = RB_ROOT;
	sbi->s_mb_largest_free_orders =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
			GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders) {
		ret = -ENOMEM;
		goto out;
	}
	sbi->s_mb_largest_free_orders_locks =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
			GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders_locks) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
	}
	rwlock_init(&sbi->s_mb_rb_lock);

	spin_lock_init(&sbi->s_md_lock);
	sbi->s_mb_free_pending = 0;
	INIT_LIST_HEAD(&sbi->s_freed_data_list);
	INIT_LIST_HEAD(&sbi->s_discard_list);
	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
	atomic_set(&sbi->s_retry_alloc_pending, 0);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes.  However for bigalloc file
	 * systems, this is probably too big (i.e, if the cluster size
	 * is 1 megabyte, then group preallocation size becomes half a
	 * gigabyte!).  As a default, we will keep a two megabyte
	 * group prealloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters.  This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
	 */
	if (sbi->s_stripe > 1) {
		sbi->s_mb_group_prealloc = roundup(
			sbi->s_mb_group_prealloc, sbi->s_stripe);
	}

	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
	if (sbi->s_locality_groups == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	for_each_possible_cpu(i) {
		struct ext4_locality_group *lg;
		lg = per_cpu_ptr(sbi->s_locality_groups, i);
		mutex_init(&lg->lg_mutex);
		for (j = 0; j < PREALLOC_TB_SIZE; j++)
			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
		spin_lock_init(&lg->lg_prealloc_lock);
	}

	if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
		sbi->s_mb_max_linear_groups = 0;
	else
		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0)
		goto out_free_locality_groups;

	return 0;

out_free_locality_groups:
	free_percpu(sbi->s_locality_groups);
	sbi->s_locality_groups = NULL;
out:
	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	sbi->s_mb_offsets = NULL;
	kfree(sbi->s_mb_maxs);
	sbi->s_mb_maxs = NULL;
	return ret;
}
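/*
 * Illustrative sketch (not part of mballoc proper): the clamp above keeps
 * the group preallocation at 512 blocks-worth of clusters for small
 * cluster sizes and at a floor of 32 clusters for large ones.  Hedged
 * stand-alone arithmetic, assuming MB_DEFAULT_GROUP_PREALLOC == 512:
 */
#if 0	/* example only, never compiled into the kernel */
static unsigned int demo_group_prealloc(unsigned int cluster_bits)
{
	unsigned int p = 512 >> cluster_bits;	/* clusters per 512 blocks */

	return p > 32 ? p : 32;	/* e.g. cluster_bits=4 (64k/4k) -> 32 */
}
#endif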
/* needs to be called with the ext4 group lock held */
static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
	struct ext4_prealloc_space *pa;
	struct list_head *cur, *tmp;
	int count = 0;

	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		list_del(&pa->pa_group_list);
		count++;
		kmem_cache_free(ext4_pspace_cachep, pa);
	}
	return count;
}
int ext4_mb_release(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	int num_meta_group_infos;
	struct ext4_group_info *grinfo, ***group_info;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	int count;

	if (test_opt(sb, DISCARD)) {
		/*
		 * wait for the discard work to drain all of ext4_free_data
		 */
		flush_work(&sbi->s_discard_work);
		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
	}

	if (sbi->s_group_info) {
		for (i = 0; i < ngroups; i++) {
			cond_resched();
			grinfo = ext4_get_group_info(sb, i);
			mb_group_bb_bitmap_free(grinfo);
			ext4_lock_group(sb, i);
			count = ext4_mb_cleanup_pa(grinfo);
			if (count)
				mb_debug(sb, "mballoc: %d PAs left\n",
					 count);
			ext4_unlock_group(sb, i);
			kmem_cache_free(cachep, grinfo);
		}
		num_meta_group_infos = (ngroups +
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
			EXT4_DESC_PER_BLOCK_BITS(sb);
		rcu_read_lock();
		group_info = rcu_dereference(sbi->s_group_info);
		for (i = 0; i < num_meta_group_infos; i++)
			kfree(group_info[i]);
		kvfree(group_info);
		rcu_read_unlock();
	}
	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u blocks %u reqs (%u success)",
				atomic_read(&sbi->s_bal_allocated),
				atomic_read(&sbi->s_bal_reqs),
				atomic_read(&sbi->s_bal_success));
		ext4_msg(sb, KERN_INFO,
		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
				"%u 2^N hits, %u breaks, %u lost",
				atomic_read(&sbi->s_bal_ex_scanned),
				atomic_read(&sbi->s_bal_groups_scanned),
				atomic_read(&sbi->s_bal_goals),
				atomic_read(&sbi->s_bal_2orders),
				atomic_read(&sbi->s_bal_breaks),
				atomic_read(&sbi->s_mb_lost_chunks));
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u generated and it took %llu",
				atomic_read(&sbi->s_mb_buddies_generated),
				atomic64_read(&sbi->s_mb_generation_time));
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u preallocated, %u discarded",
				atomic_read(&sbi->s_mb_preallocated),
				atomic_read(&sbi->s_mb_discarded));
	}

	free_percpu(sbi->s_locality_groups);

	return 0;
}
static inline int ext4_issue_discard(struct super_block *sb,
		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
		struct bio **biop)
{
	ext4_fsblk_t discard_block;

	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
			 ext4_group_first_block_no(sb, block_group));
	count = EXT4_C2B(EXT4_SB(sb), count);
	trace_ext4_discard_blocks(sb,
			(unsigned long long) discard_block, count);
	if (biop) {
		return __blkdev_issue_discard(sb->s_bdev,
			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
			(sector_t)count << (sb->s_blocksize_bits - 9),
			GFP_NOFS, 0, biop);
	} else
		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}
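/*
 * Illustrative sketch (not part of mballoc proper): __blkdev_issue_discard()
 * takes 512-byte sectors, so filesystem blocks are shifted by
 * (s_blocksize_bits - 9); e.g. a 4k-block filesystem shifts by 3:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdint.h>

static uint64_t demo_block_to_sector(uint64_t block, int blocksize_bits)
{
	return block << (blocksize_bits - 9);	/* 2^9 = 512-byte sectors */
}
#endif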
static void ext4_free_data_in_buddy(struct super_block *sb,
				    struct ext4_free_data *entry)
{
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0, count2 = 0;

	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
		 entry->efd_count, entry->efd_group, entry);

	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
	/* we expect to find existing buddy because it's pinned */
	BUG_ON(err != 0);

	spin_lock(&EXT4_SB(sb)->s_md_lock);
	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
	spin_unlock(&EXT4_SB(sb)->s_md_lock);

	db = e4b.bd_info;
	/* there are blocks to put in buddy to make them really free */
	count += entry->efd_count;
	count2++;
	ext4_lock_group(sb, entry->efd_group);
	/* Take it out of per group rb tree */
	rb_erase(&entry->efd_node, &(db->bb_free_root));
	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);

	/*
	 * Clear the trimmed flag for the group so that the next
	 * ext4_trim_fs can trim it.
	 * If the volume is mounted with -o discard, online discard
	 * is supported and the free blocks will be trimmed online.
	 */
	if (!test_opt(sb, DISCARD))
		EXT4_MB_GRP_CLEAR_TRIMMED(db);

	if (!db->bb_free_root.rb_node) {
		/* No more items in the per group rb tree
		 * balance refcounts from ext4_mb_free_metadata()
		 */
		put_page(e4b.bd_buddy_page);
		put_page(e4b.bd_bitmap_page);
	}
	ext4_unlock_group(sb, entry->efd_group);
	ext4_mb_unload_buddy(&e4b);

	mb_debug(sb, "freed %d blocks in %d structures\n", count,
		 count2);
}
/*
 * This function is called by the jbd2 layer once the commit has finished,
 * so we know we can free the blocks that were released with that commit.
 */
void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_free_data *entry, *tmp;
	struct list_head freed_data_list;
	struct list_head *cut_pos = NULL;
	bool wake;

	INIT_LIST_HEAD(&freed_data_list);

	spin_lock(&sbi->s_md_lock);
	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
		if (entry->efd_tid != commit_tid)
			break;
		cut_pos = &entry->efd_list;
	}
	if (cut_pos)
		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
				  cut_pos);
	spin_unlock(&sbi->s_md_lock);

	list_for_each_entry(entry, &freed_data_list, efd_list)
		ext4_free_data_in_buddy(sb, entry);

	if (test_opt(sb, DISCARD)) {
		spin_lock(&sbi->s_md_lock);
		wake = list_empty(&sbi->s_discard_list);
		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
		spin_unlock(&sbi->s_md_lock);
		if (wake)
			queue_work(system_unbound_wq, &sbi->s_discard_work);
	} else {
		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
			kmem_cache_free(ext4_free_data_cachep, entry);
	}
}
int __init ext4_init_mballoc(void)
{
	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
					SLAB_RECLAIM_ACCOUNT);
	if (ext4_pspace_cachep == NULL)
		goto out;

	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
				    SLAB_RECLAIM_ACCOUNT);
	if (ext4_ac_cachep == NULL)
		goto out_pa_free;

	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
					   SLAB_RECLAIM_ACCOUNT);
	if (ext4_free_data_cachep == NULL)
		goto out_ac_free;

	return 0;

out_ac_free:
	kmem_cache_destroy(ext4_ac_cachep);
out_pa_free:
	kmem_cache_destroy(ext4_pspace_cachep);
out:
	return -ENOMEM;
}

void ext4_exit_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_data_cachep);
	ext4_groupinfo_destroy_slabs();
}
/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
 * Returns 0 if success or error code
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_clstrs)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto out_err;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	BUFFER_TRACE(gdp_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
	if (err)
		goto out_err;

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/* File system mounted not to panic on error
		 * Fix the bitmap and return EFSCORRUPTED
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			      ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			err = -EFSCORRUPTED;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		      ac->ac_b_ex.fe_len);
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
					     ext4_free_clusters_after_init(sb,
						ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_group_clusters_set(sb, gdp, len);
	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic64_sub(ac->ac_b_ex.fe_len,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	brelse(bitmap_bh);
	return err;
}
/*
 * Idempotent helper for Ext4 fast commit replay path to set the state of
 * blocks in bitmaps and update counters.
 */
void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
			int len, int state)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	int i, err;
	int already;
	unsigned int clen, clen_changed, thisgrp_len;

	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);

	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 * In case of flex_bg, this can happen that (block, len) may
	 * span across more than one group. In that case we need to
	 * get the corresponding group metadata to work with.
	 * For this we have the "again" loop below.
	 */
again:
	thisgrp_len = min_t(unsigned int, (unsigned int)len,
		EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
	clen = EXT4_NUM_B2C(sbi, thisgrp_len);

	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto out_err;
	}

	err = -EIO;
	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_lock_group(sb, group);
	already = 0;
	for (i = 0; i < clen; i++)
		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
				 !state)
			already++;

	clen_changed = clen - already;
	if (state)
		ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
	else
		mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
			ext4_free_clusters_after_init(sb, group, gdp));
	}
	if (state)
		clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
	else
		clen = ext4_free_group_clusters(sb, gdp) + clen_changed;

	ext4_free_group_clusters_set(sb, gdp, clen);
	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, group, gdp);

	ext4_unlock_group(sb, group);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, group);
		struct flex_groups *fg = sbi_array_rcu_deref(sbi,
					s_flex_groups, flex_group);

		if (state)
			atomic64_sub(clen_changed, &fg->free_clusters);
		else
			atomic64_add(clen_changed, &fg->free_clusters);
	}

	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
	if (err)
		goto out_err;
	sync_dirty_buffer(bitmap_bh);
	err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
	sync_dirty_buffer(gdp_bh);
	if (err)
		goto out_err;

	block += thisgrp_len;
	len -= thisgrp_len;
	brelse(bitmap_bh);
	bitmap_bh = NULL;
	if (len > 0) {
		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
		goto again;
	}
	return;

out_err:
	brelse(bitmap_bh);
}
/*
 * here we normalize request for locality group
 * Group requests are normalized to s_mb_group_prealloc, which goes to
 * s_stripe if we set the same via mount option.
 * s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
}
4013 * Normalization means making request better in terms of
4014 * size and alignment
4016 static noinline_for_stack
void
4017 ext4_mb_normalize_request(struct ext4_allocation_context
*ac
,
4018 struct ext4_allocation_request
*ar
)
4020 struct ext4_sb_info
*sbi
= EXT4_SB(ac
->ac_sb
);
4023 loff_t size
, start_off
;
4024 loff_t orig_size __maybe_unused
;
4026 struct ext4_inode_info
*ei
= EXT4_I(ac
->ac_inode
);
4027 struct ext4_prealloc_space
*pa
;
4029 /* do normalize only data requests, metadata requests
4030 do not need preallocation */
4031 if (!(ac
->ac_flags
& EXT4_MB_HINT_DATA
))
4034 /* sometime caller may want exact blocks */
4035 if (unlikely(ac
->ac_flags
& EXT4_MB_HINT_GOAL_ONLY
))
4038 /* caller may indicate that preallocation isn't
4039 * required (it's a tail, for example) */
4040 if (ac
->ac_flags
& EXT4_MB_HINT_NOPREALLOC
)
4043 if (ac
->ac_flags
& EXT4_MB_HINT_GROUP_ALLOC
) {
4044 ext4_mb_normalize_group_request(ac
);
4048 bsbits
= ac
->ac_sb
->s_blocksize_bits
;
4050 /* first, let's learn actual file size
4051 * given current request is allocated */
4052 size
= ac
->ac_o_ex
.fe_logical
+ EXT4_C2B(sbi
, ac
->ac_o_ex
.fe_len
);
4053 size
= size
<< bsbits
;
4054 if (size
< i_size_read(ac
->ac_inode
))
4055 size
= i_size_read(ac
->ac_inode
);
	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
					      ac->ac_o_ex.fe_len) << bsbits;
	}
	size = size >> bsbits;
	start = start_off >> bsbits;
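	/*
	 * Worked example (4 KiB blocks, bsbits = 12): a write that would
	 * leave the file at 20 KiB falls into the "size <= 32 * 1024"
	 * slot above, so the goal is widened to 32 KiB (8 blocks).  A
	 * 3 MiB file instead hits the first NRL_CHECK_SIZE() row and is
	 * aligned to a 2 MiB boundary: fe_logical is shifted right by
	 * (21 - 12) = 9 and back left by 21, clearing the low 21 bits
	 * of the byte offset.
	 */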
	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	/*
	 * Trim allocation request for filesystems with artificially small
	 * groups.
	 */
	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);

	end = start + size;
	/* check we don't cross already preallocated blocks */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		if (pa->pa_deleted)
			continue;
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
						  pa->pa_len);

		/* PA must not overlap original request */
		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
			ac->ac_o_ex.fe_logical < pa->pa_lstart));

		/* skip PAs this normalized request doesn't overlap with */
		if (pa->pa_lstart >= end || pa_end <= start) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		BUG_ON(pa->pa_lstart <= start && pa_end >= end);

		/* adjust start or end to be adjacent to this pa */
		if (pa_end <= ac->ac_o_ex.fe_logical) {
			BUG_ON(pa_end < start);
			start = pa_end;
		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
			BUG_ON(pa->pa_lstart > end);
			end = pa->pa_lstart;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();
	size = end - start;
	/* XXX: extra loop to check we really don't overlap preallocations */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0) {
			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
							  pa->pa_len);
			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();
	if (start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
		BUG();
	}
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	/* now prepare goal request */

	/* XXX: is it better to align blocks WRT to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size))) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
		 orig_size, start);
}
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}
/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;
	struct ext4_buddy e4b;
	int err;

	if (pa == NULL) {
		if (ac->ac_f_ex.fe_len == 0)
			return;
		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
		if (err) {
			/*
			 * This should never happen since we pin the
			 * pages in the ext4_allocation_context so
			 * ext4_mb_load_buddy() should never fail.
			 */
			WARN(1, "mb_load_buddy failed (%d)", err);
			return;
		}
		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
			       ac->ac_f_ex.fe_len);
		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		ext4_mb_unload_buddy(&e4b);
		return;
	}
	if (pa->pa_type == MB_INODE_PA)
		pa->pa_free += ac->ac_b_ex.fe_len;
}
/*
 * use blocks preallocated to inode
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	pa->pa_free -= len;

	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
}
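/*
 * Example (1:1 cluster ratio): a PA with pa_lstart = 100,
 * pa_pstart = 5000, pa_len = 16 serving a 4-block request at logical
 * block 110 gives start = 5000 + (110 - 100) = 5010 and
 * end = min(5016, 5014) = 5014, i.e. 4 blocks at physical block 5010.
 */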
/*
 * use blocks preallocated to locality group
 */
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	unsigned int len = ac->ac_o_ex.fe_len;

	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
					&ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* we don't correct pa_pstart or pa_plen here to avoid
	 * possible race when the group is being loaded concurrently
	 * instead we correct pa later, after blocks are marked
	 * in on-disk bitmap -- see ext4_mb_release_context()
	 * Other CPUs are prevented from allocating from this pa by lg_mutex
	 */
	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
		 pa->pa_lstart - len, len, pa);
}
/*
 * Return the prealloc space that has the minimal distance
 * from the goal block. @cpa is the prealloc
 * space with the currently known minimal distance
 * from the goal block.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}
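/*
 * E.g. with goal_block = 1000, a cpa at pa_pstart = 900 (distance 100)
 * is kept over a pa at pa_pstart = 1200 (distance 200); the reference
 * counts are swapped only when the new pa is strictly closer.
 */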
/*
 * search goal blocks in preallocated space
 */
static noinline_for_stack bool
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int order, i;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa, *cpa = NULL;
	ext4_fsblk_t goal_block;

	/* only data can be preallocated */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return false;

	/* first, try per-file preallocation */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {

		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
					       EXT4_C2B(sbi, pa->pa_len)))
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
		     EXT4_MAX_BLOCK_FILE_PHYS))
			continue;

		/* found preallocated blocks, use them */
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0 && pa->pa_free) {
			atomic_inc(&pa->pa_count);
			ext4_mb_use_inode_pa(ac, pa);
			spin_unlock(&pa->pa_lock);
			ac->ac_criteria = 10;
			rcu_read_unlock();
			return true;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	/* can we use group allocation? */
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
		return false;

	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	if (lg == NULL)
		return false;
	order  = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;

	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
	/*
	 * search for the prealloc space that is having
	 * minimal distance from the goal block.
	 */
	for (i = order; i < PREALLOC_TB_SIZE; i++) {
		rcu_read_lock();
		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
					pa_inode_list) {
			spin_lock(&pa->pa_lock);
			if (pa->pa_deleted == 0 &&
					pa->pa_free >= ac->ac_o_ex.fe_len) {
				cpa = ext4_mb_check_group_pa(goal_block,
								pa, cpa);
			}
			spin_unlock(&pa->pa_lock);
		}
		rcu_read_unlock();
	}
	if (cpa) {
		ext4_mb_use_group_pa(ac, cpa);
		ac->ac_criteria = 20;
		return true;
	}
	return false;
}
/*
 * the function goes through all blocks freed in the group
 * but not yet committed and marks them used in in-core bitmap.
 * buddy must be generated from this bitmap
 * Need to be called with the ext4 group lock held
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	n = rb_first(&(grp->bb_free_root));

	while (n) {
		entry = rb_entry(n, struct ext4_free_data, efd_node);
		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
		n = rb_next(n);
	}
}
/*
 * the function goes through all preallocations in this group and marks them
 * used in in-core bitmap. buddy must be generated from this bitmap
 * Need to be called with ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int len;

	/* all form of preallocation discards first load group,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here
	 * notice we do NOT ignore preallocations with pa_deleted
	 * otherwise we could leave used blocks available for
	 * allocation in buddy when concurrent ext4_mb_put_pa()
	 * is dropping preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
					     &groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		ext4_set_bits(bitmap, start, len);
		preallocated += len;
	}
	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
}
static void ext4_mb_mark_pa_deleted(struct super_block *sb,
				    struct ext4_prealloc_space *pa)
{
	struct ext4_inode_info *ei;

	if (pa->pa_deleted) {
		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
			     pa->pa_len);
		return;
	}

	pa->pa_deleted = 1;

	if (pa->pa_type == MB_INODE_PA) {
		ei = EXT4_I(pa->pa_inode);
		atomic_dec(&ei->i_prealloc_active);
	}
}

static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;

	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);

	BUG_ON(atomic_read(&pa->pa_count));
	BUG_ON(pa->pa_deleted == 0);
	kmem_cache_free(ext4_pspace_cachep, pa);
}
/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	ext4_mb_mark_pa_deleted(sb, pa);
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	grp = ext4_get_group_number(sb, grp_blk);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)			P2 (regular allocation)
	 *					find block B in PA
	 *  copy on-disk bitmap to buddy
	 *					mark B in on-disk bitmap
	 *					drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available. to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	spin_lock(pa->pa_obj_lock);
	list_del_rcu(&pa->pa_inode_list);
	spin_unlock(pa->pa_obj_lock);

	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
/*
 * creates new preallocated space for given inode
 */
static noinline_for_stack void
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;
	struct ext4_inode_info *ei;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);

	pa = ac->ac_pa;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int winl;
		int wins;
		int win;
		int offs;

		/* we can't allocate as much as normalizer wants.
		 * so, found space must get proper lstart
		 * to cover original request */
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

		/* we're limited by original request in that
		 * logical block must be covered any way
		 * winl is window we can move our chunk within */
		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;

		/* also, we should cover whole original request */
		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);

		/* the smallest one defines real window */
		win = min(winl, wins);

		offs = ac->ac_o_ex.fe_logical %
			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (offs && offs < win)
			win = offs;

		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
			EXT4_NUM_B2C(sbi, win);
		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
	}
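	/*
	 * Example (1:1 cluster ratio): original request = 4 blocks at
	 * logical 100, goal = 16 blocks at logical 96, but only 8 blocks
	 * were found.  winl = 100 - 96 = 4 and wins = 8 - 4 = 4, so
	 * win = 4; offs = 100 % 8 = 4 is not smaller, so fe_logical
	 * becomes 100 - 4 = 96 and the 8 found blocks still cover the
	 * original logical blocks 100..103.
	 */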
	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_lstart = ac->ac_b_ex.fe_logical;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_INODE_PA;

	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_inode_pa(ac, pa);

	ext4_mb_use_inode_pa(ac, pa);
	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);

	ei = EXT4_I(ac->ac_inode);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);

	pa->pa_obj_lock = &ei->i_prealloc_lock;
	pa->pa_inode = ac->ac_inode;

	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);

	spin_lock(pa->pa_obj_lock);
	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
	spin_unlock(pa->pa_obj_lock);
	atomic_inc(&ei->i_prealloc_active);
}
/*
 * creates new preallocated space for the locality group the inode belongs to
 */
static noinline_for_stack void
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);

	pa = ac->ac_pa;

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;

	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_group_pa(ac, pa);

	ext4_mb_use_group_pa(ac, pa);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	lg = ac->ac_lg;
	BUG_ON(lg == NULL);

	pa->pa_obj_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;

	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);

	/*
	 * We will later add the new pa to the right bucket
	 * after updating the pa_free in ext4_mb_release_context
	 */
}
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		ext4_mb_new_group_pa(ac);
	else
		ext4_mb_new_inode_pa(ac);
}
/*
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
 * @pa must be unlinked from inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold group/inode locks.
 * TODO: optimize the case when there are no in-core structures yet
 */
static noinline_for_stack int
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
			struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned int end;
	unsigned int next;
	ext4_group_t group;
	ext4_grpblk_t bit;
	unsigned long long grp_blk_start;
	int free = 0;

	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	end = bit + pa->pa_len;

	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
		mb_debug(sb, "free preallocated %u/%u in group %u\n",
			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
			 (unsigned) next - bit, (unsigned) group);
		free += next - bit;

		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
						    EXT4_C2B(sbi, bit)),
					       next - bit);
		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
		bit = next + 1;
	}
	if (free != pa->pa_free) {
		ext4_msg(e4b->bd_sb, KERN_CRIT,
			 "pa %p: logic %lu, phys. %lu, len %d",
			 pa, (unsigned long) pa->pa_lstart,
			 (unsigned long) pa->pa_pstart,
			 pa->pa_len);
		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
					free, pa->pa_free);
		/*
		 * pa is already deleted so we use the value obtained
		 * from the bitmap and continue.
		 */
	}
	atomic_add(free, &sbi->s_mb_discarded);

	return 0;
}
static noinline_for_stack int
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
				struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	ext4_group_t group;
	ext4_grpblk_t bit;

	trace_ext4_mb_release_group_pa(sb, pa);
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);

	return 0;
}
/*
 * releases all preallocations in given group
 *
 * first, we need to decide discard policy:
 * - when do we discard
 *   1) ENOSPC
 * - how many do we discard
 *   1) how many requested
 */
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
				     ext4_group_t group, int *busy)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;
	int free = 0;

	mb_debug(sb, "discard preallocation for group %u\n", group);
	if (list_empty(&grp->bb_prealloc_list))
		goto out_dbg;

	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		ext4_error_err(sb, -err,
			       "Error %d reading block bitmap for %u",
			       err, group);
		goto out_dbg;
	}

	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     err, group);
		put_bh(bitmap_bh);
		goto out_dbg;
	}

	INIT_LIST_HEAD(&list);
	ext4_lock_group(sb, group);
	list_for_each_entry_safe(pa, tmp,
				&grp->bb_prealloc_list, pa_group_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			spin_unlock(&pa->pa_lock);
			*busy = 1;
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		/* seems this one can be freed ... */
		ext4_mb_mark_pa_deleted(sb, pa);

		if (!free)
			this_cpu_inc(discard_pa_seq);

		/* we can trust pa_free ... */
		free += pa->pa_free;

		spin_unlock(&pa->pa_lock);

		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
	}

	/* now free all selected PAs */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {

		/* remove from object (inode or locality group) */
		spin_lock(pa->pa_obj_lock);
		list_del_rcu(&pa->pa_inode_list);
		spin_unlock(pa->pa_obj_lock);

		if (pa->pa_type == MB_GROUP_PA)
			ext4_mb_release_group_pa(&e4b, pa);
		else
			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);
	put_bh(bitmap_bh);
out_dbg:
	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
		 free, group, grp->bb_free);
	return free;
}
/*
 * releases all non-used preallocated blocks for given inode
 *
 * It's important to discard preallocations under i_data_sem
 * We don't want another block to be served from the prealloc
 * space when we are discarding the inode prealloc space.
 *
 * FIXME!! Make sure it is valid at all the call sites
 */
void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	ext4_group_t group = 0;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;

	if (!S_ISREG(inode->i_mode)) {
		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	mb_debug(sb, "discard preallocation for inode %lu\n",
		 inode->i_ino);
	trace_ext4_discard_preallocations(inode,
			atomic_read(&ei->i_prealloc_active), needed);

	INIT_LIST_HEAD(&list);

repeat:
	/* first, collect all pa's in the inode */
	spin_lock(&ei->i_prealloc_lock);
	while (!list_empty(&ei->i_prealloc_list) && needed) {
		pa = list_entry(ei->i_prealloc_list.prev,
				struct ext4_prealloc_space, pa_inode_list);
		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* this shouldn't happen often - nobody should
			 * use preallocation while we're discarding it */
			spin_unlock(&pa->pa_lock);
			spin_unlock(&ei->i_prealloc_lock);
			ext4_msg(sb, KERN_ERR,
				 "uh-oh! used pa while discarding");
			WARN_ON(1);
			schedule_timeout_uninterruptible(HZ);
			goto repeat;
		}
		if (pa->pa_deleted == 0) {
			ext4_mb_mark_pa_deleted(sb, pa);
			spin_unlock(&pa->pa_lock);
			list_del_rcu(&pa->pa_inode_list);
			list_add(&pa->u.pa_tmp_list, &list);
			needed--;
			continue;
		}

		/* someone is deleting pa right now */
		spin_unlock(&pa->pa_lock);
		spin_unlock(&ei->i_prealloc_lock);

		/* we have to wait here because pa_deleted
		 * doesn't mean pa is already unlinked from
		 * the list. as we might be called from
		 * ->clear_inode() the inode will get freed
		 * and concurrent thread which is unlinking
		 * pa from inode's list may access already
		 * freed memory, bad-bad-bad */

		/* XXX: if this happens too often, we can
		 * add a flag to force wait only in case
		 * of ->clear_inode(), but not in case of
		 * regular truncate */
		schedule_timeout_uninterruptible(HZ);
		goto repeat;
	}
	spin_unlock(&ei->i_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
		BUG_ON(pa->pa_type != MB_INODE_PA);
		group = ext4_get_group_number(sb, pa->pa_pstart);

		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
					     GFP_NOFS|__GFP_NOFAIL);
		if (err) {
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
				       err, group);
			continue;
		}

		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			err = PTR_ERR(bitmap_bh);
			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
				       err, group);
			ext4_mb_unload_buddy(&e4b);
			continue;
		}

		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		put_bh(bitmap_bh);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}
static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa;

	BUG_ON(ext4_pspace_cachep == NULL);
	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
	if (!pa)
		return -ENOMEM;
	atomic_set(&pa->pa_count, 1);
	ac->ac_pa = pa;
	return 0;
}

static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;

	BUG_ON(!pa);
	ac->ac_pa = NULL;
	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
	kmem_cache_free(ext4_pspace_cachep, pa);
}
#ifdef CONFIG_EXT4_DEBUG
static inline void ext4_mb_show_pa(struct super_block *sb)
{
	ext4_group_t i, ngroups;

	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
		return;

	ngroups = ext4_get_groups_count(sb);
	mb_debug(sb, "groups: ");
	for (i = 0; i < ngroups; i++) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
		struct ext4_prealloc_space *pa;
		ext4_grpblk_t start;
		struct list_head *cur;

		ext4_lock_group(sb, i);
		list_for_each(cur, &grp->bb_prealloc_list) {
			pa = list_entry(cur, struct ext4_prealloc_space,
					pa_group_list);
			spin_lock(&pa->pa_lock);
			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
						     NULL, &start);
			spin_unlock(&pa->pa_lock);
			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
				 pa->pa_len);
		}
		ext4_unlock_group(sb, i);
		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
			 grp->bb_fragments);
	}
}

static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;

	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
		return;

	mb_debug(sb, "Can't allocate:"
			" Allocation context details:");
	mb_debug(sb, "status %u flags 0x%x",
			ac->ac_status, ac->ac_flags);
	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
			"goal %lu/%lu/%lu@%lu, "
			"best %lu/%lu/%lu@%lu cr %d",
			(unsigned long)ac->ac_o_ex.fe_group,
			(unsigned long)ac->ac_o_ex.fe_start,
			(unsigned long)ac->ac_o_ex.fe_len,
			(unsigned long)ac->ac_o_ex.fe_logical,
			(unsigned long)ac->ac_g_ex.fe_group,
			(unsigned long)ac->ac_g_ex.fe_start,
			(unsigned long)ac->ac_g_ex.fe_len,
			(unsigned long)ac->ac_g_ex.fe_logical,
			(unsigned long)ac->ac_b_ex.fe_group,
			(unsigned long)ac->ac_b_ex.fe_start,
			(unsigned long)ac->ac_b_ex.fe_len,
			(unsigned long)ac->ac_b_ex.fe_logical,
			(int)ac->ac_criteria);
	mb_debug(sb, "%u found", ac->ac_found);
	ext4_mb_show_pa(sb);
}
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
	return;
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	ext4_mb_show_pa(ac->ac_sb);
	return;
}
#endif
/*
 * We use locality group preallocation for small files. The size of the
 * file is determined by the current size or the resulting size after
 * allocation, whichever is larger.
 *
 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	loff_t size, isize;

	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
	    !inode_is_open_for_write(ac->ac_inode)) {
		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
		return;
	}

	if (sbi->s_mb_group_prealloc <= 0) {
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	/* don't use group allocation for large files */
	size = max(size, isize);
	if (size > sbi->s_mb_stream_request) {
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	BUG_ON(ac->ac_lg != NULL);
	/*
	 * locality group prealloc space are per cpu. The reason for having
	 * per cpu locality group is to reduce the contention between block
	 * request from multiple CPUs.
	 */
	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);

	/* we're going to use group allocation */
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;

	/* serialize all allocations in the group */
	mutex_lock(&ac->ac_lg->lg_mutex);
}
static noinline_for_stack int
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t group;
	unsigned int len;
	ext4_fsblk_t goal;
	ext4_grpblk_t block;

	/* we can't allocate > group size */
	len = ar->len;

	/* just a dirty hack to filter too big requests  */
	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
		len = EXT4_CLUSTERS_PER_GROUP(sb);

	/* start searching from the goal */
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
			goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group, &block);

	/* set up allocation goals */
	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
	ac->ac_status = AC_STATUS_CONTINUE;
	ac->ac_sb = sb;
	ac->ac_inode = ar->inode;
	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
	ac->ac_o_ex.fe_group = group;
	ac->ac_o_ex.fe_start = block;
	ac->ac_o_ex.fe_len = len;
	ac->ac_g_ex = ac->ac_o_ex;
	ac->ac_flags = ar->flags;

	/* we have to define context: we'll work with a file or
	 * locality group. this is a policy, actually */
	ext4_mb_group_or_file(ac);

	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
			"left: %u/%u, right %u/%u to %swritable\n",
			(unsigned) ar->len, (unsigned) ar->logical,
			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
			(unsigned) ar->lleft, (unsigned) ar->pleft,
			(unsigned) ar->lright, (unsigned) ar->pright,
			inode_is_open_for_write(ar->inode) ? "" : "non-");
	return 0;
}
static noinline_for_stack void
ext4_mb_discard_lg_preallocations(struct super_block *sb,
					struct ext4_locality_group *lg,
					int order, int total_entries)
{
	ext4_group_t group = 0;
	struct ext4_buddy e4b;
	struct list_head discard_list;
	struct ext4_prealloc_space *pa, *tmp;

	mb_debug(sb, "discard locality group preallocation\n");

	INIT_LIST_HEAD(&discard_list);

	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
				pa_inode_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/*
			 * This is the pa that we just used
			 * for block allocation. So don't
			 * free that
			 */
			spin_unlock(&pa->pa_lock);
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		/* only lg prealloc space */
		BUG_ON(pa->pa_type != MB_GROUP_PA);

		/* seems this one can be freed ... */
		ext4_mb_mark_pa_deleted(sb, pa);
		spin_unlock(&pa->pa_lock);

		list_del_rcu(&pa->pa_inode_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);

		total_entries--;
		if (total_entries <= 5) {
			/*
			 * we want to keep only 5 entries
			 * allowing it to grow to 8. This
			 * makes sure we don't call discard
			 * soon for this list.
			 */
			break;
		}
	}
	spin_unlock(&lg->lg_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
		int err;

		group = ext4_get_group_number(sb, pa->pa_pstart);
		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
					     GFP_NOFS|__GFP_NOFAIL);
		if (err) {
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
				       err, group);
			continue;
		}
		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_group_pa(&e4b, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}
/*
 * We have incremented pa_count. So it cannot be freed at this
 * point. Also we hold lg_mutex. So no parallel allocation is
 * possible from this lg. That means pa_free cannot be updated.
 *
 * A parallel ext4_mb_discard_group_preallocations is possible,
 * which can cause the lg_prealloc_list to be updated.
 */
static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
{
	int order, added = 0, lg_prealloc_count = 1;
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;

	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;
	/* Add the prealloc space to lg */
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				pa_inode_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
			continue;
		}
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			/* Add to the tail of the previous entry */
			list_add_tail_rcu(&pa->pa_inode_list,
						&tmp_pa->pa_inode_list);
			added = 1;
			/*
			 * we want to count the total
			 * number of entries in the list
			 */
		}
		spin_unlock(&tmp_pa->pa_lock);
		lg_prealloc_count++;
	}
	if (!added)
		list_add_tail_rcu(&pa->pa_inode_list,
					&lg->lg_prealloc_list[order]);
	spin_unlock(&lg->lg_prealloc_lock);

	/* Now trim the list to be not more than 8 elements */
	if (lg_prealloc_count > 8)
		ext4_mb_discard_lg_preallocations(sb, lg,
						  order, lg_prealloc_count);
}
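/*
 * Net effect: each per-order list stays sorted by pa_free in ascending
 * order (a new pa is inserted in front of the first entry with more
 * free clusters), and the trim pass above keeps any one list from
 * growing much past 8 entries.
 */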
/*
 * if per-inode prealloc list is too long, trim some PA
 */
static void ext4_mb_trim_inode_pa(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int count, delta;

	count = atomic_read(&ei->i_prealloc_active);
	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
	if (count > sbi->s_mb_max_inode_prealloc + delta) {
		count -= sbi->s_mb_max_inode_prealloc;
		ext4_discard_preallocations(inode, count);
	}
}
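/*
 * Numeric sketch (assuming the default s_mb_max_inode_prealloc of 512,
 * per MB_DEFAULT_MAX_INODE_PREALLOC): delta = (512 >> 2) + 1 = 129, so
 * trimming kicks in once an inode holds more than 641 active PAs and
 * then discards the excess over 512, oldest first, since
 * ext4_discard_preallocations() walks the list from the tail.
 */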
/*
 * release all resources we used in allocation
 */
static int ext4_mb_release_context(struct ext4_allocation_context *ac)
{
	struct inode *inode = ac->ac_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *pa = ac->ac_pa;

	if (pa) {
		if (pa->pa_type == MB_GROUP_PA) {
			/* see comment in ext4_mb_use_group_pa() */
			spin_lock(&pa->pa_lock);
			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_free -= ac->ac_b_ex.fe_len;
			pa->pa_len -= ac->ac_b_ex.fe_len;
			spin_unlock(&pa->pa_lock);

			/*
			 * We want to add the pa to the right bucket.
			 * Remove it from the list and while adding
			 * make sure the list to which we are adding
			 * doesn't grow big.
			 */
			if (likely(pa->pa_free)) {
				spin_lock(pa->pa_obj_lock);
				list_del_rcu(&pa->pa_inode_list);
				spin_unlock(pa->pa_obj_lock);
				ext4_mb_add_n_trim(ac);
			}
		}

		if (pa->pa_type == MB_INODE_PA) {
			/*
			 * treat per-inode prealloc list as a lru list, then try
			 * to trim the least recently used PA.
			 */
			spin_lock(pa->pa_obj_lock);
			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
			spin_unlock(pa->pa_obj_lock);
		}

		ext4_mb_put_pa(ac, ac->ac_sb, pa);
	}
	if (ac->ac_bitmap_page)
		put_page(ac->ac_bitmap_page);
	if (ac->ac_buddy_page)
		put_page(ac->ac_buddy_page);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		mutex_unlock(&ac->ac_lg->lg_mutex);
	ext4_mb_collect_stats(ac);
	ext4_mb_trim_inode_pa(inode);
	return 0;
}
static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int ret;
	int freed = 0, busy = 0;
	int retry = 0;

	trace_ext4_mb_discard_preallocations(sb, needed);

	if (needed == 0)
		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
 repeat:
	for (i = 0; i < ngroups && needed > 0; i++) {
		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
		freed += ret;
		needed -= ret;
		cond_resched();
	}

	if (needed > 0 && busy && ++retry < 3) {
		busy = 0;
		goto repeat;
	}

	return freed;
}
static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
			struct ext4_allocation_context *ac, u64 *seq)
{
	int freed;
	u64 seq_retry = 0;
	bool ret = false;

	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
	if (freed) {
		ret = true;
		goto out_dbg;
	}
	seq_retry = ext4_get_discard_pa_seq_sum();
	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
		*seq = seq_retry;
		ret = true;
	}

out_dbg:
	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
	return ret;
}
static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp);

/*
 * Main entry point into mballoc to allocate blocks
 * it tries to use preallocation first, then falls back
 * to usual allocation
 */
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp)
{
	struct ext4_allocation_context *ac = NULL;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block = 0;
	unsigned int inquota = 0;
	unsigned int reserv_clstrs = 0;
	u64 seq;

	sb = ar->inode->i_sb;
	sbi = EXT4_SB(sb);

	trace_ext4_request_blocks(ar);
	if (sbi->s_mount_state & EXT4_FC_REPLAY)
		return ext4_mb_new_blocks_simple(handle, ar, errp);

	/* Allow to use superuser reservation for quota file */
	if (ext4_is_quota_file(ar->inode))
		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;

	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
		/* Without delayed allocation we need to verify
		 * there is enough free blocks to do block allocation
		 * and verify allocation doesn't exceed the quota limits.
		 */
		while (ar->len &&
			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {

			/* let others free the space */
			cond_resched();
			ar->len = ar->len >> 1;
		}
		if (!ar->len) {
			ext4_mb_show_pa(sb);
			*errp = -ENOSPC;
			return 0;
		}
		reserv_clstrs = ar->len;
		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
			dquot_alloc_block_nofail(ar->inode,
						 EXT4_C2B(sbi, ar->len));
		} else {
			while (ar->len &&
				dquot_alloc_block(ar->inode,
						  EXT4_C2B(sbi, ar->len))) {

				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
				ar->len--;
			}
		}
		inquota = ar->len;
		if (ar->len == 0) {
			*errp = -EDQUOT;
			goto out;
		}
	}

	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
	if (!ac) {
		ar->len = 0;
		*errp = -ENOMEM;
		goto out;
	}

	*errp = ext4_mb_initialize_context(ac, ar);
	if (*errp) {
		ar->len = 0;
		goto out;
	}

	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
	seq = this_cpu_read(discard_pa_seq);
	if (!ext4_mb_use_preallocated(ac)) {
		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
		ext4_mb_normalize_request(ac, ar);

		*errp = ext4_mb_pa_alloc(ac);
		if (*errp)
			goto errout;
repeat:
		/* allocate space in core */
		*errp = ext4_mb_regular_allocator(ac);
		/*
		 * pa allocated above is added to grp->bb_prealloc_list only
		 * when we were able to allocate some block i.e. when
		 * ac->ac_status == AC_STATUS_FOUND.
		 * And an error from above means ac->ac_status != AC_STATUS_FOUND
		 * So we have to free this pa here itself.
		 */
		if (*errp) {
			ext4_mb_pa_free(ac);
			ext4_discard_allocated_blocks(ac);
			goto errout;
		}
		if (ac->ac_status == AC_STATUS_FOUND &&
			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
			ext4_mb_pa_free(ac);
	}
	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
		if (*errp) {
			ext4_discard_allocated_blocks(ac);
			goto errout;
		} else {
			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
			ar->len = ac->ac_b_ex.fe_len;
		}
	} else {
		if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
			goto repeat;
		/*
		 * If block allocation fails then the pa allocated above
		 * needs to be freed here itself.
		 */
		ext4_mb_pa_free(ac);
		*errp = -ENOSPC;
	}

errout:
	if (*errp) {
		ac->ac_b_ex.fe_len = 0;
		ar->len = 0;
		ext4_mb_show_ac(ac);
	}
	ext4_mb_release_context(ac);
out:
	if (ac)
		kmem_cache_free(ext4_ac_cachep, ac);
	if (inquota && ar->len < inquota)
		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
	if (!ar->len) {
		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
			/* release all the reserved blocks if non delalloc */
			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
						reserv_clstrs);
	}

	trace_ext4_allocate_blocks(ar, (unsigned long long)block);

	return block;
}
/*
 * We can merge two free data extents only if the physical blocks
 * are contiguous, AND the extents were freed by the same transaction,
 * AND the blocks are associated with the same group.
 */
static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
					struct ext4_free_data *entry,
					struct ext4_free_data *new_entry,
					struct rb_root *entry_rb_root)
{
	if ((entry->efd_tid != new_entry->efd_tid) ||
	    (entry->efd_group != new_entry->efd_group))
		return;
	if (entry->efd_start_cluster + entry->efd_count ==
	    new_entry->efd_start_cluster) {
		new_entry->efd_start_cluster = entry->efd_start_cluster;
		new_entry->efd_count += entry->efd_count;
	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
		   entry->efd_start_cluster) {
		new_entry->efd_count += entry->efd_count;
	} else
		return;
	spin_lock(&sbi->s_md_lock);
	list_del(&entry->efd_list);
	spin_unlock(&sbi->s_md_lock);
	rb_erase(&entry->efd_node, entry_rb_root);
	kmem_cache_free(ext4_free_data_cachep, entry);
}
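/*
 * Example: an existing entry covering clusters 100..109 and a new
 * entry covering 110..119 (same tid, same group) collapse into one
 * new_entry covering 100..119; the old node is unlinked from both the
 * freed-data list and the per-group rb-tree and then freed.
 */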
static noinline_for_stack int
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
		      struct ext4_free_data *new_entry)
{
	ext4_group_t group = e4b->bd_group;
	ext4_grpblk_t cluster;
	ext4_grpblk_t clusters = new_entry->efd_count;
	struct ext4_free_data *entry;
	struct ext4_group_info *db = e4b->bd_info;
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct rb_node **n = &db->bb_free_root.rb_node, *node;
	struct rb_node *parent = NULL, *new_node;

	BUG_ON(!ext4_handle_valid(handle));
	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	new_node = &new_entry->efd_node;
	cluster = new_entry->efd_start_cluster;

	if (!*n) {
		/* first free block extent. We need to
		 * protect buddy cache from being freed,
		 * otherwise we'll refresh it from
		 * on-disk bitmap and lose not-yet-available
		 * blocks */
		get_page(e4b->bd_buddy_page);
		get_page(e4b->bd_bitmap_page);
	}
	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_free_data, efd_node);
		if (cluster < entry->efd_start_cluster)
			n = &(*n)->rb_left;
		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
			n = &(*n)->rb_right;
		else {
			ext4_grp_locked_error(sb, group, 0,
				ext4_group_first_block_no(sb, group) +
				EXT4_C2B(sbi, cluster),
				"Block already on to-be-freed list");
			kmem_cache_free(ext4_free_data_cachep, new_entry);
			return 0;
		}
	}

	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, &db->bb_free_root);

	/* Now try to see the extent can be merged to left and right */
	node = rb_prev(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		ext4_try_merge_freed_extent(sbi, entry, new_entry,
					    &(db->bb_free_root));
	}

	node = rb_next(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		ext4_try_merge_freed_extent(sbi, entry, new_entry,
					    &(db->bb_free_root));
	}

	spin_lock(&sbi->s_md_lock);
	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
	sbi->s_mb_free_pending += clusters;
	spin_unlock(&sbi->s_md_lock);
	return 0;
}
/*
 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
 * linearly starting at the goal block and also excludes the blocks which
 * are going to be in use after fast commit replay.
 */
static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp)
{
	struct buffer_head *bitmap_bh;
	struct super_block *sb = ar->inode->i_sb;
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_fsblk_t goal, block;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
			goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);

	ar->len = 0;
	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
	for (; group < ext4_get_groups_count(sb); group++) {
		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			*errp = PTR_ERR(bitmap_bh);
			pr_warn("Failed to read block bitmap\n");
			return 0;
		}

		ext4_get_group_no_and_offset(sb,
			max(ext4_group_first_block_no(sb, group), goal),
			NULL, &blkoff);
		while (1) {
			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
						blkoff);
			if (i >= max)
				break;
			if (ext4_fc_replay_check_excluded(sb,
				ext4_group_first_block_no(sb, group) + i)) {
				blkoff = i + 1;
			} else
				break;
		}
		brelse(bitmap_bh);
		if (i < max)
			break;
	}

	if (group >= ext4_get_groups_count(sb) || i >= max) {
		*errp = -ENOSPC;
		return 0;
	}

	block = ext4_group_first_block_no(sb, group) + i;
	ext4_mb_mark_bb(sb, block, 1, 1);
	ar->len = 1;

	return block;
}
static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
					unsigned long count)
{
	struct buffer_head *bitmap_bh;
	struct super_block *sb = inode->i_sb;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	int already_freed = 0, err, i;

	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		pr_warn("Failed to read block bitmap\n");
		return;
	}
	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
	if (!gdp)
		return;

	for (i = 0; i < count; i++) {
		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
			already_freed++;
	}
	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
	if (err)
		return;
	ext4_free_group_clusters_set(
		sb, gdp, ext4_free_group_clusters(sb, gdp) +
		count - already_freed);
	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
	sync_dirty_buffer(bitmap_bh);
	sync_dirty_buffer(gdp_bh);
}
/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @bh:			optional buffer of the block to be freed
 * @block:		starting physical block to be freed
 * @count:		number of blocks to be freed
 * @flags:		flags used by ext4_free_blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      struct buffer_head *bh, ext4_fsblk_t block,
		      unsigned long count, int flags)
{
	struct buffer_head *bitmap_bh = NULL;
	struct super_block *sb = inode->i_sb;
	struct ext4_group_desc *gdp;
	unsigned int overflow;
	ext4_grpblk_t bit;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	struct ext4_sb_info *sbi;
	struct ext4_buddy e4b;
	unsigned int count_clusters;
	int err = 0;
	int ret;

	sbi = EXT4_SB(sb);

	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
		ext4_free_blocks_simple(inode, block, count);
		return;
	}

	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug("freeing block %llu\n", block);
	trace_ext4_free_blocks(inode, block, count, flags);

	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		BUG_ON(count > 1);

		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
			    inode, bh, block);
	}
	/*
	 * If the extent to be freed does not begin on a cluster
	 * boundary, we need to deal with partial clusters at the
	 * beginning and end of the extent.  Normally we will free
	 * blocks at the beginning or the end unless we are explicitly
	 * requested to avoid doing so.
	 */
	overflow = EXT4_PBLK_COFF(sbi, block);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
			overflow = sbi->s_cluster_ratio - overflow;
			block += overflow;
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else {
			block -= overflow;
			count += overflow;
		}
	}
	overflow = EXT4_LBLK_COFF(sbi, count);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else
			count += sbi->s_cluster_ratio - overflow;
	}
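	/*
	 * Example (bigalloc, s_cluster_ratio = 16): freeing 20 blocks at
	 * physical block 35 gives EXT4_PBLK_COFF = 3, so the range is
	 * widened to start at block 32 with count 23; the tail rounding
	 * then adds 16 - (23 % 16) = 9 more blocks so that whole
	 * clusters (blocks 32..63, i.e. clusters 2 and 3) are freed,
	 * unless the NOFREE_FIRST/LAST_CLUSTER flags request trimming
	 * the partial clusters off instead.
	 */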
	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		int i;
		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;

		for (i = 0; i < count; i++) {
			cond_resched();
			if (is_metadata)
				bh = sb_find_get_block(inode->i_sb, block + i);
			ext4_forget(handle, is_metadata, inode, bh, block + i);
		}
	}

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
			ext4_get_group_info(sb, block_group))))
		return;

	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = EXT4_C2B(sbi, bit) + count -
			EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	count_clusters = EXT4_NUM_B2C(sbi, count);
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!gdp) {
		err = -EIO;
		goto error_return;
	}

	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
	    in_range(block, ext4_inode_table(sb, gdp),
		     sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
		     sbi->s_itb_per_group)) {

		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no op */
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < count_clusters; i++)
			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
	}
#endif
	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);

	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
				     GFP_NOFS|__GFP_NOFAIL);
	if (err)
		goto error_return;

	/*
	 * We need to make sure we don't reuse the freed block until after the
	 * transaction is committed. We make an exception if the inode is to be
	 * written in writeback mode since writeback mode has weak data
	 * consistency guarantees.
	 */
	if (ext4_handle_valid(handle) &&
	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
	     !ext4_should_writeback_data(inode))) {
		struct ext4_free_data *new_entry;
		/*
		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
		 * to fail.
		 */
		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
				GFP_NOFS|__GFP_NOFAIL);
		new_entry->efd_start_cluster = bit;
		new_entry->efd_group = block_group;
		new_entry->efd_count = count_clusters;
		new_entry->efd_tid = handle->h_transaction->t_tid;

		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		ext4_mb_free_metadata(handle, &e4b, new_entry);
	} else {
		/* need to update group_info->bb_free and bitmap
		 * with group lock held. generate_buddy look at
		 * them with group lock_held
		 */
		if (test_opt(sb, DISCARD)) {
			err = ext4_issue_discard(sb, block_group, bit, count,
						 NULL);
			if (err && err != -EOPNOTSUPP)
				ext4_msg(sb, KERN_WARNING, "discard request in"
					 " group:%d block:%d count:%lu failed"
					 " with %d", block_group, bit, count,
					 err);
		} else
			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);

		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		mb_free_blocks(inode, &e4b, bit, count_clusters);
	}

	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
	ext4_free_group_clusters_set(sb, gdp, ret);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(count_clusters,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	/*
	 * on a bigalloc file system, defer the s_freeclusters_counter
	 * update to the caller (ext4_remove_space and friends) so they
	 * can determine if a cluster freed here should be rereserved
	 */
	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
		percpu_counter_add(&sbi->s_freeclusters_counter,
				   count_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

	if (overflow && !err) {
		block += count;
		count = overflow;
		put_bh(bitmap_bh);
		goto do_more;
	}
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}
/**
 * ext4_group_add_blocks() -- Add given blocks to an existing group
 * @handle:	handle to this transaction
 * @sb:		super block
 * @block:	start physical block to add to the block group
 * @count:	number of blocks to add
 *
 * This marks the blocks as free in the bitmap and buddy.
 */
int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned int i;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_buddy e4b;
	int err = 0, ret, free_clusters_count;
	ext4_grpblk_t clusters_freed;
	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
	unsigned long cluster_count = last_cluster - first_cluster + 1;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	if (count == 0)
		return 0;

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u",
			     block_group);
		err = -EINVAL;
		goto error_return;
	}

	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}

	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc) {
		err = -EIO;
		goto error_return;
	}

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;

	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			clusters_freed++;
		}
	}

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	/*
	 * need to update group_info->bb_free and bitmap
	 * with the group lock held; ext4_mb_generate_buddy()
	 * looks at them with the group lock held as well
	 */
	ext4_lock_group(sb, block_group);
	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
	mb_free_blocks(NULL, &e4b, bit, cluster_count);
	free_clusters_count = clusters_freed +
		ext4_free_group_clusters(sb, desc);
	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   clusters_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(clusters_freed,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return err;
}
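
/*
 * Illustrative sketch (not part of this file): online resize
 * (fs/ext4/resize.c) is the expected caller of ext4_group_add_blocks(),
 * handing blocks gained by growing the last group back to the allocator,
 * roughly:
 *
 *	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
 *
 * where o_blocks_count is the old filesystem size in blocks and add is
 * the number of blocks grafted on; those variable names are assumptions
 * here, not a quotation of the resize code.
 */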
/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To ensure that
 * no one allocates those blocks while they are being trimmed, they are
 * first marked as used in the buddy bitmap. This must be called under the
 * group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
		int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}
static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count;
	void *bitmap;

	bitmap = e4b->bd_bitmap;
	start = (e4b->bd_info->bb_first_free > start) ?
		e4b->bd_info->bb_first_free : start;
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				break;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	return count;
}
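
/*
 * Worked example (illustrative): with minblocks = 4 and a group whose
 * bitmap holds free runs of 3, 8 and 2 clusters, only the 8-cluster run
 * is passed to ext4_trim_extent() (count becomes 8), while free_count
 * accumulates all 13 free clusters seen.  Once bb_free - free_count
 * drops below minblocks, no remaining run can be large enough to trim,
 * so the scan stops early.
 */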
/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:		super block for file system
 * @group:	group to be trimmed
 * @start:	first group block to examine
 * @max:	last group block to examine
 * @minblocks:	minimum extent block count
 *
 * ext4_trim_all_free walks through group's block bitmap searching for free
 * extents. When a free extent is found, it is marked as used in the group
 * buddy bitmap. Then a TRIM command is issued on this extent and the
 * extent is freed again in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) {
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
		if (ret >= 0)
			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	} else {
		ret = 0;
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}
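
/*
 * Illustrative note (not part of this file): the WAS_TRIMMED flag makes
 * repeated FITRIM calls cheap.  If a group was last trimmed with
 * minblocks = 8, a later request with minblocks = 16 skips the group
 * (nothing new could qualify), while a request with minblocks = 4 forces
 * a rescan.  Freeing blocks without -o discard clears the flag (see
 * EXT4_MB_GRP_CLEAR_TRIMMED earlier in this file), so the next FITRIM
 * revisits the group.
 */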
/**
 * ext4_trim_fs() -- trim ioctl handle function
 * @sb:		superblock for filesystem
 * @range:	fstrim_range structure
 *
 * start:	First Byte to trim
 * len:		number of Bytes to trim from start
 * minlen:	minimum extent length in Bytes
 * ext4_trim_fs goes through all allocation groups containing Bytes from
 * start to start+len. For each such group, ext4_trim_all_free() is
 * invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point to try to trim less than discard granularity */
	if (range->minlen < q->limits.discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				q->limits.discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all the groups except the last one, last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group, note that last_cluster is
		 * already computed earlier by ext4_get_group_no_and_offset()
		 */
		if (group == last_group)
			end = last_cluster;

		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}
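
/*
 * Illustrative usage (not part of this file): ext4_trim_fs() backs the
 * FITRIM ioctl, which userspace (e.g. fstrim(8)) drives roughly like:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,	// whole filesystem
 *		.minlen	= 0,		// raised to the device's
 *					// discard granularity above
 *	};
 *	ioctl(fd, FITRIM, &range);
 *	// on return, range.len holds the number of bytes trimmed
 *
 * where fd is an open descriptor on any file or directory in the
 * filesystem.
 */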
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
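
/*
 * Illustrative sketch (not part of this file): a minimal formatter
 * callback for ext4_mballoc_query_range(); fs/ext4/fsmap.c (GETFSMAP)
 * consumes free-extent data this way.  The names below are assumptions
 * for illustration only:
 *
 *	static int count_free_clusters(struct super_block *sb,
 *				       ext4_group_t group, ext4_grpblk_t start,
 *				       ext4_grpblk_t len, void *priv)
 *	{
 *		*(ext4_grpblk_t *)priv += len;
 *		return 0;	// nonzero aborts the iteration
 *	}
 *
 *	ext4_grpblk_t free = 0;
 *	err = ext4_mballoc_query_range(sb, group, 0,
 *			EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *			count_free_clusters, &free);
 *
 * Note that the formatter runs with the group lock dropped (see the
 * unlock above the call), so it is allowed to sleep.
 */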