1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4 * Written by Alex Tomas <alex@clusterfs.com>
5 */
6
7
8 /*
9 * mballoc.c contains the multiblocks allocation routines
10 */
11
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <trace/events/ext4.h>
20
21 /*
22 * MUSTDO:
23 * - test ext4_ext_search_left() and ext4_ext_search_right()
24 * - search for metadata in few groups
25 *
26 * TODO v4:
27 * - normalization should take into account whether file is still open
28 * - discard preallocations if no free space left (policy?)
29 * - don't normalize tails
30 * - quota
31 * - reservation for superuser
32 *
33 * TODO v3:
34 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
35 * - track min/max extents in each group for better group selection
36 * - mb_mark_used() may allocate chunk right after splitting buddy
37 * - tree of groups sorted by number of free blocks
38 * - error handling
39 */
40
41 /*
42 * An allocation request involves a request for multiple blocks near
43 * the specified goal block.
44 *
45 * During the initialization phase of the allocator we decide to use
46 * group preallocation or inode preallocation depending on the size of
47 * the file. The size of the file could be the resulting file size we
48 * would have after allocation, or the current file size, whichever
49 * is larger. If the size is less than sbi->s_mb_stream_request we
50 * use group preallocation. The default value of
51 * s_mb_stream_request is 16 blocks. This can also be tuned via
52 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
53 * terms of number of blocks.
54 *
55 * The main motivation for having small files use group preallocation is
56 * to ensure that small files are placed closer together on the disk.
57 *
58 * In the first stage, the allocator looks at the inode prealloc list,
59 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
60 * spaces for this particular inode. The inode prealloc space is
61 * represented as:
62 *
63 * pa_lstart -> the logical start block for this prealloc space
64 * pa_pstart -> the physical start block for this prealloc space
65 * pa_len -> length for this prealloc space (in clusters)
66 * pa_free -> free space available in this prealloc space (in clusters)
67 *
68 * The inode preallocation space is used by looking at the _logical_ start
69 * block. Only if the logical file block falls within the range of a prealloc
70 * space do we consume that particular prealloc space. This makes sure that
71 * we have contiguous physical blocks representing the file blocks.
72 *
73 * The important thing to note about inode prealloc space is that
74 * we don't modify the values associated with it except for
75 * pa_free.
76 *
77 * If we are not able to find blocks in the inode prealloc space and if we
78 * have the group allocation flag set then we look at the locality group
79 * prealloc space. This is a per-CPU prealloc list, represented as
80 *
81 * ext4_sb_info.s_locality_groups[smp_processor_id()]
82 *
83 * The reason for having a per cpu locality group is to reduce the contention
84 * between CPUs. It is possible to get scheduled at this point.
85 *
86 * The locality group prealloc space is used after checking whether we have
87 * enough free space (pa_free) within the prealloc space.
88 *
89 * If we can't allocate blocks via inode prealloc and/or locality group
90 * prealloc then we look at the buddy cache. The buddy cache is represented
91 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
92 * mapped to the buddy and bitmap information regarding different
93 * groups. The buddy information is attached to buddy cache inode so that
94 * we can access them through the page cache. The information regarding
95 * each group is loaded via ext4_mb_load_buddy. The information involves
96 * the block bitmap and buddy information. The information is stored in the
97 * inode as:
98 *
99 * { page }
100 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
101 *
102 *
103 * one block each for bitmap and buddy information. So for each group we
104 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
105 * blocksize) blocks. So it can have information regarding groups_per_page
106 * which is blocks_per_page/2
107 *
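 * As a worked example (illustrative numbers): with a 4k page and 1k
 * blocks, blocks_per_page = 4096 / 1024 = 4, so one page holds
 * groups_per_page = 4 / 2 = 2 groups. With 4k blocks, blocks_per_page
 * is 1 and the bitmap and buddy of a group land on two separate pages.
 *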
108 * The buddy cache inode is not stored on disk. The inode is thrown
109 * away when the filesystem is unmounted.
110 *
111 * We look for count number of blocks in the buddy cache. If we were able
112 * to locate that many free blocks we return with additional information
113 * regarding the rest of the contiguous physical blocks available.
114 *
115 * Before allocating blocks via buddy cache we normalize the request
116 * blocks. This ensures we ask for more blocks than we need. The extra
117 * blocks that we get after allocation are added to the respective prealloc
118 * list. In case of inode preallocation we follow a list of heuristics
119 * based on file size. This can be found in ext4_mb_normalize_request. If
120 * we are doing a group prealloc we try to normalize the request to
121 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
122 * dependent on the cluster size; for non-bigalloc file systems, it is
123 * 512 blocks. This can be tuned via
124 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
125 * terms of number of blocks. If we have mounted the file system with -O
126 * stripe=<value> option the group prealloc request is normalized to the
127 * smallest multiple of the stripe value (sbi->s_stripe) which is
128 * greater than the default mb_group_prealloc.
129 *
130 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
131 * info structures in two data structures:
132 *
133 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
134 *
135 * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
136 *
137 * This is an array of lists where the index in the array represents the
138 * largest free order in the buddy bitmap of the participating group infos of
139 * that list. So, there are exactly MB_NUM_ORDERS(sb) (i.e. the total
140 * number of buddy bitmap orders possible) lists. Group-infos are
141 * placed in appropriate lists.
142 *
143 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
144 *
145 * Locking: sbi->s_mb_rb_lock (rwlock)
146 *
147 * This is a red black tree consisting of group infos and the tree is sorted
148 * by average fragment sizes (which is calculated as ext4_group_info->bb_free
149 * / ext4_group_info->bb_fragments).
150 *
151 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
152 * structures to decide the order in which groups are to be traversed for
153 * fulfilling an allocation request.
154 *
155 * At CR = 0, we look for groups which have the largest_free_order >= the order
156 * of the request. We directly look at the largest free order list in the data
157 * structure (1) above where largest_free_order = order of the request. If that
158 * list is empty, we look at the remaining lists in increasing order of
159 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
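 *
 * For example (illustrative): a request of 16 clusters has order 4, so
 * we scan s_mb_largest_free_orders[4] first; if that list is empty we
 * move on to [5], [6], ... until a group with a usable buddy is found.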
160 *
161 * At CR = 1, we only consider groups where average fragment size > request
162 * size. So, we lookup a group which has average fragment size just above or
163 * equal to request size using our rb tree (data structure 2) in O(log N) time.
164 *
165 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
166 * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
167 *
168 * The regular allocator (using the buddy cache) supports a few tunables.
169 *
170 * /sys/fs/ext4/<partition>/mb_min_to_scan
171 * /sys/fs/ext4/<partition>/mb_max_to_scan
172 * /sys/fs/ext4/<partition>/mb_order2_req
173 * /sys/fs/ext4/<partition>/mb_linear_limit
174 *
175 * The regular allocator uses buddy scan only if the request len is power of
176 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
177 * value of s_mb_order2_reqs can be tuned via
178 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
179 * stripe size (sbi->s_stripe), we try to search for contiguous block in
180 * stripe size. This should result in better allocation on RAID setups. If
181 * not, we search in the specific group using bitmap for best extents. The
182 * tunable min_to_scan and max_to_scan control the behaviour here.
183 * min_to_scan indicates how long mballoc __must__ look for a best
184 * extent and max_to_scan indicates how long mballoc __can__ look for a
185 * best extent in the found extents. Searching for the blocks starts with
186 * the group specified as the goal value in allocation context via
187 * ac_g_ex. Each group is first checked based on the criteria whether it
188 * can be used for allocation. ext4_mb_good_group explains how the groups are
189 * checked.
190 *
191 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
192 * get traversed linearly. That may result in subsequent allocations not being
193 * close to each other. And so, the underlying device may get filled up in a
194 * non-linear fashion. While that may not matter on non-rotational devices, for
195 * rotational devices that may result in higher seek times. "mb_linear_limit"
196 * tells mballoc how many groups it should search linearly before
197 * consulting the above data structures for more efficient lookups. For
198 * non-rotational devices, this value defaults to 0 and for rotational devices
199 * this is set to MB_DEFAULT_LINEAR_LIMIT.
200 *
201 * Both prealloc spaces are populated as described above. So for the first
202 * request we will hit the buddy cache which will result in this prealloc
203 * space getting filled. The prealloc space is then later used for the
204 * subsequent request.
205 */
206
207 /*
208 * mballoc operates on the following data:
209 * - on-disk bitmap
210 * - in-core buddy (actually includes buddy and bitmap)
211 * - preallocation descriptors (PAs)
212 *
213 * there are two types of preallocations:
214 * - inode
215 * assigned to a specific inode and can be used for this inode only.
216 * it describes part of the inode's space preallocated to specific
217 * physical blocks. any block from that preallocation can be used
218 * independently. the descriptor just tracks the number of blocks left
219 * unused. so, before taking some block from the descriptor, one must
220 * make sure the corresponding logical block isn't allocated yet. this
221 * also means that freeing any block within descriptor's range
222 * must discard all preallocated blocks.
223 * - locality group
224 * assigned to specific locality group which does not translate to
225 * permanent set of inodes: inode can join and leave group. space
226 * from this type of preallocation can be used for any inode. thus
227 * it's consumed from the beginning to the end.
228 *
229 * relation between them can be expressed as:
230 * in-core buddy = on-disk bitmap + preallocation descriptors
231 *
232 * this means the blocks mballoc considers used are:
233 * - allocated blocks (persistent)
234 * - preallocated blocks (non-persistent)
235 *
236 * consistency in mballoc world means that at any time a block is either
237 * free or used in ALL structures. notice: "any time" should not be read
238 * literally -- time is discrete and delimited by locks.
239 *
240 * to keep it simple, we don't use block numbers, instead we count number of
241 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
242 *
243 * all operations can be expressed as:
244 * - init buddy: buddy = on-disk + PAs
245 * - new PA: buddy += N; PA = N
246 * - use inode PA: on-disk += N; PA -= N
247 * - discard inode PA buddy -= on-disk - PA; PA = 0
248 * - use locality group PA on-disk += N; PA -= N
249 * - discard locality group PA buddy -= PA; PA = 0
250 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
251 * is used in real operation because we can't know actual used
252 * bits from PA, only from on-disk bitmap
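 *
 * worked example (illustrative numbers): a group has 100 clusters
 * allocated on-disk plus one inode PA with PA = 16, so init buddy
 * marks 116 clusters used. using 4 clusters from the PA gives
 * on-disk = 104, PA = 12, buddy unchanged. discarding the PA then
 * clears the 12 never-used clusters in the buddy and sets PA = 0.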
253 *
254 * if we follow this strict logic, then all operations above should be atomic.
255 * given some of them can block, we'd have to use something like semaphores
256 * killing performance on high-end SMP hardware. let's try to relax it using
257 * the following knowledge:
258 * 1) if buddy is referenced, it's already initialized
259 * 2) while block is used in buddy and the buddy is referenced,
260 * nobody can re-allocate that block
261 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
262 * bit set and a PA claims the same block, it's OK. IOW, one can set a bit in
263 * the on-disk bitmap if the buddy has the same bit set and/or a PA covers the
264 * corresponding block
265 *
266 * so, now we're building a concurrency table:
267 * - init buddy vs.
268 * - new PA
269 * blocks for PA are allocated in the buddy, buddy must be referenced
270 * until PA is linked to allocation group to avoid concurrent buddy init
271 * - use inode PA
272 * we need to make sure that either on-disk bitmap or PA has uptodate data
273 * given (3) we care that PA-=N operation doesn't interfere with init
274 * - discard inode PA
275 * the simplest way would be to have buddy initialized by the discard
276 * - use locality group PA
277 * again PA-=N must be serialized with init
278 * - discard locality group PA
279 * the simplest way would be to have buddy initialized by the discard
280 * - new PA vs.
281 * - use inode PA
282 * i_data_sem serializes them
283 * - discard inode PA
284 * discard process must wait until PA isn't used by another process
285 * - use locality group PA
286 * some mutex should serialize them
287 * - discard locality group PA
288 * discard process must wait until PA isn't used by another process
289 * - use inode PA
290 * - use inode PA
291 * i_data_sem or another mutex should serialize them
292 * - discard inode PA
293 * discard process must wait until PA isn't used by another process
294 * - use locality group PA
295 * nothing wrong here -- they're different PAs covering different blocks
296 * - discard locality group PA
297 * discard process must wait until PA isn't used by another process
298 *
299 * now we're ready to draw a few conclusions:
300 * - while a PA is referenced, no discard of it is possible
301 * - a PA is referenced until its blocks are marked in the on-disk bitmap
302 * - a PA changes only after the on-disk bitmap does
303 * - discard must not compete with init. either init is done before
304 * any discard or they're serialized somehow
305 * - buddy init as sum of on-disk bitmap and PAs is done atomically
306 *
307 * a special case is when we've used a PA to emptiness. no need to modify the
308 * buddy in this case, but we should care about concurrent init
309 *
310 */
311
312 /*
313 * Logic in few words:
314 *
315 * - allocation:
316 * load group
317 * find blocks
318 * mark bits in on-disk bitmap
319 * release group
320 *
321 * - use preallocation:
322 * find proper PA (per-inode or group)
323 * load group
324 * mark bits in on-disk bitmap
325 * release group
326 * release PA
327 *
328 * - free:
329 * load group
330 * mark bits in on-disk bitmap
331 * release group
332 *
333 * - discard preallocations in group:
334 * mark PAs deleted
335 * move them onto local list
336 * load on-disk bitmap
337 * load group
338 * remove PA from object (inode or locality group)
339 * mark free blocks in-core
340 *
341 * - discard inode's preallocations:
342 */
343
344 /*
345 * Locking rules
346 *
347 * Locks:
348 * - bitlock on a group (group)
349 * - object (inode/locality) (object)
350 * - per-pa lock (pa)
351 * - cr0 lists lock (cr0)
352 * - cr1 tree lock (cr1)
353 *
354 * Paths:
355 * - new pa
356 * object
357 * group
358 *
359 * - find and use pa:
360 * pa
361 *
362 * - release consumed pa:
363 * pa
364 * group
365 * object
366 *
367 * - generate in-core bitmap:
368 * group
369 * pa
370 *
371 * - discard all for given object (inode, locality group):
372 * object
373 * pa
374 * group
375 *
376 * - discard all for given group:
377 * group
378 * pa
379 * group
380 * object
381 *
382 * - allocation path (ext4_mb_regular_allocator)
383 * group
384 * cr0/cr1
385 */
386 static struct kmem_cache *ext4_pspace_cachep;
387 static struct kmem_cache *ext4_ac_cachep;
388 static struct kmem_cache *ext4_free_data_cachep;
389
390 /* We create slab caches for groupinfo data structures based on the
391 * superblock block size. There will be one per mounted filesystem for
392 * each unique s_blocksize_bits */
393 #define NR_GRPINFO_CACHES 8
394 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
395
396 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
397 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
398 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
399 "ext4_groupinfo_64k", "ext4_groupinfo_128k"
400 };
401
402 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
403 ext4_group_t group);
404 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
405 ext4_group_t group);
406 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
407
408 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
409 ext4_group_t group, int cr);
410
411 static int ext4_try_to_trim_range(struct super_block *sb,
412 struct ext4_buddy *e4b, ext4_grpblk_t start,
413 ext4_grpblk_t max, ext4_grpblk_t minblocks);
414
415 /*
416 * The algorithm using this percpu seq counter goes as follows:
417 * 1. We sample the percpu discard_pa_seq counter before trying for block
418 * allocation in ext4_mb_new_blocks().
419 * 2. We increment this percpu discard_pa_seq counter when we either allocate
420 * or free these blocks i.e. while marking those blocks as used/free in
421 * mb_mark_used()/mb_free_blocks().
422 * 3. We also increment this percpu seq counter when we successfully identify
423 * that the bb_prealloc_list is not empty and hence proceed for discarding
424 * of those PAs inside ext4_mb_discard_group_preallocations().
425 *
426 * Now to make sure that the regular fast path of block allocation is not
427 * affected, as a small optimization we only sample the percpu seq counter
428 * on that cpu. Only when the block allocation fails and no freed blocks
429 * were found do we sample the percpu seq counter for all cpus using the
430 * function ext4_get_discard_pa_seq_sum() below. This happens after making
431 * sure that all the PAs on grp->bb_prealloc_list got freed or the list is empty.
432 */
433 static DEFINE_PER_CPU(u64, discard_pa_seq);
434 static inline u64 ext4_get_discard_pa_seq_sum(void)
435 {
436 int __cpu;
437 u64 __seq = 0;
438
439 for_each_possible_cpu(__cpu)
440 __seq += per_cpu(discard_pa_seq, __cpu);
441 return __seq;
442 }
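
/*
 * Illustrative sketch (not kernel code): one way a caller can use the
 * counter above to decide whether a failed allocation is worth a retry.
 * try_allocate() is a hypothetical stand-in for the real allocation
 * path; the kernel additionally samples only the local CPU's counter
 * in the fast path before falling back to the full sum.
 */
static bool mb_alloc_with_discard_retry(void)
{
	u64 seq, now;

	seq = ext4_get_discard_pa_seq_sum();
	for (;;) {
		if (try_allocate())	/* hypothetical helper */
			return true;
		/* retry only if someone freed blocks or discarded PAs */
		now = ext4_get_discard_pa_seq_sum();
		if (now == seq)
			return false;
		seq = now;
	}
}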
443
444 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
445 {
446 #if BITS_PER_LONG == 64
447 *bit += ((unsigned long) addr & 7UL) << 3;
448 addr = (void *) ((unsigned long) addr & ~7UL);
449 #elif BITS_PER_LONG == 32
450 *bit += ((unsigned long) addr & 3UL) << 3;
451 addr = (void *) ((unsigned long) addr & ~3UL);
452 #else
453 #error "how many bits you are?!"
454 #endif
455 return addr;
456 }
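
/*
 * Worked example (illustrative): on a 64-bit machine, addr = 0x1006 and
 * *bit = 3 become addr = 0x1000 and *bit = 3 + (6 << 3) = 51 -- the same
 * bit, now expressed relative to an unsigned-long-aligned address.
 */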
457
458 static inline int mb_test_bit(int bit, void *addr)
459 {
460 /*
461 * ext4_test_bit on architecture like powerpc
462 * needs unsigned long aligned address
463 */
464 addr = mb_correct_addr_and_bit(&bit, addr);
465 return ext4_test_bit(bit, addr);
466 }
467
468 static inline void mb_set_bit(int bit, void *addr)
469 {
470 addr = mb_correct_addr_and_bit(&bit, addr);
471 ext4_set_bit(bit, addr);
472 }
473
474 static inline void mb_clear_bit(int bit, void *addr)
475 {
476 addr = mb_correct_addr_and_bit(&bit, addr);
477 ext4_clear_bit(bit, addr);
478 }
479
480 static inline int mb_test_and_clear_bit(int bit, void *addr)
481 {
482 addr = mb_correct_addr_and_bit(&bit, addr);
483 return ext4_test_and_clear_bit(bit, addr);
484 }
485
486 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
487 {
488 int fix = 0, ret, tmpmax;
489 addr = mb_correct_addr_and_bit(&fix, addr);
490 tmpmax = max + fix;
491 start += fix;
492
493 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
494 if (ret > max)
495 return max;
496 return ret;
497 }
498
499 static inline int mb_find_next_bit(void *addr, int max, int start)
500 {
501 int fix = 0, ret, tmpmax;
502 addr = mb_correct_addr_and_bit(&fix, addr);
503 tmpmax = max + fix;
504 start += fix;
505
506 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
507 if (ret > max)
508 return max;
509 return ret;
510 }
511
512 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
513 {
514 char *bb;
515
516 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
517 BUG_ON(max == NULL);
518
519 if (order > e4b->bd_blkbits + 1) {
520 *max = 0;
521 return NULL;
522 }
523
524 /* at order 0 we see each particular block */
525 if (order == 0) {
526 *max = 1 << (e4b->bd_blkbits + 3);
527 return e4b->bd_bitmap;
528 }
529
530 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
531 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
532
533 return bb;
534 }
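
/*
 * Worked example (illustrative): with 4k blocks, bd_blkbits = 12, so
 * order 0 maps to the block bitmap itself with max = 1 << 15 = 32768
 * bits (clusters per group); higher orders index into the buddy via
 * s_mb_offsets[order], with s_mb_maxs[order] shrinking at each level.
 */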
535
536 #ifdef DOUBLE_CHECK
537 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
538 int first, int count)
539 {
540 int i;
541 struct super_block *sb = e4b->bd_sb;
542
543 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
544 return;
545 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
546 for (i = 0; i < count; i++) {
547 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
548 ext4_fsblk_t blocknr;
549
550 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
551 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
552 ext4_grp_locked_error(sb, e4b->bd_group,
553 inode ? inode->i_ino : 0,
554 blocknr,
555 "freeing block already freed "
556 "(bit %u)",
557 first + i);
558 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
559 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
560 }
561 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
562 }
563 }
564
565 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
566 {
567 int i;
568
569 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
570 return;
571 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
572 for (i = 0; i < count; i++) {
573 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
574 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
575 }
576 }
577
578 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
579 {
580 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
581 return;
582 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
583 unsigned char *b1, *b2;
584 int i;
585 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
586 b2 = (unsigned char *) bitmap;
587 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
588 if (b1[i] != b2[i]) {
589 ext4_msg(e4b->bd_sb, KERN_ERR,
590 "corruption in group %u "
591 "at byte %u(%u): %x in copy != %x "
592 "on disk/prealloc",
593 e4b->bd_group, i, i * 8, b1[i], b2[i]);
594 BUG();
595 }
596 }
597 }
598 }
599
600 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
601 struct ext4_group_info *grp, ext4_group_t group)
602 {
603 struct buffer_head *bh;
604
605 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
606 if (!grp->bb_bitmap)
607 return;
608
609 bh = ext4_read_block_bitmap(sb, group);
610 if (IS_ERR_OR_NULL(bh)) {
611 kfree(grp->bb_bitmap);
612 grp->bb_bitmap = NULL;
613 return;
614 }
615
616 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
617 put_bh(bh);
618 }
619
620 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
621 {
622 kfree(grp->bb_bitmap);
623 }
624
625 #else
626 static inline void mb_free_blocks_double(struct inode *inode,
627 struct ext4_buddy *e4b, int first, int count)
628 {
629 return;
630 }
631 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
632 int first, int count)
633 {
634 return;
635 }
636 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
637 {
638 return;
639 }
640
641 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
642 struct ext4_group_info *grp, ext4_group_t group)
643 {
644 return;
645 }
646
647 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
648 {
649 return;
650 }
651 #endif
652
653 #ifdef AGGRESSIVE_CHECK
654
655 #define MB_CHECK_ASSERT(assert) \
656 do { \
657 if (!(assert)) { \
658 printk(KERN_EMERG \
659 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
660 function, file, line, # assert); \
661 BUG(); \
662 } \
663 } while (0)
664
665 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
666 const char *function, int line)
667 {
668 struct super_block *sb = e4b->bd_sb;
669 int order = e4b->bd_blkbits + 1;
670 int max;
671 int max2;
672 int i;
673 int j;
674 int k;
675 int count;
676 struct ext4_group_info *grp;
677 int fragments = 0;
678 int fstart;
679 struct list_head *cur;
680 void *buddy;
681 void *buddy2;
682
683 if (e4b->bd_info->bb_check_counter++ % 10)
684 return 0;
685
686 while (order > 1) {
687 buddy = mb_find_buddy(e4b, order, &max);
688 MB_CHECK_ASSERT(buddy);
689 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
690 MB_CHECK_ASSERT(buddy2);
691 MB_CHECK_ASSERT(buddy != buddy2);
692 MB_CHECK_ASSERT(max * 2 == max2);
693
694 count = 0;
695 for (i = 0; i < max; i++) {
696
697 if (mb_test_bit(i, buddy)) {
698 /* only single bit in buddy2 may be 1 */
699 if (!mb_test_bit(i << 1, buddy2)) {
700 MB_CHECK_ASSERT(
701 mb_test_bit((i<<1)+1, buddy2));
702 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
703 MB_CHECK_ASSERT(
704 mb_test_bit(i << 1, buddy2));
705 }
706 continue;
707 }
708
709 /* both bits in buddy2 must be 1 */
710 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
711 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
712
713 for (j = 0; j < (1 << order); j++) {
714 k = (i * (1 << order)) + j;
715 MB_CHECK_ASSERT(
716 !mb_test_bit(k, e4b->bd_bitmap));
717 }
718 count++;
719 }
720 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
721 order--;
722 }
723
724 fstart = -1;
725 buddy = mb_find_buddy(e4b, 0, &max);
726 for (i = 0; i < max; i++) {
727 if (!mb_test_bit(i, buddy)) {
728 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
729 if (fstart == -1) {
730 fragments++;
731 fstart = i;
732 }
733 continue;
734 }
735 fstart = -1;
736 /* check used bits only */
737 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
738 buddy2 = mb_find_buddy(e4b, j, &max2);
739 k = i >> j;
740 MB_CHECK_ASSERT(k < max2);
741 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
742 }
743 }
744 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
745 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
746
747 grp = ext4_get_group_info(sb, e4b->bd_group);
748 list_for_each(cur, &grp->bb_prealloc_list) {
749 ext4_group_t groupnr;
750 struct ext4_prealloc_space *pa;
751 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
752 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
753 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
754 for (i = 0; i < pa->pa_len; i++)
755 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
756 }
757 return 0;
758 }
759 #undef MB_CHECK_ASSERT
760 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
761 __FILE__, __func__, __LINE__)
762 #else
763 #define mb_check_buddy(e4b)
764 #endif
765
766 /*
767 * Divide the blocks starting at @first with length @len into
768 * smaller chunks of power-of-2 blocks each.
769 * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
770 * then increase bb_counters[] for the corresponding chunk size.
771 */
772 static void ext4_mb_mark_free_simple(struct super_block *sb,
773 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
774 struct ext4_group_info *grp)
775 {
776 struct ext4_sb_info *sbi = EXT4_SB(sb);
777 ext4_grpblk_t min;
778 ext4_grpblk_t max;
779 ext4_grpblk_t chunk;
780 unsigned int border;
781
782 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
783
784 border = 2 << sb->s_blocksize_bits;
785
786 while (len > 0) {
787 /* find how many blocks can be covered since this position */
788 max = ffs(first | border) - 1;
789
790 /* find how many blocks of power 2 we need to mark */
791 min = fls(len) - 1;
792
793 if (max < min)
794 min = max;
795 chunk = 1 << min;
796
797 /* mark multiblock chunks only */
798 grp->bb_counters[min]++;
799 if (min > 0)
800 mb_clear_bit(first >> min,
801 buddy + sbi->s_mb_offsets[min]);
802
803 len -= chunk;
804 first += chunk;
805 }
806 }
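
/*
 * Worked example (illustrative): first = 6, len = 6 splits into an
 * order-1 chunk covering blocks 6-7 (the alignment of 6 caps max at 1)
 * and an order-2 chunk covering blocks 8-11, bumping bb_counters[1]
 * and bb_counters[2] and clearing one bit in each buddy level.
 */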
807
808 static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
809 int (*cmp)(struct rb_node *, struct rb_node *))
810 {
811 struct rb_node **iter = &root->rb_node, *parent = NULL;
812
813 while (*iter) {
814 parent = *iter;
815 if (cmp(new, *iter) > 0)
816 iter = &((*iter)->rb_left);
817 else
818 iter = &((*iter)->rb_right);
819 }
820
821 rb_link_node(new, parent, iter);
822 rb_insert_color(new, root);
823 }
824
825 static int
826 ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
827 {
828 struct ext4_group_info *grp1 = rb_entry(rb1,
829 struct ext4_group_info,
830 bb_avg_fragment_size_rb);
831 struct ext4_group_info *grp2 = rb_entry(rb2,
832 struct ext4_group_info,
833 bb_avg_fragment_size_rb);
834 int num_frags_1, num_frags_2;
835
836 num_frags_1 = grp1->bb_fragments ?
837 grp1->bb_free / grp1->bb_fragments : 0;
838 num_frags_2 = grp2->bb_fragments ?
839 grp2->bb_free / grp2->bb_fragments : 0;
840
841 return (num_frags_2 - num_frags_1);
842 }
843
844 /*
845 * Reinsert grpinfo into the avg_fragment_size tree with new average
846 * fragment size.
847 */
848 static void
849 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
850 {
851 struct ext4_sb_info *sbi = EXT4_SB(sb);
852
853 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
854 return;
855
856 write_lock(&sbi->s_mb_rb_lock);
857 if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
858 rb_erase(&grp->bb_avg_fragment_size_rb,
859 &sbi->s_mb_avg_fragment_size_root);
860 RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
861 }
862
863 ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
864 &grp->bb_avg_fragment_size_rb,
865 ext4_mb_avg_fragment_size_cmp);
866 write_unlock(&sbi->s_mb_rb_lock);
867 }
868
869 /*
870 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
871 * cr level needs an update.
872 */
873 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
874 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
875 {
876 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
877 struct ext4_group_info *iter, *grp;
878 int i;
879
880 if (ac->ac_status == AC_STATUS_FOUND)
881 return;
882
883 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
884 atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
885
886 grp = NULL;
887 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
888 if (list_empty(&sbi->s_mb_largest_free_orders[i]))
889 continue;
890 read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
891 if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
892 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
893 continue;
894 }
895 grp = NULL;
896 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
897 bb_largest_free_order_node) {
898 if (sbi->s_mb_stats)
899 atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
900 if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
901 grp = iter;
902 break;
903 }
904 }
905 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
906 if (grp)
907 break;
908 }
909
910 if (!grp) {
911 /* Increment cr and search again */
912 *new_cr = 1;
913 } else {
914 *group = grp->bb_group;
915 ac->ac_last_optimal_group = *group;
916 ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
917 }
918 }
919
920 /*
921 * Choose next group by traversing average fragment size tree. Updates *new_cr
922 * if cr level needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
923 * the linear search should continue for one iteration since there's lock
924 * contention on the rb tree lock.
925 */
926 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
927 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
928 {
929 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
930 int avg_fragment_size, best_so_far;
931 struct rb_node *node, *found;
932 struct ext4_group_info *grp;
933
934 /*
935 * If there is contention on the lock, instead of waiting for the lock
936 * to become available, just continue searching linearly. We'll resume
937 * our rb tree search later starting at ac->ac_last_optimal_group.
938 */
939 if (!read_trylock(&sbi->s_mb_rb_lock)) {
940 ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
941 return;
942 }
943
944 if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
945 if (sbi->s_mb_stats)
946 atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
947 /* We have found something at CR 1 in the past */
948 grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
949 for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
950 found = rb_next(found)) {
951 grp = rb_entry(found, struct ext4_group_info,
952 bb_avg_fragment_size_rb);
953 if (sbi->s_mb_stats)
954 atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
955 if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
956 break;
957 }
958 goto done;
959 }
960
961 node = sbi->s_mb_avg_fragment_size_root.rb_node;
962 best_so_far = 0;
963 found = NULL;
964
965 while (node) {
966 grp = rb_entry(node, struct ext4_group_info,
967 bb_avg_fragment_size_rb);
968 avg_fragment_size = 0;
969 if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
970 avg_fragment_size = grp->bb_fragments ?
971 grp->bb_free / grp->bb_fragments : 0;
972 if (!best_so_far || avg_fragment_size < best_so_far) {
973 best_so_far = avg_fragment_size;
974 found = node;
975 }
976 }
977 if (avg_fragment_size > ac->ac_g_ex.fe_len)
978 node = node->rb_right;
979 else
980 node = node->rb_left;
981 }
982
983 done:
984 if (found) {
985 grp = rb_entry(found, struct ext4_group_info,
986 bb_avg_fragment_size_rb);
987 *group = grp->bb_group;
988 ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
989 } else {
990 *new_cr = 2;
991 }
992
993 read_unlock(&sbi->s_mb_rb_lock);
994 ac->ac_last_optimal_group = *group;
995 }
996
997 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
998 {
999 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1000 return 0;
1001 if (ac->ac_criteria >= 2)
1002 return 0;
1003 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1004 return 0;
1005 return 1;
1006 }
1007
1008 /*
1009 * Return next linear group for allocation. If linear traversal should not be
1010 * performed, this function just returns the same group
1011 */
1012 static int
1013 next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
1014 {
1015 if (!should_optimize_scan(ac))
1016 goto inc_and_return;
1017
1018 if (ac->ac_groups_linear_remaining) {
1019 ac->ac_groups_linear_remaining--;
1020 goto inc_and_return;
1021 }
1022
1023 if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
1024 ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
1025 goto inc_and_return;
1026 }
1027
1028 return group;
1029 inc_and_return:
1030 /*
1031 * Artificially restricted ngroups for non-extent
1032 * files makes group > ngroups possible on first loop.
1033 */
1034 return group + 1 >= ngroups ? 0 : group + 1;
1035 }
1036
1037 /*
1038 * ext4_mb_choose_next_group: choose next group for allocation.
1039 *
1040 * @ac Allocation Context
1041 * @new_cr This is an output parameter. If there is no good group
1042 * available at current CR level, this field is updated to indicate
1043 * the new cr level that should be used.
1044 * @group This is an input / output parameter. As an input it indicates the
1045 * next group that the allocator intends to use for allocation. As
1046 * output, this field indicates the next group that should be used as
1047 * determined by the optimization functions.
1048 * @ngroups Total number of groups
1049 */
1050 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1051 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1052 {
1053 *new_cr = ac->ac_criteria;
1054
1055 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
1056 return;
1057
1058 if (*new_cr == 0) {
1059 ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1060 } else if (*new_cr == 1) {
1061 ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1062 } else {
1063 /*
1064 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1065 * bb_free. But until that happens, we should never come here.
1066 */
1067 WARN_ON(1);
1068 }
1069 }
1070
1071 /*
1072 * Cache the order of the largest free extent we have available in this block
1073 * group.
1074 */
1075 static void
1076 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1077 {
1078 struct ext4_sb_info *sbi = EXT4_SB(sb);
1079 int i;
1080
1081 if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
1082 write_lock(&sbi->s_mb_largest_free_orders_locks[
1083 grp->bb_largest_free_order]);
1084 list_del_init(&grp->bb_largest_free_order_node);
1085 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1086 grp->bb_largest_free_order]);
1087 }
1088 grp->bb_largest_free_order = -1; /* uninit */
1089
1090 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
1091 if (grp->bb_counters[i] > 0) {
1092 grp->bb_largest_free_order = i;
1093 break;
1094 }
1095 }
1096 if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
1097 grp->bb_largest_free_order >= 0 && grp->bb_free) {
1098 write_lock(&sbi->s_mb_largest_free_orders_locks[
1099 grp->bb_largest_free_order]);
1100 list_add_tail(&grp->bb_largest_free_order_node,
1101 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1102 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1103 grp->bb_largest_free_order]);
1104 }
1105 }
1106
1107 static noinline_for_stack
1108 void ext4_mb_generate_buddy(struct super_block *sb,
1109 void *buddy, void *bitmap, ext4_group_t group)
1110 {
1111 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1112 struct ext4_sb_info *sbi = EXT4_SB(sb);
1113 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1114 ext4_grpblk_t i = 0;
1115 ext4_grpblk_t first;
1116 ext4_grpblk_t len;
1117 unsigned free = 0;
1118 unsigned fragments = 0;
1119 unsigned long long period = get_cycles();
1120
1121 /* initialize buddy from bitmap which is an aggregation
1122 * of the on-disk bitmap and preallocations */
1123 i = mb_find_next_zero_bit(bitmap, max, 0);
1124 grp->bb_first_free = i;
1125 while (i < max) {
1126 fragments++;
1127 first = i;
1128 i = mb_find_next_bit(bitmap, max, i);
1129 len = i - first;
1130 free += len;
1131 if (len > 1)
1132 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1133 else
1134 grp->bb_counters[0]++;
1135 if (i < max)
1136 i = mb_find_next_zero_bit(bitmap, max, i);
1137 }
1138 grp->bb_fragments = fragments;
1139
1140 if (free != grp->bb_free) {
1141 ext4_grp_locked_error(sb, group, 0, 0,
1142 "block bitmap and bg descriptor "
1143 "inconsistent: %u vs %u free clusters",
1144 free, grp->bb_free);
1145 /*
1146 * If we intend to continue, we consider the group descriptor
1147 * corrupt and update bb_free using the bitmap value
1148 */
1149 grp->bb_free = free;
1150 ext4_mark_group_bitmap_corrupted(sb, group,
1151 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1152 }
1153 mb_set_largest_free_order(sb, grp);
1154
1155 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1156
1157 period = get_cycles() - period;
1158 atomic_inc(&sbi->s_mb_buddies_generated);
1159 atomic64_add(period, &sbi->s_mb_generation_time);
1160 mb_update_avg_fragment_size(sb, grp);
1161 }
1162
1163 /* The buddy information is attached to the buddy cache inode
1164 * for convenience. The information regarding each group
1165 * is loaded via ext4_mb_load_buddy. The information involves the
1166 * block bitmap and buddy information. The information is
1167 * stored in the inode as
1168 *
1169 * { page }
1170 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1171 *
1172 *
1173 * one block each for bitmap and buddy information.
1174 * So for each group we take up 2 blocks. A page can
1175 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1176 * So it can have information regarding groups_per_page which
1177 * is blocks_per_page/2
1178 *
1179 * Locking note: This routine takes the block group lock of all groups
1180 * for this page; do not hold this lock when calling this routine!
1181 */
1182
1183 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1184 {
1185 ext4_group_t ngroups;
1186 int blocksize;
1187 int blocks_per_page;
1188 int groups_per_page;
1189 int err = 0;
1190 int i;
1191 ext4_group_t first_group, group;
1192 int first_block;
1193 struct super_block *sb;
1194 struct buffer_head *bhs;
1195 struct buffer_head **bh = NULL;
1196 struct inode *inode;
1197 char *data;
1198 char *bitmap;
1199 struct ext4_group_info *grinfo;
1200
1201 inode = page->mapping->host;
1202 sb = inode->i_sb;
1203 ngroups = ext4_get_groups_count(sb);
1204 blocksize = i_blocksize(inode);
1205 blocks_per_page = PAGE_SIZE / blocksize;
1206
1207 mb_debug(sb, "init page %lu\n", page->index);
1208
1209 groups_per_page = blocks_per_page >> 1;
1210 if (groups_per_page == 0)
1211 groups_per_page = 1;
1212
1213 /* allocate buffer_heads to read bitmaps */
1214 if (groups_per_page > 1) {
1215 i = sizeof(struct buffer_head *) * groups_per_page;
1216 bh = kzalloc(i, gfp);
1217 if (bh == NULL) {
1218 err = -ENOMEM;
1219 goto out;
1220 }
1221 } else
1222 bh = &bhs;
1223
1224 first_group = page->index * blocks_per_page / 2;
1225
1226 /* read all groups the page covers into the cache */
1227 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1228 if (group >= ngroups)
1229 break;
1230
1231 grinfo = ext4_get_group_info(sb, group);
1232 /*
1233 * If page is uptodate then we came here after online resize
1234 * which added some new uninitialized group info structs, so
1235 * we must skip all initialized uptodate buddies on the page,
1236 * which may be currently in use by an allocating task.
1237 */
1238 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1239 bh[i] = NULL;
1240 continue;
1241 }
1242 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1243 if (IS_ERR(bh[i])) {
1244 err = PTR_ERR(bh[i]);
1245 bh[i] = NULL;
1246 goto out;
1247 }
1248 mb_debug(sb, "read bitmap for group %u\n", group);
1249 }
1250
1251 /* wait for I/O completion */
1252 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1253 int err2;
1254
1255 if (!bh[i])
1256 continue;
1257 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1258 if (!err)
1259 err = err2;
1260 }
1261
1262 first_block = page->index * blocks_per_page;
1263 for (i = 0; i < blocks_per_page; i++) {
1264 group = (first_block + i) >> 1;
1265 if (group >= ngroups)
1266 break;
1267
1268 if (!bh[group - first_group])
1269 /* skip initialized uptodate buddy */
1270 continue;
1271
1272 if (!buffer_verified(bh[group - first_group]))
1273 /* Skip faulty bitmaps */
1274 continue;
1275 err = 0;
1276
1277 /*
1278 * data carries information regarding this
1279 * particular group in the format specified
1280 * above
1281 *
1282 */
1283 data = page_address(page) + (i * blocksize);
1284 bitmap = bh[group - first_group]->b_data;
1285
1286 /*
1287 * We place the buddy block and bitmap block
1288 * close together
1289 */
1290 if ((first_block + i) & 1) {
1291 /* this is block of buddy */
1292 BUG_ON(incore == NULL);
1293 mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1294 group, page->index, i * blocksize);
1295 trace_ext4_mb_buddy_bitmap_load(sb, group);
1296 grinfo = ext4_get_group_info(sb, group);
1297 grinfo->bb_fragments = 0;
1298 memset(grinfo->bb_counters, 0,
1299 sizeof(*grinfo->bb_counters) *
1300 (MB_NUM_ORDERS(sb)));
1301 /*
1302 * incore got set to the group block bitmap below
1303 */
1304 ext4_lock_group(sb, group);
1305 /* init the buddy */
1306 memset(data, 0xff, blocksize);
1307 ext4_mb_generate_buddy(sb, data, incore, group);
1308 ext4_unlock_group(sb, group);
1309 incore = NULL;
1310 } else {
1311 /* this is block of bitmap */
1312 BUG_ON(incore != NULL);
1313 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1314 group, page->index, i * blocksize);
1315 trace_ext4_mb_bitmap_load(sb, group);
1316
1317 /* see comments in ext4_mb_put_pa() */
1318 ext4_lock_group(sb, group);
1319 memcpy(data, bitmap, blocksize);
1320
1321 /* mark all preallocated blks used in in-core bitmap */
1322 ext4_mb_generate_from_pa(sb, data, group);
1323 ext4_mb_generate_from_freelist(sb, data, group);
1324 ext4_unlock_group(sb, group);
1325
1326 /* set incore so that the buddy information can be
1327 * generated using this
1328 */
1329 incore = data;
1330 }
1331 }
1332 SetPageUptodate(page);
1333
1334 out:
1335 if (bh) {
1336 for (i = 0; i < groups_per_page; i++)
1337 brelse(bh[i]);
1338 if (bh != &bhs)
1339 kfree(bh);
1340 }
1341 return err;
1342 }
1343
1344 /*
1345 * Lock the buddy and bitmap pages. This makes sure that parallel init_group
1346 * on the same buddy page doesn't happen while holding the buddy page lock.
1347 * Return locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
1348 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1349 */
1350 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1351 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1352 {
1353 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1354 int block, pnum, poff;
1355 int blocks_per_page;
1356 struct page *page;
1357
1358 e4b->bd_buddy_page = NULL;
1359 e4b->bd_bitmap_page = NULL;
1360
1361 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1362 /*
1363 * the buddy cache inode stores the block bitmap
1364 * and buddy information in consecutive blocks.
1365 * So for each group we need two blocks.
1366 */
1367 block = group * 2;
1368 pnum = block / blocks_per_page;
1369 poff = block % blocks_per_page;
1370 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1371 if (!page)
1372 return -ENOMEM;
1373 BUG_ON(page->mapping != inode->i_mapping);
1374 e4b->bd_bitmap_page = page;
1375 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1376
1377 if (blocks_per_page >= 2) {
1378 /* buddy and bitmap are on the same page */
1379 return 0;
1380 }
1381
1382 block++;
1383 pnum = block / blocks_per_page;
1384 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1385 if (!page)
1386 return -ENOMEM;
1387 BUG_ON(page->mapping != inode->i_mapping);
1388 e4b->bd_buddy_page = page;
1389 return 0;
1390 }
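
/*
 * Worked example (illustrative): with 1k blocks on a 4k page,
 * blocks_per_page = 4, so group 5 uses blocks 10 and 11 of the buddy
 * cache inode: pnum = 10 / 4 = 2, poff = 10 % 4 = 2, and both the
 * bitmap and the buddy fall on the same page. With 4k blocks,
 * blocks_per_page = 1 and the buddy lands on its own page (pnum 11).
 */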
1391
1392 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1393 {
1394 if (e4b->bd_bitmap_page) {
1395 unlock_page(e4b->bd_bitmap_page);
1396 put_page(e4b->bd_bitmap_page);
1397 }
1398 if (e4b->bd_buddy_page) {
1399 unlock_page(e4b->bd_buddy_page);
1400 put_page(e4b->bd_buddy_page);
1401 }
1402 }
1403
1404 /*
1405 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1406 * block group lock of all groups for this page; do not hold the BG lock when
1407 * calling this routine!
1408 */
1409 static noinline_for_stack
1410 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1411 {
1412
1413 struct ext4_group_info *this_grp;
1414 struct ext4_buddy e4b;
1415 struct page *page;
1416 int ret = 0;
1417
1418 might_sleep();
1419 mb_debug(sb, "init group %u\n", group);
1420 this_grp = ext4_get_group_info(sb, group);
1421 /*
1422 * This ensures that we don't reinit the buddy cache
1423 * page which maps to the group from which we are already
1424 * allocating. If we are looking at the buddy cache we would
1425 * have taken a reference using ext4_mb_load_buddy and that
1426 * would have pinned buddy page to page cache.
1427 * The call to ext4_mb_get_buddy_page_lock will mark the
1428 * page accessed.
1429 */
1430 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1431 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1432 /*
1433 * somebody initialized the group
1434 * return without doing anything
1435 */
1436 goto err;
1437 }
1438
1439 page = e4b.bd_bitmap_page;
1440 ret = ext4_mb_init_cache(page, NULL, gfp);
1441 if (ret)
1442 goto err;
1443 if (!PageUptodate(page)) {
1444 ret = -EIO;
1445 goto err;
1446 }
1447
1448 if (e4b.bd_buddy_page == NULL) {
1449 /*
1450 * If both the bitmap and buddy are in
1451 * the same page we don't need to force
1452 * init the buddy
1453 */
1454 ret = 0;
1455 goto err;
1456 }
1457 /* init buddy cache */
1458 page = e4b.bd_buddy_page;
1459 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1460 if (ret)
1461 goto err;
1462 if (!PageUptodate(page)) {
1463 ret = -EIO;
1464 goto err;
1465 }
1466 err:
1467 ext4_mb_put_buddy_page_lock(&e4b);
1468 return ret;
1469 }
1470
1471 /*
1472 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1473 * block group lock of all groups for this page; do not hold the BG lock when
1474 * calling this routine!
1475 */
1476 static noinline_for_stack int
1477 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1478 struct ext4_buddy *e4b, gfp_t gfp)
1479 {
1480 int blocks_per_page;
1481 int block;
1482 int pnum;
1483 int poff;
1484 struct page *page;
1485 int ret;
1486 struct ext4_group_info *grp;
1487 struct ext4_sb_info *sbi = EXT4_SB(sb);
1488 struct inode *inode = sbi->s_buddy_cache;
1489
1490 might_sleep();
1491 mb_debug(sb, "load group %u\n", group);
1492
1493 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1494 grp = ext4_get_group_info(sb, group);
1495
1496 e4b->bd_blkbits = sb->s_blocksize_bits;
1497 e4b->bd_info = grp;
1498 e4b->bd_sb = sb;
1499 e4b->bd_group = group;
1500 e4b->bd_buddy_page = NULL;
1501 e4b->bd_bitmap_page = NULL;
1502
1503 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1504 /*
1505 * we need full data about the group
1506 * to make a good selection
1507 */
1508 ret = ext4_mb_init_group(sb, group, gfp);
1509 if (ret)
1510 return ret;
1511 }
1512
1513 /*
1514 * the buddy cache inode stores the block bitmap
1515 * and buddy information in consecutive blocks.
1516 * So for each group we need two blocks.
1517 */
1518 block = group * 2;
1519 pnum = block / blocks_per_page;
1520 poff = block % blocks_per_page;
1521
1522 /* we could use find_or_create_page(), but it locks the page,
1523 * which we'd like to avoid in the fast path ... */
1524 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1525 if (page == NULL || !PageUptodate(page)) {
1526 if (page)
1527 /*
1528 * drop the page reference and try
1529 * to get the page with lock. If we
1530 * are not uptodate that implies
1531 * somebody just created the page but
1532 * has yet to initialize it. So
1533 * wait for it to be initialized.
1534 */
1535 put_page(page);
1536 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1537 if (page) {
1538 BUG_ON(page->mapping != inode->i_mapping);
1539 if (!PageUptodate(page)) {
1540 ret = ext4_mb_init_cache(page, NULL, gfp);
1541 if (ret) {
1542 unlock_page(page);
1543 goto err;
1544 }
1545 mb_cmp_bitmaps(e4b, page_address(page) +
1546 (poff * sb->s_blocksize));
1547 }
1548 unlock_page(page);
1549 }
1550 }
1551 if (page == NULL) {
1552 ret = -ENOMEM;
1553 goto err;
1554 }
1555 if (!PageUptodate(page)) {
1556 ret = -EIO;
1557 goto err;
1558 }
1559
1560 /* Pages marked accessed already */
1561 e4b->bd_bitmap_page = page;
1562 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1563
1564 block++;
1565 pnum = block / blocks_per_page;
1566 poff = block % blocks_per_page;
1567
1568 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1569 if (page == NULL || !PageUptodate(page)) {
1570 if (page)
1571 put_page(page);
1572 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1573 if (page) {
1574 BUG_ON(page->mapping != inode->i_mapping);
1575 if (!PageUptodate(page)) {
1576 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1577 gfp);
1578 if (ret) {
1579 unlock_page(page);
1580 goto err;
1581 }
1582 }
1583 unlock_page(page);
1584 }
1585 }
1586 if (page == NULL) {
1587 ret = -ENOMEM;
1588 goto err;
1589 }
1590 if (!PageUptodate(page)) {
1591 ret = -EIO;
1592 goto err;
1593 }
1594
1595 /* Pages marked accessed already */
1596 e4b->bd_buddy_page = page;
1597 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1598
1599 return 0;
1600
1601 err:
1602 if (page)
1603 put_page(page);
1604 if (e4b->bd_bitmap_page)
1605 put_page(e4b->bd_bitmap_page);
1606 if (e4b->bd_buddy_page)
1607 put_page(e4b->bd_buddy_page);
1608 e4b->bd_buddy = NULL;
1609 e4b->bd_bitmap = NULL;
1610 return ret;
1611 }
1612
1613 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1614 struct ext4_buddy *e4b)
1615 {
1616 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1617 }
1618
1619 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1620 {
1621 if (e4b->bd_bitmap_page)
1622 put_page(e4b->bd_bitmap_page);
1623 if (e4b->bd_buddy_page)
1624 put_page(e4b->bd_buddy_page);
1625 }
1626
1627
1628 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1629 {
1630 int order = 1, max;
1631 void *bb;
1632
1633 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1634 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1635
1636 while (order <= e4b->bd_blkbits + 1) {
1637 bb = mb_find_buddy(e4b, order, &max);
1638 if (!mb_test_bit(block >> order, bb)) {
1639 /* this block is part of buddy of order 'order' */
1640 return order;
1641 }
1642 order++;
1643 }
1644 return 0;
1645 }
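
/*
 * Worked example (illustrative): if blocks 8-11 form a free order-2
 * chunk, the order-1 test for block 9 sees bit 9 >> 1 = 4 set (the
 * pair has merged upward), but the order-2 test finds bit 9 >> 2 = 2
 * clear, so the function returns 2.
 */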
1646
1647 static void mb_clear_bits(void *bm, int cur, int len)
1648 {
1649 __u32 *addr;
1650
1651 len = cur + len;
1652 while (cur < len) {
1653 if ((cur & 31) == 0 && (len - cur) >= 32) {
1654 /* fast path: clear whole word at once */
1655 addr = bm + (cur >> 3);
1656 *addr = 0;
1657 cur += 32;
1658 continue;
1659 }
1660 mb_clear_bit(cur, bm);
1661 cur++;
1662 }
1663 }
1664
1665 /* clear bits in given range
1666 * will return the first bit found already zero, if any, or -1 otherwise
1667 */
1668 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1669 {
1670 __u32 *addr;
1671 int zero_bit = -1;
1672
1673 len = cur + len;
1674 while (cur < len) {
1675 if ((cur & 31) == 0 && (len - cur) >= 32) {
1676 /* fast path: clear whole word at once */
1677 addr = bm + (cur >> 3);
1678 if (*addr != (__u32)(-1) && zero_bit == -1)
1679 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1680 *addr = 0;
1681 cur += 32;
1682 continue;
1683 }
1684 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1685 zero_bit = cur;
1686 cur++;
1687 }
1688
1689 return zero_bit;
1690 }
1691
1692 void ext4_set_bits(void *bm, int cur, int len)
1693 {
1694 __u32 *addr;
1695
1696 len = cur + len;
1697 while (cur < len) {
1698 if ((cur & 31) == 0 && (len - cur) >= 32) {
1699 /* fast path: set whole word at once */
1700 addr = bm + (cur >> 3);
1701 *addr = 0xffffffff;
1702 cur += 32;
1703 continue;
1704 }
1705 mb_set_bit(cur, bm);
1706 cur++;
1707 }
1708 }
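
/*
 * Worked example (illustrative) for the word-at-once fast path above:
 * setting len = 40 bits at cur = 32 writes one full 32-bit word at
 * byte offset 32 >> 3 = 4, then sets the remaining 8 bits one by one.
 */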
1709
1710 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1711 {
1712 if (mb_test_bit(*bit + side, bitmap)) {
1713 mb_clear_bit(*bit, bitmap);
1714 (*bit) -= side;
1715 return 1;
1716 }
1717 else {
1718 (*bit) += side;
1719 mb_set_bit(*bit, bitmap);
1720 return -1;
1721 }
1722 }
1723
1724 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1725 {
1726 int max;
1727 int order = 1;
1728 void *buddy = mb_find_buddy(e4b, order, &max);
1729
1730 while (buddy) {
1731 void *buddy2;
1732
1733 /* Bits in range [first; last] are known to be set since
1734 * corresponding blocks were allocated. Bits in range
1735 * (first; last) will stay set because they form buddies on
1736 * upper layer. We just deal with borders if they don't
1737 * align with upper layer and then go up.
1738 * Releasing entire group is all about clearing
1739 * single bit of highest order buddy.
1740 */
1741
1742 /* Example:
1743 * ---------------------------------
1744 * | 1 | 1 | 1 | 1 |
1745 * ---------------------------------
1746 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1747 * ---------------------------------
1748 * 0 1 2 3 4 5 6 7
1749 * \_____________________/
1750 *
1751 * Neither [1] nor [6] is aligned to above layer.
1752 * Left neighbour [0] is free, so mark it busy,
1753 * decrease bb_counters and extend range to
1754 * [0; 6]
1755 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1756 * mark [6] free, increase bb_counters and shrink range to
1757 * [0; 5].
1758 * Then shift range to [0; 2], go up and do the same.
1759 */
1760
1761
1762 if (first & 1)
1763 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1764 if (!(last & 1))
1765 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1766 if (first > last)
1767 break;
1768 order++;
1769
1770 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1771 mb_clear_bits(buddy, first, last - first + 1);
1772 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1773 break;
1774 }
1775 first >>= 1;
1776 last >>= 1;
1777 buddy = buddy2;
1778 }
1779 }
1780
1781 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1782 int first, int count)
1783 {
1784 int left_is_free = 0;
1785 int right_is_free = 0;
1786 int block;
1787 int last = first + count - 1;
1788 struct super_block *sb = e4b->bd_sb;
1789
1790 if (WARN_ON(count == 0))
1791 return;
1792 BUG_ON(last >= (sb->s_blocksize << 3));
1793 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1794 /* Don't bother if the block group is corrupt. */
1795 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1796 return;
1797
1798 mb_check_buddy(e4b);
1799 mb_free_blocks_double(inode, e4b, first, count);
1800
1801 this_cpu_inc(discard_pa_seq);
1802 e4b->bd_info->bb_free += count;
1803 if (first < e4b->bd_info->bb_first_free)
1804 e4b->bd_info->bb_first_free = first;
1805
1806 /* access memory sequentially: check left neighbour,
1807 * clear range and then check right neighbour
1808 */
1809 if (first != 0)
1810 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1811 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1812 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1813 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1814
1815 if (unlikely(block != -1)) {
1816 struct ext4_sb_info *sbi = EXT4_SB(sb);
1817 ext4_fsblk_t blocknr;
1818
1819 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1820 blocknr += EXT4_C2B(sbi, block);
1821 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1822 ext4_grp_locked_error(sb, e4b->bd_group,
1823 inode ? inode->i_ino : 0,
1824 blocknr,
1825 "freeing already freed block (bit %u); block bitmap corrupt.",
1826 block);
1827 ext4_mark_group_bitmap_corrupted(
1828 sb, e4b->bd_group,
1829 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1830 }
1831 goto done;
1832 }
1833
1834 /* let's maintain fragments counter */
1835 if (left_is_free && right_is_free)
1836 e4b->bd_info->bb_fragments--;
1837 else if (!left_is_free && !right_is_free)
1838 e4b->bd_info->bb_fragments++;
1839
1840 /* buddy[0] == bd_bitmap is a special case, so handle
1841 * it right away and let mb_buddy_mark_free stay free of
1842 * zero order checks.
1843 * Check if neighbours are to be coalesced,
1844 * adjust bitmap bb_counters and borders appropriately.
1845 */
1846 if (first & 1) {
1847 first += !left_is_free;
1848 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1849 }
1850 if (!(last & 1)) {
1851 last -= !right_is_free;
1852 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1853 }
1854
1855 if (first <= last)
1856 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1857
1858 done:
1859 mb_set_largest_free_order(sb, e4b->bd_info);
1860 mb_update_avg_fragment_size(sb, e4b->bd_info);
1861 mb_check_buddy(e4b);
1862 }
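/*
 * Fragment accounting above, for illustration: if the freed range
 * touches free space on both sides, the two neighbouring fragments
 * merge into one (bb_fragments--); if it is surrounded by used blocks
 * on both sides, a brand-new fragment appears (bb_fragments++); if
 * exactly one side is free, the range merely extends an existing
 * fragment and the count is unchanged.
 */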
1863
1864 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1865 int needed, struct ext4_free_extent *ex)
1866 {
1867 int next = block;
1868 int max, order;
1869 void *buddy;
1870
1871 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1872 BUG_ON(ex == NULL);
1873
1874 buddy = mb_find_buddy(e4b, 0, &max);
1875 BUG_ON(buddy == NULL);
1876 BUG_ON(block >= max);
1877 if (mb_test_bit(block, buddy)) {
1878 ex->fe_len = 0;
1879 ex->fe_start = 0;
1880 ex->fe_group = 0;
1881 return 0;
1882 }
1883
1884 /* find actual order */
1885 order = mb_find_order_for_block(e4b, block);
1886 block = block >> order;
1887
1888 ex->fe_len = 1 << order;
1889 ex->fe_start = block << order;
1890 ex->fe_group = e4b->bd_group;
1891
1892 /* calc difference from given start */
1893 next = next - ex->fe_start;
1894 ex->fe_len -= next;
1895 ex->fe_start += next;
1896
1897 while (needed > ex->fe_len &&
1898 mb_find_buddy(e4b, order, &max)) {
1899
1900 if (block + 1 >= max)
1901 break;
1902
1903 next = (block + 1) * (1 << order);
1904 if (mb_test_bit(next, e4b->bd_bitmap))
1905 break;
1906
1907 order = mb_find_order_for_block(e4b, next);
1908
1909 block = next >> order;
1910 ex->fe_len += 1 << order;
1911 }
1912
1913 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1914 /* Should never happen! (but apparently sometimes does?!?) */
1915 WARN_ON(1);
1916 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1917 "corruption or bug in mb_find_extent "
1918 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1919 block, order, needed, ex->fe_group, ex->fe_start,
1920 ex->fe_len, ex->fe_logical);
1921 ex->fe_len = 0;
1922 ex->fe_start = 0;
1923 ex->fe_group = 0;
1924 }
1925 return ex->fe_len;
1926 }
1927
1928 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1929 {
1930 int ord;
1931 int mlen = 0;
1932 int max = 0;
1933 int cur;
1934 int start = ex->fe_start;
1935 int len = ex->fe_len;
1936 unsigned ret = 0;
1937 int len0 = len;
1938 void *buddy;
1939
1940 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1941 BUG_ON(e4b->bd_group != ex->fe_group);
1942 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1943 mb_check_buddy(e4b);
1944 mb_mark_used_double(e4b, start, len);
1945
1946 this_cpu_inc(discard_pa_seq);
1947 e4b->bd_info->bb_free -= len;
1948 if (e4b->bd_info->bb_first_free == start)
1949 e4b->bd_info->bb_first_free += len;
1950
1951 /* let's maintain fragments counter */
1952 if (start != 0)
1953 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1954 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1955 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1956 if (mlen && max)
1957 e4b->bd_info->bb_fragments++;
1958 else if (!mlen && !max)
1959 e4b->bd_info->bb_fragments--;
1960
1961 /* let's maintain buddy itself */
1962 while (len) {
1963 ord = mb_find_order_for_block(e4b, start);
1964
1965 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1966 /* the whole chunk may be allocated at once! */
1967 mlen = 1 << ord;
1968 buddy = mb_find_buddy(e4b, ord, &max);
1969 BUG_ON((start >> ord) >= max);
1970 mb_set_bit(start >> ord, buddy);
1971 e4b->bd_info->bb_counters[ord]--;
1972 start += mlen;
1973 len -= mlen;
1974 BUG_ON(len < 0);
1975 continue;
1976 }
1977
1978 /* store for history */
1979 if (ret == 0)
1980 ret = len | (ord << 16);
1981
1982 /* we have to split large buddy */
1983 BUG_ON(ord <= 0);
1984 buddy = mb_find_buddy(e4b, ord, &max);
1985 mb_set_bit(start >> ord, buddy);
1986 e4b->bd_info->bb_counters[ord]--;
1987
1988 ord--;
1989 cur = (start >> ord) & ~1U;
1990 buddy = mb_find_buddy(e4b, ord, &max);
1991 mb_clear_bit(cur, buddy);
1992 mb_clear_bit(cur + 1, buddy);
1993 e4b->bd_info->bb_counters[ord]++;
1994 e4b->bd_info->bb_counters[ord]++;
1995 }
1996 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1997
1998 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1999 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2000 mb_check_buddy(e4b);
2001
2002 return ret;
2003 }
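/*
 * A note on the return value, for illustration: 'ret' packs the
 * remaining length and the order of the first buddy that had to be
 * split, as (len | (ord << 16)), and stays 0 if the request was carved
 * out of exactly-fitting buddies. ext4_mb_use_best_found() later
 * unpacks it into ac_tail (ret & 0xffff) and ac_buddy (ret >> 16) for
 * allocation statistics.
 */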
2004
2005 /*
2006 * Must be called under group lock!
2007 */
2008 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2009 struct ext4_buddy *e4b)
2010 {
2011 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2012 int ret;
2013
2014 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2015 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2016
2017 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2018 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2019 ret = mb_mark_used(e4b, &ac->ac_b_ex);
2020
2021 /* preallocation can change ac_b_ex, thus we store actually
2022 * allocated blocks for history */
2023 ac->ac_f_ex = ac->ac_b_ex;
2024
2025 ac->ac_status = AC_STATUS_FOUND;
2026 ac->ac_tail = ret & 0xffff;
2027 ac->ac_buddy = ret >> 16;
2028
2029 /*
2030 * take the page reference. We want the page to be pinned
2031 * so that we don't get an ext4_mb_init_cache() call for this
2032 * group until we update the bitmap. That would mean we
2033 * double allocate blocks. The reference is dropped
2034 * in ext4_mb_release_context
2035 */
2036 ac->ac_bitmap_page = e4b->bd_bitmap_page;
2037 get_page(ac->ac_bitmap_page);
2038 ac->ac_buddy_page = e4b->bd_buddy_page;
2039 get_page(ac->ac_buddy_page);
2040 /* store last allocated for subsequent stream allocation */
2041 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2042 spin_lock(&sbi->s_md_lock);
2043 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2044 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2045 spin_unlock(&sbi->s_md_lock);
2046 }
2047 /*
2048 * As we've just preallocated more space than the
2049 * user originally requested, we store the allocated
2050 * space in a special descriptor.
2051 */
2052 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2053 ext4_mb_new_preallocation(ac);
2054
2055 }
2056
2057 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2058 struct ext4_buddy *e4b,
2059 int finish_group)
2060 {
2061 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2062 struct ext4_free_extent *bex = &ac->ac_b_ex;
2063 struct ext4_free_extent *gex = &ac->ac_g_ex;
2064 struct ext4_free_extent ex;
2065 int max;
2066
2067 if (ac->ac_status == AC_STATUS_FOUND)
2068 return;
2069 /*
2070 * We don't want to scan for a whole year
2071 */
2072 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2073 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2074 ac->ac_status = AC_STATUS_BREAK;
2075 return;
2076 }
2077
2078 /*
2079 * Haven't found good chunk so far, let's continue
2080 */
2081 if (bex->fe_len < gex->fe_len)
2082 return;
2083
2084 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2085 && bex->fe_group == e4b->bd_group) {
2086 /* recheck chunk's availability - we don't know
2087 * when it was found (within this lock-unlock
2088 * period or not) */
2089 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2090 if (max >= gex->fe_len) {
2091 ext4_mb_use_best_found(ac, e4b);
2092 return;
2093 }
2094 }
2095 }
2096
2097 /*
2098 * The routine checks whether the found extent is good enough. If it is,
2099 * the extent gets marked used and a flag is set in the context
2100 * to stop scanning. Otherwise, the extent is compared with the
2101 * previously found extent and, if the new one is better, it's stored
2102 * in the context. Later, the best found extent will be used if
2103 * mballoc can't find a good enough extent.
2104 *
2105 * FIXME: real allocation policy is to be designed yet!
2106 */
2107 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2108 struct ext4_free_extent *ex,
2109 struct ext4_buddy *e4b)
2110 {
2111 struct ext4_free_extent *bex = &ac->ac_b_ex;
2112 struct ext4_free_extent *gex = &ac->ac_g_ex;
2113
2114 BUG_ON(ex->fe_len <= 0);
2115 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2116 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2117 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2118
2119 ac->ac_found++;
2120
2121 /*
2122 * The special case - take what you catch first
2123 */
2124 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2125 *bex = *ex;
2126 ext4_mb_use_best_found(ac, e4b);
2127 return;
2128 }
2129
2130 /*
2131 * Let's check whether the chunk is good enough
2132 */
2133 if (ex->fe_len == gex->fe_len) {
2134 *bex = *ex;
2135 ext4_mb_use_best_found(ac, e4b);
2136 return;
2137 }
2138
2139 /*
2140 * If this is the first extent found, just store it in the context
2141 */
2142 if (bex->fe_len == 0) {
2143 *bex = *ex;
2144 return;
2145 }
2146
2147 /*
2148 * If new found extent is better, store it in the context
2149 */
2150 if (bex->fe_len < gex->fe_len) {
2151 /* if the request isn't satisfied, any found extent
2152 * larger than the previous best one is better */
2153 if (ex->fe_len > bex->fe_len)
2154 *bex = *ex;
2155 } else if (ex->fe_len > gex->fe_len) {
2156 /* if the request is satisfied, then we try to find
2157 * an extent that still satisfies the request, but is
2158 * smaller than the previous one */
2159 if (ex->fe_len < bex->fe_len)
2160 *bex = *ex;
2161 }
2162
2163 ext4_mb_check_limits(ac, e4b, 0);
2164 }
2165
2166 static noinline_for_stack
2167 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2168 struct ext4_buddy *e4b)
2169 {
2170 struct ext4_free_extent ex = ac->ac_b_ex;
2171 ext4_group_t group = ex.fe_group;
2172 int max;
2173 int err;
2174
2175 BUG_ON(ex.fe_len <= 0);
2176 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2177 if (err)
2178 return err;
2179
2180 ext4_lock_group(ac->ac_sb, group);
2181 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2182
2183 if (max > 0) {
2184 ac->ac_b_ex = ex;
2185 ext4_mb_use_best_found(ac, e4b);
2186 }
2187
2188 ext4_unlock_group(ac->ac_sb, group);
2189 ext4_mb_unload_buddy(e4b);
2190
2191 return 0;
2192 }
2193
2194 static noinline_for_stack
2195 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2196 struct ext4_buddy *e4b)
2197 {
2198 ext4_group_t group = ac->ac_g_ex.fe_group;
2199 int max;
2200 int err;
2201 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2202 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2203 struct ext4_free_extent ex;
2204
2205 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2206 return 0;
2207 if (grp->bb_free == 0)
2208 return 0;
2209
2210 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2211 if (err)
2212 return err;
2213
2214 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2215 ext4_mb_unload_buddy(e4b);
2216 return 0;
2217 }
2218
2219 ext4_lock_group(ac->ac_sb, group);
2220 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2221 ac->ac_g_ex.fe_len, &ex);
2222 ex.fe_logical = 0xDEADFA11; /* debug value */
2223
2224 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2225 ext4_fsblk_t start;
2226
2227 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2228 ex.fe_start;
2229 /* use do_div to get remainder (would be 64-bit modulo) */
2230 if (do_div(start, sbi->s_stripe) == 0) {
2231 ac->ac_found++;
2232 ac->ac_b_ex = ex;
2233 ext4_mb_use_best_found(ac, e4b);
2234 }
2235 } else if (max >= ac->ac_g_ex.fe_len) {
2236 BUG_ON(ex.fe_len <= 0);
2237 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2238 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2239 ac->ac_found++;
2240 ac->ac_b_ex = ex;
2241 ext4_mb_use_best_found(ac, e4b);
2242 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2243 /* Sometimes, the caller may want to merge even a small
2244 * number of blocks into an existing extent */
2245 BUG_ON(ex.fe_len <= 0);
2246 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2247 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2248 ac->ac_found++;
2249 ac->ac_b_ex = ex;
2250 ext4_mb_use_best_found(ac, e4b);
2251 }
2252 ext4_unlock_group(ac->ac_sb, group);
2253 ext4_mb_unload_buddy(e4b);
2254
2255 return 0;
2256 }
2257
2258 /*
2259 * The routine scans buddy structures (not the bitmap!) from the given
2260 * order up to the max order and tries to find a chunk big enough to satisfy the request
2261 */
2262 static noinline_for_stack
2263 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2264 struct ext4_buddy *e4b)
2265 {
2266 struct super_block *sb = ac->ac_sb;
2267 struct ext4_group_info *grp = e4b->bd_info;
2268 void *buddy;
2269 int i;
2270 int k;
2271 int max;
2272
2273 BUG_ON(ac->ac_2order <= 0);
2274 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2275 if (grp->bb_counters[i] == 0)
2276 continue;
2277
2278 buddy = mb_find_buddy(e4b, i, &max);
2279 BUG_ON(buddy == NULL);
2280
2281 k = mb_find_next_zero_bit(buddy, max, 0);
2282 if (k >= max) {
2283 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2284 "%d free clusters of order %d. But found 0",
2285 grp->bb_counters[i], i);
2286 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2287 e4b->bd_group,
2288 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2289 break;
2290 }
2291 ac->ac_found++;
2292
2293 ac->ac_b_ex.fe_len = 1 << i;
2294 ac->ac_b_ex.fe_start = k << i;
2295 ac->ac_b_ex.fe_group = e4b->bd_group;
2296
2297 ext4_mb_use_best_found(ac, e4b);
2298
2299 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2300
2301 if (EXT4_SB(sb)->s_mb_stats)
2302 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2303
2304 break;
2305 }
2306 }
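/*
 * Worked example (a sketch): for a request with ac_2order = 3, the
 * scan starts at the order-3 buddy. If bb_counters[3] is non-zero and
 * the first clear bit found there is k = 5, the chosen extent starts
 * at cluster 5 << 3 = 40 and is 1 << 3 = 8 clusters long - an exact,
 * aligned match found without walking the block bitmap.
 */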
2307
2308 /*
2309 * The routine scans the group and measures all found extents.
2310 * In order to optimize scanning, the caller must pass the number of
2311 * free blocks in the group, so the routine knows the upper limit.
2312 */
2313 static noinline_for_stack
2314 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2315 struct ext4_buddy *e4b)
2316 {
2317 struct super_block *sb = ac->ac_sb;
2318 void *bitmap = e4b->bd_bitmap;
2319 struct ext4_free_extent ex;
2320 int i;
2321 int free;
2322
2323 free = e4b->bd_info->bb_free;
2324 if (WARN_ON(free <= 0))
2325 return;
2326
2327 i = e4b->bd_info->bb_first_free;
2328
2329 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2330 i = mb_find_next_zero_bit(bitmap,
2331 EXT4_CLUSTERS_PER_GROUP(sb), i);
2332 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2333 /*
2334 * If we have a corrupt bitmap, we won't find any
2335 * free blocks even though the group info says we
2336 * have free blocks
2337 */
2338 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2339 "%d free clusters as per "
2340 "group info. But bitmap says 0",
2341 free);
2342 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2343 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2344 break;
2345 }
2346
2347 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2348 if (WARN_ON(ex.fe_len <= 0))
2349 break;
2350 if (free < ex.fe_len) {
2351 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2352 "%d free clusters as per "
2353 "group info. But got %d blocks",
2354 free, ex.fe_len);
2355 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2356 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2357 /*
2358 * The number of free blocks differs. This mostly
2359 * indicates that the bitmap is corrupt. So exit
2360 * without claiming the space.
2361 */
2362 break;
2363 }
2364 ex.fe_logical = 0xDEADC0DE; /* debug value */
2365 ext4_mb_measure_extent(ac, &ex, e4b);
2366
2367 i += ex.fe_len;
2368 free -= ex.fe_len;
2369 }
2370
2371 ext4_mb_check_limits(ac, e4b, 1);
2372 }
2373
2374 /*
2375 * This is a special case for storage like RAID5:
2376 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2377 */
2378 static noinline_for_stack
2379 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2380 struct ext4_buddy *e4b)
2381 {
2382 struct super_block *sb = ac->ac_sb;
2383 struct ext4_sb_info *sbi = EXT4_SB(sb);
2384 void *bitmap = e4b->bd_bitmap;
2385 struct ext4_free_extent ex;
2386 ext4_fsblk_t first_group_block;
2387 ext4_fsblk_t a;
2388 ext4_grpblk_t i;
2389 int max;
2390
2391 BUG_ON(sbi->s_stripe == 0);
2392
2393 /* find first stripe-aligned block in group */
2394 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2395
2396 a = first_group_block + sbi->s_stripe - 1;
2397 do_div(a, sbi->s_stripe);
2398 i = (a * sbi->s_stripe) - first_group_block;
2399
2400 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2401 if (!mb_test_bit(i, bitmap)) {
2402 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2403 if (max >= sbi->s_stripe) {
2404 ac->ac_found++;
2405 ex.fe_logical = 0xDEADF00D; /* debug value */
2406 ac->ac_b_ex = ex;
2407 ext4_mb_use_best_found(ac, e4b);
2408 break;
2409 }
2410 }
2411 i += sbi->s_stripe;
2412 }
2413 }
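/*
 * Alignment arithmetic above, by example (illustrative numbers): with
 * s_stripe = 16 and first_group_block = 1000, a = 1015 / 16 = 63, so
 * i = 63 * 16 - 1000 = 8 is the first group-relative block whose
 * absolute block number (1008) is a multiple of the stripe size; the
 * scan then steps through the group in stripe-sized strides.
 */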
2414
2415 /*
2416 * This is also called BEFORE we load the buddy bitmap.
2417 * Returns 1 if the group is suitable for the allocation,
2418 * 0 otherwise.
2419 */
2420 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2421 ext4_group_t group, int cr)
2422 {
2423 ext4_grpblk_t free, fragments;
2424 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2425 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2426
2427 BUG_ON(cr < 0 || cr >= 4);
2428
2429 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2430 return false;
2431
2432 free = grp->bb_free;
2433 if (free == 0)
2434 return false;
2435
2436 fragments = grp->bb_fragments;
2437 if (fragments == 0)
2438 return false;
2439
2440 switch (cr) {
2441 case 0:
2442 BUG_ON(ac->ac_2order == 0);
2443
2444 /* Avoid using the first bg of a flexgroup for data files */
2445 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2446 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2447 ((group % flex_size) == 0))
2448 return false;
2449
2450 if (free < ac->ac_g_ex.fe_len)
2451 return false;
2452
2453 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2454 return true;
2455
2456 if (grp->bb_largest_free_order < ac->ac_2order)
2457 return false;
2458
2459 return true;
2460 case 1:
2461 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2462 return true;
2463 break;
2464 case 2:
2465 if (free >= ac->ac_g_ex.fe_len)
2466 return true;
2467 break;
2468 case 3:
2469 return true;
2470 default:
2471 BUG();
2472 }
2473
2474 return false;
2475 }
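/*
 * To summarize the criteria above (a rough guide, not a spec): cr=0
 * accepts a group only if the buddy data can satisfy the request as a
 * single aligned power-of-two chunk; cr=1 requires the average
 * fragment size (free / fragments) to cover the request; cr=2 merely
 * requires enough total free clusters; cr=3 takes any group with free
 * space.
 */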
2476
2477 /*
2478 * This could return negative error code if something goes wrong
2479 * during ext4_mb_init_group(). This should not be called with
2480 * ext4_lock_group() held.
2481 *
2482 * Note: because we are conditionally operating with the group lock in
2483 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2484 * function using __acquire and __release. This means we need to be
2485 * super careful before messing with the error path handling via "goto
2486 * out"!
2487 */
2488 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2489 ext4_group_t group, int cr)
2490 {
2491 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2492 struct super_block *sb = ac->ac_sb;
2493 struct ext4_sb_info *sbi = EXT4_SB(sb);
2494 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2495 ext4_grpblk_t free;
2496 int ret = 0;
2497
2498 if (sbi->s_mb_stats)
2499 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2500 if (should_lock) {
2501 ext4_lock_group(sb, group);
2502 __release(ext4_group_lock_ptr(sb, group));
2503 }
2504 free = grp->bb_free;
2505 if (free == 0)
2506 goto out;
2507 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2508 goto out;
2509 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2510 goto out;
2511 if (should_lock) {
2512 __acquire(ext4_group_lock_ptr(sb, group));
2513 ext4_unlock_group(sb, group);
2514 }
2515
2516 /* We only do this if the grp has never been initialized */
2517 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2518 struct ext4_group_desc *gdp =
2519 ext4_get_group_desc(sb, group, NULL);
2520 int ret;
2521
2522 /* cr=0/1 is a very optimistic search to find large
2523 * good chunks almost for free. If buddy data is not
2524 * ready, then this optimization makes no sense. But
2525 * we never skip the first block group in a flex_bg,
2526 * since this gets used for metadata block allocation,
2527 * and we want to make sure we locate metadata blocks
2528 * in the first block group in the flex_bg if possible.
2529 */
2530 if (cr < 2 &&
2531 (!sbi->s_log_groups_per_flex ||
2532 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2533 !(ext4_has_group_desc_csum(sb) &&
2534 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2535 return 0;
2536 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2537 if (ret)
2538 return ret;
2539 }
2540
2541 if (should_lock) {
2542 ext4_lock_group(sb, group);
2543 __release(ext4_group_lock_ptr(sb, group));
2544 }
2545 ret = ext4_mb_good_group(ac, group, cr);
2546 out:
2547 if (should_lock) {
2548 __acquire(ext4_group_lock_ptr(sb, group));
2549 ext4_unlock_group(sb, group);
2550 }
2551 return ret;
2552 }
2553
2554 /*
2555 * Start prefetching @nr block bitmaps starting at @group.
2556 * Return the next group which needs to be prefetched.
2557 */
2558 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2559 unsigned int nr, int *cnt)
2560 {
2561 ext4_group_t ngroups = ext4_get_groups_count(sb);
2562 struct buffer_head *bh;
2563 struct blk_plug plug;
2564
2565 blk_start_plug(&plug);
2566 while (nr-- > 0) {
2567 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2568 NULL);
2569 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2570
2571 /*
2572 * Prefetch block groups with free blocks; but don't
2573 * bother if it is marked uninitialized on disk, since
2574 * it won't require I/O to read. Also only try to
2575 * prefetch once, so we avoid the getblk() call, which can
2576 * be expensive.
2577 */
2578 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2579 EXT4_MB_GRP_NEED_INIT(grp) &&
2580 ext4_free_group_clusters(sb, gdp) > 0 &&
2581 !(ext4_has_group_desc_csum(sb) &&
2582 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2583 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2584 if (bh && !IS_ERR(bh)) {
2585 if (!buffer_uptodate(bh) && cnt)
2586 (*cnt)++;
2587 brelse(bh);
2588 }
2589 }
2590 if (++group >= ngroups)
2591 group = 0;
2592 }
2593 blk_finish_plug(&plug);
2594 return group;
2595 }
2596
2597 /*
2598 * Prefetching reads the block bitmap into the buffer cache; but we
2599 * need to make sure that the buddy bitmap in the page cache has been
2600 * initialized. Note that ext4_mb_init_group() will block if the I/O
2601 * is not yet completed, or indeed if the I/O was never initiated by
2602 * ext4_mb_prefetch in the first place.
2603 *
2604 * TODO: We should actually kick off the buddy bitmap setup in a work
2605 * queue when the buffer I/O is completed, so that we don't block
2606 * waiting for the block allocation bitmap read to finish when
2607 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2608 */
2609 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2610 unsigned int nr)
2611 {
2612 while (nr-- > 0) {
2613 struct ext4_group_desc *gdp;
2614 struct ext4_group_info *grp;
2615
2616 if (!group)
2617 group = ext4_get_groups_count(sb);
2618 group--;
2619 gdp = ext4_get_group_desc(sb, group, NULL);
2620 grp = ext4_get_group_info(sb, group);
2621
2622 if (EXT4_MB_GRP_NEED_INIT(grp) &&
2623 ext4_free_group_clusters(sb, gdp) > 0 &&
2624 !(ext4_has_group_desc_csum(sb) &&
2625 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2626 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2627 break;
2628 }
2629 }
2630 }
2631
2632 static noinline_for_stack int
2633 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2634 {
2635 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2636 int cr = -1;
2637 int err = 0, first_err = 0;
2638 unsigned int nr = 0, prefetch_ios = 0;
2639 struct ext4_sb_info *sbi;
2640 struct super_block *sb;
2641 struct ext4_buddy e4b;
2642 int lost;
2643
2644 sb = ac->ac_sb;
2645 sbi = EXT4_SB(sb);
2646 ngroups = ext4_get_groups_count(sb);
2647 /* non-extent files are limited to low blocks/groups */
2648 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2649 ngroups = sbi->s_blockfile_groups;
2650
2651 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2652
2653 /* first, try the goal */
2654 err = ext4_mb_find_by_goal(ac, &e4b);
2655 if (err || ac->ac_status == AC_STATUS_FOUND)
2656 goto out;
2657
2658 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2659 goto out;
2660
2661 /*
2662 * ac->ac_2order is set only if the fe_len is a power of 2.
2663 * If ac->ac_2order is set, we also set the criteria to 0 so that we
2664 * try an exact allocation using the buddy.
2665 */
2666 i = fls(ac->ac_g_ex.fe_len);
2667 ac->ac_2order = 0;
2668 /*
2669 * We search using buddy data only if the order of the request
2670 * is greater than or equal to sbi->s_mb_order2_reqs.
2671 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2672 * We also support searching for power-of-two requests only up to
2673 * the maximum buddy size we have constructed.
2674 */
2675 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2676 /*
2677 * This should tell if fe_len is exactly a power of 2
2678 */
2679 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2680 ac->ac_2order = array_index_nospec(i - 1,
2681 MB_NUM_ORDERS(sb));
2682 }
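/*
 * For example (illustrative): a request of fe_len = 8 clusters gives
 * i = fls(8) = 4, and (8 & ~(1 << 3)) == 0 confirms an exact power of
 * two, so ac_2order becomes 3; a request of 6 clusters fails the test,
 * leaves ac_2order at 0, and is handled by the regular (non-buddy-
 * order) scan starting at cr = 1.
 */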
2683
2684 /* if stream allocation is enabled, use global goal */
2685 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2686 /* TBD: may be hot point */
2687 spin_lock(&sbi->s_md_lock);
2688 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2689 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2690 spin_unlock(&sbi->s_md_lock);
2691 }
2692
2693 /* Let's just scan groups to find more or less suitable blocks */
2694 cr = ac->ac_2order ? 0 : 1;
2695 /*
2696 * cr == 0 try to get exact allocation,
2697 * cr == 3 try to get anything
2698 */
2699 repeat:
2700 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2701 ac->ac_criteria = cr;
2702 /*
2703 * searching for the right group start
2704 * from the goal value specified
2705 */
2706 group = ac->ac_g_ex.fe_group;
2707 ac->ac_last_optimal_group = group;
2708 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2709 prefetch_grp = group;
2710
2711 for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
2712 i++) {
2713 int ret = 0, new_cr;
2714
2715 cond_resched();
2716
2717 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
2718 if (new_cr != cr) {
2719 cr = new_cr;
2720 goto repeat;
2721 }
2722
2723 /*
2724 * Batch reads of the block allocation bitmaps
2725 * to get multiple READs in flight; limit
2726 * prefetching at cr=0/1, otherwise mballoc can
2727 * spend a lot of time loading imperfect groups
2728 */
2729 if ((prefetch_grp == group) &&
2730 (cr > 1 ||
2731 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2732 unsigned int curr_ios = prefetch_ios;
2733
2734 nr = sbi->s_mb_prefetch;
2735 if (ext4_has_feature_flex_bg(sb)) {
2736 nr = 1 << sbi->s_log_groups_per_flex;
2737 nr -= group & (nr - 1);
2738 nr = min(nr, sbi->s_mb_prefetch);
2739 }
2740 prefetch_grp = ext4_mb_prefetch(sb, group,
2741 nr, &prefetch_ios);
2742 if (prefetch_ios == curr_ios)
2743 nr = 0;
2744 }
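/*
 * Prefetch batching, by example (illustrative numbers): with
 * 2^s_log_groups_per_flex = 16 groups per flex group and group = 35,
 * nr = 16 - (35 & 15) = 13, i.e. read ahead to the end of the current
 * flex group (further capped by s_mb_prefetch), so a single batch
 * never straddles a flex group boundary.
 */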
2745
2746 /* This now checks without needing the buddy page */
2747 ret = ext4_mb_good_group_nolock(ac, group, cr);
2748 if (ret <= 0) {
2749 if (!first_err)
2750 first_err = ret;
2751 continue;
2752 }
2753
2754 err = ext4_mb_load_buddy(sb, group, &e4b);
2755 if (err)
2756 goto out;
2757
2758 ext4_lock_group(sb, group);
2759
2760 /*
2761 * We need to check again after locking the
2762 * block group
2763 */
2764 ret = ext4_mb_good_group(ac, group, cr);
2765 if (ret == 0) {
2766 ext4_unlock_group(sb, group);
2767 ext4_mb_unload_buddy(&e4b);
2768 continue;
2769 }
2770
2771 ac->ac_groups_scanned++;
2772 if (cr == 0)
2773 ext4_mb_simple_scan_group(ac, &e4b);
2774 else if (cr == 1 && sbi->s_stripe &&
2775 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2776 ext4_mb_scan_aligned(ac, &e4b);
2777 else
2778 ext4_mb_complex_scan_group(ac, &e4b);
2779
2780 ext4_unlock_group(sb, group);
2781 ext4_mb_unload_buddy(&e4b);
2782
2783 if (ac->ac_status != AC_STATUS_CONTINUE)
2784 break;
2785 }
2786 /* Processed all groups and haven't found blocks */
2787 if (sbi->s_mb_stats && i == ngroups)
2788 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2789 }
2790
2791 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2792 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2793 /*
2794 * We've been searching too long. Let's try to allocate
2795 * the best chunk we've found so far
2796 */
2797 ext4_mb_try_best_found(ac, &e4b);
2798 if (ac->ac_status != AC_STATUS_FOUND) {
2799 /*
2800 * Someone more lucky has already allocated it.
2801 * The only thing we can do is just take first
2802 * found block(s)
2803 */
2804 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2805 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2806 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2807 ac->ac_b_ex.fe_len, lost);
2808
2809 ac->ac_b_ex.fe_group = 0;
2810 ac->ac_b_ex.fe_start = 0;
2811 ac->ac_b_ex.fe_len = 0;
2812 ac->ac_status = AC_STATUS_CONTINUE;
2813 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2814 cr = 3;
2815 goto repeat;
2816 }
2817 }
2818
2819 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2820 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2821 out:
2822 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2823 err = first_err;
2824
2825 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2826 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2827 ac->ac_flags, cr, err);
2828
2829 if (nr)
2830 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2831
2832 return err;
2833 }
2834
2835 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2836 {
2837 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2838 ext4_group_t group;
2839
2840 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2841 return NULL;
2842 group = *pos + 1;
2843 return (void *) ((unsigned long) group);
2844 }
2845
2846 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2847 {
2848 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2849 ext4_group_t group;
2850
2851 ++*pos;
2852 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2853 return NULL;
2854 group = *pos + 1;
2855 return (void *) ((unsigned long) group);
2856 }
2857
2858 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2859 {
2860 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2861 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2862 int i;
2863 int err, buddy_loaded = 0;
2864 struct ext4_buddy e4b;
2865 struct ext4_group_info *grinfo;
2866 unsigned char blocksize_bits = min_t(unsigned char,
2867 sb->s_blocksize_bits,
2868 EXT4_MAX_BLOCK_LOG_SIZE);
2869 struct sg {
2870 struct ext4_group_info info;
2871 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2872 } sg;
2873
2874 group--;
2875 if (group == 0)
2876 seq_puts(seq, "#group: free frags first ["
2877 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2878 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2879
2880 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2881 sizeof(struct ext4_group_info);
2882
2883 grinfo = ext4_get_group_info(sb, group);
2884 /* Load the group info in memory only if not already loaded. */
2885 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2886 err = ext4_mb_load_buddy(sb, group, &e4b);
2887 if (err) {
2888 seq_printf(seq, "#%-5u: I/O error\n", group);
2889 return 0;
2890 }
2891 buddy_loaded = 1;
2892 }
2893
2894 memcpy(&sg, ext4_get_group_info(sb, group), i);
2895
2896 if (buddy_loaded)
2897 ext4_mb_unload_buddy(&e4b);
2898
2899 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2900 sg.info.bb_fragments, sg.info.bb_first_free);
2901 for (i = 0; i <= 13; i++)
2902 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2903 sg.info.bb_counters[i] : 0);
2904 seq_puts(seq, " ]\n");
2905
2906 return 0;
2907 }
2908
2909 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2910 {
2911 }
2912
2913 const struct seq_operations ext4_mb_seq_groups_ops = {
2914 .start = ext4_mb_seq_groups_start,
2915 .next = ext4_mb_seq_groups_next,
2916 .stop = ext4_mb_seq_groups_stop,
2917 .show = ext4_mb_seq_groups_show,
2918 };
2919
2920 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2921 {
2922 struct super_block *sb = (struct super_block *)seq->private;
2923 struct ext4_sb_info *sbi = EXT4_SB(sb);
2924
2925 seq_puts(seq, "mballoc:\n");
2926 if (!sbi->s_mb_stats) {
2927 seq_puts(seq, "\tmb stats collection turned off.\n");
2928 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2929 return 0;
2930 }
2931 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2932 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2933
2934 seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned));
2935
2936 seq_puts(seq, "\tcr0_stats:\n");
2937 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2938 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2939 atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2940 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2941 atomic64_read(&sbi->s_bal_cX_failed[0]));
2942 seq_printf(seq, "\t\tbad_suggestions: %u\n",
2943 atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2944
2945 seq_puts(seq, "\tcr1_stats:\n");
2946 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2947 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2948 atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2949 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2950 atomic64_read(&sbi->s_bal_cX_failed[1]));
2951 seq_printf(seq, "\t\tbad_suggestions: %u\n",
2952 atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2953
2954 seq_puts(seq, "\tcr2_stats:\n");
2955 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2956 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2957 atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2958 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2959 atomic64_read(&sbi->s_bal_cX_failed[2]));
2960
2961 seq_puts(seq, "\tcr3_stats:\n");
2962 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2963 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2964 atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2965 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2966 atomic64_read(&sbi->s_bal_cX_failed[3]));
2967 seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2968 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2969 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2970 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2971 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2972
2973 seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2974 atomic_read(&sbi->s_mb_buddies_generated),
2975 ext4_get_groups_count(sb));
2976 seq_printf(seq, "\tbuddies_time_used: %llu\n",
2977 atomic64_read(&sbi->s_mb_generation_time));
2978 seq_printf(seq, "\tpreallocated: %u\n",
2979 atomic_read(&sbi->s_mb_preallocated));
2980 seq_printf(seq, "\tdiscarded: %u\n",
2981 atomic_read(&sbi->s_mb_discarded));
2982 return 0;
2983 }
2984
2985 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2986 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
2987 {
2988 struct super_block *sb = PDE_DATA(file_inode(seq->file));
2989 unsigned long position;
2990
2991 read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
2992
2993 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
2994 return NULL;
2995 position = *pos + 1;
2996 return (void *) ((unsigned long) position);
2997 }
2998
2999 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3000 {
3001 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3002 unsigned long position;
3003
3004 ++*pos;
3005 if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
3006 return NULL;
3007 position = *pos + 1;
3008 return (void *) ((unsigned long) position);
3009 }
3010
3011 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3012 {
3013 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3014 struct ext4_sb_info *sbi = EXT4_SB(sb);
3015 unsigned long position = ((unsigned long) v);
3016 struct ext4_group_info *grp;
3017 struct rb_node *n;
3018 unsigned int count, min, max;
3019
3020 position--;
3021 if (position >= MB_NUM_ORDERS(sb)) {
3022 seq_puts(seq, "fragment_size_tree:\n");
3023 n = rb_first(&sbi->s_mb_avg_fragment_size_root);
3024 if (!n) {
3025 seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
3026 return 0;
3027 }
3028 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3029 min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3030 count = 1;
3031 while (rb_next(n)) {
3032 count++;
3033 n = rb_next(n);
3034 }
3035 grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
3036 max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
3037
3038 seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
3039 min, max, count);
3040 return 0;
3041 }
3042
3043 if (position == 0) {
3044 seq_printf(seq, "optimize_scan: %d\n",
3045 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3046 seq_puts(seq, "max_free_order_lists:\n");
3047 }
3048 count = 0;
3049 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3050 bb_largest_free_order_node)
3051 count++;
3052 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3053 (unsigned int)position, count);
3054
3055 return 0;
3056 }
3057
3058 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3059 __releases(&EXT4_SB(sb)->s_mb_rb_lock)
3060 {
3061 struct super_block *sb = PDE_DATA(file_inode(seq->file));
3062
3063 read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
3064 }
3065
3066 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3067 .start = ext4_mb_seq_structs_summary_start,
3068 .next = ext4_mb_seq_structs_summary_next,
3069 .stop = ext4_mb_seq_structs_summary_stop,
3070 .show = ext4_mb_seq_structs_summary_show,
3071 };
3072
3073 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3074 {
3075 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3076 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3077
3078 BUG_ON(!cachep);
3079 return cachep;
3080 }
3081
3082 /*
3083 * Allocate the top-level s_group_info array for the specified number
3084 * of groups
3085 */
3086 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3087 {
3088 struct ext4_sb_info *sbi = EXT4_SB(sb);
3089 unsigned size;
3090 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3091
3092 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3093 EXT4_DESC_PER_BLOCK_BITS(sb);
3094 if (size <= sbi->s_group_info_size)
3095 return 0;
3096
3097 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3098 new_groupinfo = kvzalloc(size, GFP_KERNEL);
3099 if (!new_groupinfo) {
3100 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3101 return -ENOMEM;
3102 }
3103 rcu_read_lock();
3104 old_groupinfo = rcu_dereference(sbi->s_group_info);
3105 if (old_groupinfo)
3106 memcpy(new_groupinfo, old_groupinfo,
3107 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3108 rcu_read_unlock();
3109 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3110 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3111 if (old_groupinfo)
3112 ext4_kvfree_array_rcu(old_groupinfo);
3113 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3114 sbi->s_group_info_size);
3115 return 0;
3116 }
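/*
 * Sizing example (a sketch, assuming a 4 KiB block size and 32-byte
 * group descriptors, i.e. EXT4_DESC_PER_BLOCK = 128): a filesystem
 * with ngroups = 16384 needs 16384 / 128 = 128 top-level slots, each
 * pointing at a second-level table of 128 ext4_group_info pointers.
 * The top-level allocation is rounded up to a power of two, so modest
 * growth (e.g. online resize) often fits without reallocating.
 */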
3117
3118 /* Create and initialize ext4_group_info data for the given group. */
3119 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3120 struct ext4_group_desc *desc)
3121 {
3122 int i;
3123 int metalen = 0;
3124 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3125 struct ext4_sb_info *sbi = EXT4_SB(sb);
3126 struct ext4_group_info **meta_group_info;
3127 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3128
3129 /*
3130 * First check if this group is the first of a descriptor block.
3131 * If so, we have to allocate a new table of pointers
3132 * to ext4_group_info structures
3133 */
3134 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3135 metalen = sizeof(*meta_group_info) <<
3136 EXT4_DESC_PER_BLOCK_BITS(sb);
3137 meta_group_info = kmalloc(metalen, GFP_NOFS);
3138 if (meta_group_info == NULL) {
3139 ext4_msg(sb, KERN_ERR, "can't allocate mem "
3140 "for a buddy group");
3141 goto exit_meta_group_info;
3142 }
3143 rcu_read_lock();
3144 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3145 rcu_read_unlock();
3146 }
3147
3148 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3149 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3150
3151 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3152 if (meta_group_info[i] == NULL) {
3153 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3154 goto exit_group_info;
3155 }
3156 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3157 &(meta_group_info[i]->bb_state));
3158
3159 /*
3160 * initialize bb_free to be able to skip
3161 * empty groups without initialization
3162 */
3163 if (ext4_has_group_desc_csum(sb) &&
3164 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3165 meta_group_info[i]->bb_free =
3166 ext4_free_clusters_after_init(sb, group, desc);
3167 } else {
3168 meta_group_info[i]->bb_free =
3169 ext4_free_group_clusters(sb, desc);
3170 }
3171
3172 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3173 init_rwsem(&meta_group_info[i]->alloc_sem);
3174 meta_group_info[i]->bb_free_root = RB_ROOT;
3175 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3176 RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
3177 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
3178 meta_group_info[i]->bb_group = group;
3179
3180 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3181 return 0;
3182
3183 exit_group_info:
3184 /* If a meta_group_info table has been allocated, release it now */
3185 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3186 struct ext4_group_info ***group_info;
3187
3188 rcu_read_lock();
3189 group_info = rcu_dereference(sbi->s_group_info);
3190 kfree(group_info[idx]);
3191 group_info[idx] = NULL;
3192 rcu_read_unlock();
3193 }
3194 exit_meta_group_info:
3195 return -ENOMEM;
3196 } /* ext4_mb_add_groupinfo */
3197
3198 static int ext4_mb_init_backend(struct super_block *sb)
3199 {
3200 ext4_group_t ngroups = ext4_get_groups_count(sb);
3201 ext4_group_t i;
3202 struct ext4_sb_info *sbi = EXT4_SB(sb);
3203 int err;
3204 struct ext4_group_desc *desc;
3205 struct ext4_group_info ***group_info;
3206 struct kmem_cache *cachep;
3207
3208 err = ext4_mb_alloc_groupinfo(sb, ngroups);
3209 if (err)
3210 return err;
3211
3212 sbi->s_buddy_cache = new_inode(sb);
3213 if (sbi->s_buddy_cache == NULL) {
3214 ext4_msg(sb, KERN_ERR, "can't get new inode");
3215 goto err_freesgi;
3216 }
3217 /* To avoid potentially colliding with a valid on-disk inode number,
3218 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
3219 * not in the inode hash, so it should never be found by iget(), but
3220 * this will avoid confusion if it ever shows up during debugging. */
3221 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3222 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3223 for (i = 0; i < ngroups; i++) {
3224 cond_resched();
3225 desc = ext4_get_group_desc(sb, i, NULL);
3226 if (desc == NULL) {
3227 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3228 goto err_freebuddy;
3229 }
3230 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3231 goto err_freebuddy;
3232 }
3233
3234 if (ext4_has_feature_flex_bg(sb)) {
3235 /* a single flex group is supposed to be read by a single IO.
3236 * 2 ^ s_log_groups_per_flex must fit in s_mb_prefetch, which is an
3237 * unsigned integer, so shifts of 32 or more are rejected.
3238 */
3239 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3240 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3241 goto err_freebuddy;
3242 }
3243 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3244 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3245 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3246 } else {
3247 sbi->s_mb_prefetch = 32;
3248 }
3249 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3250 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3251 /* how many real IOs to prefetch within a single allocation at cr=0.
3252 * given that cr=0 is a CPU-related optimization we shouldn't try to
3253 * load too many groups; at some point we should start to use what
3254 * we've got in memory.
3255 * with an average random access time of 5ms, it'd take a second to get
3256 * 200 groups (* N with flex_bg), so let's make this limit 4
3257 */
3258 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3259 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3260 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3261
3262 return 0;
3263
3264 err_freebuddy:
3265 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3266 while (i-- > 0)
3267 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3268 i = sbi->s_group_info_size;
3269 rcu_read_lock();
3270 group_info = rcu_dereference(sbi->s_group_info);
3271 while (i-- > 0)
3272 kfree(group_info[i]);
3273 rcu_read_unlock();
3274 iput(sbi->s_buddy_cache);
3275 err_freesgi:
3276 rcu_read_lock();
3277 kvfree(rcu_dereference(sbi->s_group_info));
3278 rcu_read_unlock();
3279 return -ENOMEM;
3280 }
3281
3282 static void ext4_groupinfo_destroy_slabs(void)
3283 {
3284 int i;
3285
3286 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3287 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3288 ext4_groupinfo_caches[i] = NULL;
3289 }
3290 }
3291
3292 static int ext4_groupinfo_create_slab(size_t size)
3293 {
3294 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3295 int slab_size;
3296 int blocksize_bits = order_base_2(size);
3297 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3298 struct kmem_cache *cachep;
3299
3300 if (cache_index >= NR_GRPINFO_CACHES)
3301 return -EINVAL;
3302
3303 if (unlikely(cache_index < 0))
3304 cache_index = 0;
3305
3306 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3307 if (ext4_groupinfo_caches[cache_index]) {
3308 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3309 return 0; /* Already created */
3310 }
3311
3312 slab_size = offsetof(struct ext4_group_info,
3313 bb_counters[blocksize_bits + 2]);
3314
3315 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3316 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3317 NULL);
3318
3319 ext4_groupinfo_caches[cache_index] = cachep;
3320
3321 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3322 if (!cachep) {
3323 printk(KERN_EMERG
3324 "EXT4-fs: no memory for groupinfo slab cache\n");
3325 return -ENOMEM;
3326 }
3327
3328 return 0;
3329 }
3330
3331 static void ext4_discard_work(struct work_struct *work)
3332 {
3333 struct ext4_sb_info *sbi = container_of(work,
3334 struct ext4_sb_info, s_discard_work);
3335 struct super_block *sb = sbi->s_sb;
3336 struct ext4_free_data *fd, *nfd;
3337 struct ext4_buddy e4b;
3338 struct list_head discard_list;
3339 ext4_group_t grp, load_grp;
3340 int err = 0;
3341
3342 INIT_LIST_HEAD(&discard_list);
3343 spin_lock(&sbi->s_md_lock);
3344 list_splice_init(&sbi->s_discard_list, &discard_list);
3345 spin_unlock(&sbi->s_md_lock);
3346
3347 load_grp = UINT_MAX;
3348 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3349 /*
3350 * If the filesystem is unmounting, we ran out of memory, or we are
3351 * suffering from lack of space, give up the discard
3352 */
3353 if ((sb->s_flags & SB_ACTIVE) && !err &&
3354 !atomic_read(&sbi->s_retry_alloc_pending)) {
3355 grp = fd->efd_group;
3356 if (grp != load_grp) {
3357 if (load_grp != UINT_MAX)
3358 ext4_mb_unload_buddy(&e4b);
3359
3360 err = ext4_mb_load_buddy(sb, grp, &e4b);
3361 if (err) {
3362 kmem_cache_free(ext4_free_data_cachep, fd);
3363 load_grp = UINT_MAX;
3364 continue;
3365 } else {
3366 load_grp = grp;
3367 }
3368 }
3369
3370 ext4_lock_group(sb, grp);
3371 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3372 fd->efd_start_cluster + fd->efd_count - 1, 1);
3373 ext4_unlock_group(sb, grp);
3374 }
3375 kmem_cache_free(ext4_free_data_cachep, fd);
3376 }
3377
3378 if (load_grp != UINT_MAX)
3379 ext4_mb_unload_buddy(&e4b);
3380 }
3381
3382 int ext4_mb_init(struct super_block *sb)
3383 {
3384 struct ext4_sb_info *sbi = EXT4_SB(sb);
3385 unsigned i, j;
3386 unsigned offset, offset_incr;
3387 unsigned max;
3388 int ret;
3389
3390 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3391
3392 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3393 if (sbi->s_mb_offsets == NULL) {
3394 ret = -ENOMEM;
3395 goto out;
3396 }
3397
3398 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3399 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3400 if (sbi->s_mb_maxs == NULL) {
3401 ret = -ENOMEM;
3402 goto out;
3403 }
3404
3405 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3406 if (ret < 0)
3407 goto out;
3408
3409 /* order 0 is regular bitmap */
3410 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3411 sbi->s_mb_offsets[0] = 0;
3412
3413 i = 1;
3414 offset = 0;
3415 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3416 max = sb->s_blocksize << 2;
3417 do {
3418 sbi->s_mb_offsets[i] = offset;
3419 sbi->s_mb_maxs[i] = max;
3420 offset += offset_incr;
3421 offset_incr = offset_incr >> 1;
3422 max = max >> 1;
3423 i++;
3424 } while (i < MB_NUM_ORDERS(sb));
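/*
 * Layout example (a sketch, for a 4 KiB block size, blkbits = 12):
 * order 0 is the on-disk bitmap itself (32768 bits), while the buddy
 * block packs the higher orders back to back:
 *   order 1: byte offset 0,    16384 bits
 *   order 2: byte offset 2048,  8192 bits
 *   order 3: byte offset 3072,  4096 bits
 *   ...
 * each order occupying half the space of the previous one, so all of
 * them fit in a single block.
 */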
3425
3426 sbi->s_mb_avg_fragment_size_root = RB_ROOT;
3427 sbi->s_mb_largest_free_orders =
3428 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3429 GFP_KERNEL);
3430 if (!sbi->s_mb_largest_free_orders) {
3431 ret = -ENOMEM;
3432 goto out;
3433 }
3434 sbi->s_mb_largest_free_orders_locks =
3435 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3436 GFP_KERNEL);
3437 if (!sbi->s_mb_largest_free_orders_locks) {
3438 ret = -ENOMEM;
3439 goto out;
3440 }
3441 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3442 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3443 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3444 }
3445 rwlock_init(&sbi->s_mb_rb_lock);
3446
3447 spin_lock_init(&sbi->s_md_lock);
3448 sbi->s_mb_free_pending = 0;
3449 INIT_LIST_HEAD(&sbi->s_freed_data_list);
3450 INIT_LIST_HEAD(&sbi->s_discard_list);
3451 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3452 atomic_set(&sbi->s_retry_alloc_pending, 0);
3453
3454 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3455 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3456 sbi->s_mb_stats = MB_DEFAULT_STATS;
3457 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3458 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3459 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3460 /*
3461 * The default group preallocation is 512, which for 4k block
3462 * sizes translates to 2 megabytes. However for bigalloc file
3463 * systems, this is probably too big (i.e, if the cluster size
3464 * is 1 megabyte, then group preallocation size becomes half a
3465 * gigabyte!). As a default, we will keep a two megabyte
3466 * group pralloc size for cluster sizes up to 64k, and after
3467 * that, we will force a minimum group preallocation size of
3468 * 32 clusters. This translates to 8 megs when the cluster
3469 * size is 256k, and 32 megs when the cluster size is 1 meg,
3470 * which seems reasonable as a default.
3471 */
3472 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3473 sbi->s_cluster_bits, 32);
3474 /*
3475 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3476 * to the lowest multiple of s_stripe which is bigger than
3477 * the s_mb_group_prealloc as determined above. We want
3478 * the preallocation size to be an exact multiple of the
3479 * RAID stripe size so that preallocations don't fragment
3480 * the stripes.
3481 */
3482 if (sbi->s_stripe > 1) {
3483 sbi->s_mb_group_prealloc = roundup(
3484 sbi->s_mb_group_prealloc, sbi->s_stripe);
3485 }
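/*
 * For example (illustrative): on a bigalloc filesystem with 64 KiB
 * clusters (s_cluster_bits = 4), the default shrinks to
 * max(512 >> 4, 32) = 32 clusters; with s_stripe = 24, roundup(32, 24)
 * then lifts the group preallocation to 48 clusters so preallocations
 * stay stripe-aligned.
 */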
3486
3487 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3488 if (sbi->s_locality_groups == NULL) {
3489 ret = -ENOMEM;
3490 goto out;
3491 }
3492 for_each_possible_cpu(i) {
3493 struct ext4_locality_group *lg;
3494 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3495 mutex_init(&lg->lg_mutex);
3496 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3497 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3498 spin_lock_init(&lg->lg_prealloc_lock);
3499 }
3500
3501 if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
3502 sbi->s_mb_max_linear_groups = 0;
3503 else
3504 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3505 /* init file for buddy data */
3506 ret = ext4_mb_init_backend(sb);
3507 if (ret != 0)
3508 goto out_free_locality_groups;
3509
3510 return 0;
3511
3512 out_free_locality_groups:
3513 free_percpu(sbi->s_locality_groups);
3514 sbi->s_locality_groups = NULL;
3515 out:
3516 kfree(sbi->s_mb_largest_free_orders);
3517 kfree(sbi->s_mb_largest_free_orders_locks);
3518 kfree(sbi->s_mb_offsets);
3519 sbi->s_mb_offsets = NULL;
3520 kfree(sbi->s_mb_maxs);
3521 sbi->s_mb_maxs = NULL;
3522 return ret;
3523 }
3524
3525 /* needs to be called with the ext4 group lock held */
3526 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3527 {
3528 struct ext4_prealloc_space *pa;
3529 struct list_head *cur, *tmp;
3530 int count = 0;
3531
3532 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3533 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3534 list_del(&pa->pa_group_list);
3535 count++;
3536 kmem_cache_free(ext4_pspace_cachep, pa);
3537 }
3538 return count;
3539 }
3540
3541 int ext4_mb_release(struct super_block *sb)
3542 {
3543 ext4_group_t ngroups = ext4_get_groups_count(sb);
3544 ext4_group_t i;
3545 int num_meta_group_infos;
3546 struct ext4_group_info *grinfo, ***group_info;
3547 struct ext4_sb_info *sbi = EXT4_SB(sb);
3548 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3549 int count;
3550
3551 if (test_opt(sb, DISCARD)) {
3552 /*
3553 * wait for the discard work to drain all of the ext4_free_data entries
3554 */
3555 flush_work(&sbi->s_discard_work);
3556 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3557 }
3558
3559 if (sbi->s_group_info) {
3560 for (i = 0; i < ngroups; i++) {
3561 cond_resched();
3562 grinfo = ext4_get_group_info(sb, i);
3563 mb_group_bb_bitmap_free(grinfo);
3564 ext4_lock_group(sb, i);
3565 count = ext4_mb_cleanup_pa(grinfo);
3566 if (count)
3567 mb_debug(sb, "mballoc: %d PAs left\n",
3568 count);
3569 ext4_unlock_group(sb, i);
3570 kmem_cache_free(cachep, grinfo);
3571 }
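/*
 * e.g. with 4k blocks and 32-byte group descriptors,
 * EXT4_DESC_PER_BLOCK is 128, so the computation below rounds
 * ngroups up to whole descriptor blocks.
 */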
3572 num_meta_group_infos = (ngroups +
3573 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3574 EXT4_DESC_PER_BLOCK_BITS(sb);
3575 rcu_read_lock();
3576 group_info = rcu_dereference(sbi->s_group_info);
3577 for (i = 0; i < num_meta_group_infos; i++)
3578 kfree(group_info[i]);
3579 kvfree(group_info);
3580 rcu_read_unlock();
3581 }
3582 kfree(sbi->s_mb_largest_free_orders);
3583 kfree(sbi->s_mb_largest_free_orders_locks);
3584 kfree(sbi->s_mb_offsets);
3585 kfree(sbi->s_mb_maxs);
3586 iput(sbi->s_buddy_cache);
3587 if (sbi->s_mb_stats) {
3588 ext4_msg(sb, KERN_INFO,
3589 "mballoc: %u blocks %u reqs (%u success)",
3590 atomic_read(&sbi->s_bal_allocated),
3591 atomic_read(&sbi->s_bal_reqs),
3592 atomic_read(&sbi->s_bal_success));
3593 ext4_msg(sb, KERN_INFO,
3594 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3595 "%u 2^N hits, %u breaks, %u lost",
3596 atomic_read(&sbi->s_bal_ex_scanned),
3597 atomic_read(&sbi->s_bal_groups_scanned),
3598 atomic_read(&sbi->s_bal_goals),
3599 atomic_read(&sbi->s_bal_2orders),
3600 atomic_read(&sbi->s_bal_breaks),
3601 atomic_read(&sbi->s_mb_lost_chunks));
3602 ext4_msg(sb, KERN_INFO,
3603 "mballoc: %u generated and it took %llu",
3604 atomic_read(&sbi->s_mb_buddies_generated),
3605 atomic64_read(&sbi->s_mb_generation_time));
3606 ext4_msg(sb, KERN_INFO,
3607 "mballoc: %u preallocated, %u discarded",
3608 atomic_read(&sbi->s_mb_preallocated),
3609 atomic_read(&sbi->s_mb_discarded));
3610 }
3611
3612 free_percpu(sbi->s_locality_groups);
3613
3614 return 0;
3615 }
3616
3617 static inline int ext4_issue_discard(struct super_block *sb,
3618 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3619 struct bio **biop)
3620 {
3621 ext4_fsblk_t discard_block;
3622
3623 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3624 ext4_group_first_block_no(sb, block_group));
3625 count = EXT4_C2B(EXT4_SB(sb), count);
3626 trace_ext4_discard_blocks(sb,
3627 (unsigned long long) discard_block, count);
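/*
 * The shifts below convert filesystem blocks to 512-byte sectors,
 * e.g. with 4k blocks (s_blocksize_bits == 12) each block spans
 * 1 << (12 - 9) = 8 sectors.
 */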
3628 if (biop) {
3629 return __blkdev_issue_discard(sb->s_bdev,
3630 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3631 (sector_t)count << (sb->s_blocksize_bits - 9),
3632 GFP_NOFS, 0, biop);
3633 } else
3634 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3635 }
3636
3637 static void ext4_free_data_in_buddy(struct super_block *sb,
3638 struct ext4_free_data *entry)
3639 {
3640 struct ext4_buddy e4b;
3641 struct ext4_group_info *db;
3642 int err, count = 0, count2 = 0;
3643
3644 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3645 entry->efd_count, entry->efd_group, entry);
3646
3647 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3648 /* we expect to find existing buddy because it's pinned */
3649 BUG_ON(err != 0);
3650
3651 spin_lock(&EXT4_SB(sb)->s_md_lock);
3652 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3653 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3654
3655 db = e4b.bd_info;
3656 /* there are blocks to put in buddy to make them really free */
3657 count += entry->efd_count;
3658 count2++;
3659 ext4_lock_group(sb, entry->efd_group);
3660 /* Take it out of per group rb tree */
3661 rb_erase(&entry->efd_node, &(db->bb_free_root));
3662 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3663
3664 /*
3665 * Clear the trimmed flag for the group so that the next
3666 * ext4_trim_fs can trim it.
3667 * If the volume is mounted with -o discard, online discard
3668 * is supported and the free blocks will be trimmed online.
3669 */
3670 if (!test_opt(sb, DISCARD))
3671 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3672
3673 if (!db->bb_free_root.rb_node) {
3674 /* No more items in the per group rb tree
3675 * balance refcounts from ext4_mb_free_metadata()
3676 */
3677 put_page(e4b.bd_buddy_page);
3678 put_page(e4b.bd_bitmap_page);
3679 }
3680 ext4_unlock_group(sb, entry->efd_group);
3681 ext4_mb_unload_buddy(&e4b);
3682
3683 mb_debug(sb, "freed %d blocks in %d structures\n", count,
3684 count2);
3685 }
3686
3687 /*
3688 * This function is called by the jbd2 layer once the commit has finished,
3689 * so we know we can free the blocks that were released with that commit.
3690 */
3691 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3692 {
3693 struct ext4_sb_info *sbi = EXT4_SB(sb);
3694 struct ext4_free_data *entry, *tmp;
3695 struct list_head freed_data_list;
3696 struct list_head *cut_pos = NULL;
3697 bool wake;
3698
3699 INIT_LIST_HEAD(&freed_data_list);
3700
3701 spin_lock(&sbi->s_md_lock);
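/*
 * s_freed_data_list is kept in transaction commit order, so
 * everything up to the last entry carrying this tid belongs to the
 * just-committed transaction and can be cut off in one splice.
 */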
3702 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3703 if (entry->efd_tid != commit_tid)
3704 break;
3705 cut_pos = &entry->efd_list;
3706 }
3707 if (cut_pos)
3708 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3709 cut_pos);
3710 spin_unlock(&sbi->s_md_lock);
3711
3712 list_for_each_entry(entry, &freed_data_list, efd_list)
3713 ext4_free_data_in_buddy(sb, entry);
3714
3715 if (test_opt(sb, DISCARD)) {
3716 spin_lock(&sbi->s_md_lock);
3717 wake = list_empty(&sbi->s_discard_list);
3718 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3719 spin_unlock(&sbi->s_md_lock);
3720 if (wake)
3721 queue_work(system_unbound_wq, &sbi->s_discard_work);
3722 } else {
3723 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3724 kmem_cache_free(ext4_free_data_cachep, entry);
3725 }
3726 }
3727
3728 int __init ext4_init_mballoc(void)
3729 {
3730 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3731 SLAB_RECLAIM_ACCOUNT);
3732 if (ext4_pspace_cachep == NULL)
3733 goto out;
3734
3735 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3736 SLAB_RECLAIM_ACCOUNT);
3737 if (ext4_ac_cachep == NULL)
3738 goto out_pa_free;
3739
3740 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3741 SLAB_RECLAIM_ACCOUNT);
3742 if (ext4_free_data_cachep == NULL)
3743 goto out_ac_free;
3744
3745 return 0;
3746
3747 out_ac_free:
3748 kmem_cache_destroy(ext4_ac_cachep);
3749 out_pa_free:
3750 kmem_cache_destroy(ext4_pspace_cachep);
3751 out:
3752 return -ENOMEM;
3753 }
3754
3755 void ext4_exit_mballoc(void)
3756 {
3757 /*
3758 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3759 * before destroying the slab cache.
3760 */
3761 rcu_barrier();
3762 kmem_cache_destroy(ext4_pspace_cachep);
3763 kmem_cache_destroy(ext4_ac_cachep);
3764 kmem_cache_destroy(ext4_free_data_cachep);
3765 ext4_groupinfo_destroy_slabs();
3766 }
3767
3768
3769 /*
3770 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3771 * Returns 0 on success or an error code
3772 */
3773 static noinline_for_stack int
3774 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3775 handle_t *handle, unsigned int reserv_clstrs)
3776 {
3777 struct buffer_head *bitmap_bh = NULL;
3778 struct ext4_group_desc *gdp;
3779 struct buffer_head *gdp_bh;
3780 struct ext4_sb_info *sbi;
3781 struct super_block *sb;
3782 ext4_fsblk_t block;
3783 int err, len;
3784
3785 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3786 BUG_ON(ac->ac_b_ex.fe_len <= 0);
3787
3788 sb = ac->ac_sb;
3789 sbi = EXT4_SB(sb);
3790
3791 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3792 if (IS_ERR(bitmap_bh)) {
3793 err = PTR_ERR(bitmap_bh);
3794 bitmap_bh = NULL;
3795 goto out_err;
3796 }
3797
3798 BUFFER_TRACE(bitmap_bh, "getting write access");
3799 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3800 EXT4_JTR_NONE);
3801 if (err)
3802 goto out_err;
3803
3804 err = -EIO;
3805 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3806 if (!gdp)
3807 goto out_err;
3808
3809 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3810 ext4_free_group_clusters(sb, gdp));
3811
3812 BUFFER_TRACE(gdp_bh, "get_write_access");
3813 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3814 if (err)
3815 goto out_err;
3816
3817 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3818
3819 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3820 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3821 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3822 "fs metadata", block, block+len);
3823 /* File system is mounted not to panic on error;
3824 * fix the bitmap and return EFSCORRUPTED.
3825 * We leak some of the blocks here.
3826 */
3827 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3828 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3829 ac->ac_b_ex.fe_len);
3830 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3831 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3832 if (!err)
3833 err = -EFSCORRUPTED;
3834 goto out_err;
3835 }
3836
3837 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3838 #ifdef AGGRESSIVE_CHECK
3839 {
3840 int i;
3841 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3842 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3843 bitmap_bh->b_data));
3844 }
3845 }
3846 #endif
3847 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3848 ac->ac_b_ex.fe_len);
3849 if (ext4_has_group_desc_csum(sb) &&
3850 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3851 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3852 ext4_free_group_clusters_set(sb, gdp,
3853 ext4_free_clusters_after_init(sb,
3854 ac->ac_b_ex.fe_group, gdp));
3855 }
3856 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3857 ext4_free_group_clusters_set(sb, gdp, len);
3858 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3859 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3860
3861 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3862 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3863 /*
3864 * Now reduce the dirty block count also. Should not go negative
3865 */
3866 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3867 /* release all the reserved blocks if non delalloc */
3868 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3869 reserv_clstrs);
3870
3871 if (sbi->s_log_groups_per_flex) {
3872 ext4_group_t flex_group = ext4_flex_group(sbi,
3873 ac->ac_b_ex.fe_group);
3874 atomic64_sub(ac->ac_b_ex.fe_len,
3875 &sbi_array_rcu_deref(sbi, s_flex_groups,
3876 flex_group)->free_clusters);
3877 }
3878
3879 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3880 if (err)
3881 goto out_err;
3882 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3883
3884 out_err:
3885 brelse(bitmap_bh);
3886 return err;
3887 }
3888
3889 /*
3890 * Idempotent helper for Ext4 fast commit replay path to set the state of
3891 * blocks in bitmaps and update counters.
3892 */
3893 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3894 int len, int state)
3895 {
3896 struct buffer_head *bitmap_bh = NULL;
3897 struct ext4_group_desc *gdp;
3898 struct buffer_head *gdp_bh;
3899 struct ext4_sb_info *sbi = EXT4_SB(sb);
3900 ext4_group_t group;
3901 ext4_grpblk_t blkoff;
3902 int i, err;
3903 int already;
3904 unsigned int clen, clen_changed, thisgrp_len;
3905
3906 while (len > 0) {
3907 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3908
3909 /*
3910 * Check to see if we are freeing blocks across a group
3911 * boundary.
3912 * With flex_bg, it can happen that (block, len)
3913 * spans across more than one group. In that case we need to
3914 * get the corresponding group metadata to work with, so we
3915 * process one group per iteration of this loop.
3916 */
3917 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3918 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3919 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3920
3921 bitmap_bh = ext4_read_block_bitmap(sb, group);
3922 if (IS_ERR(bitmap_bh)) {
3923 err = PTR_ERR(bitmap_bh);
3924 bitmap_bh = NULL;
3925 break;
3926 }
3927
3928 err = -EIO;
3929 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3930 if (!gdp)
3931 break;
3932
3933 ext4_lock_group(sb, group);
3934 already = 0;
3935 for (i = 0; i < clen; i++)
3936 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3937 !state)
3938 already++;
3939
3940 clen_changed = clen - already;
3941 if (state)
3942 ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3943 else
3944 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
3945 if (ext4_has_group_desc_csum(sb) &&
3946 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3947 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3948 ext4_free_group_clusters_set(sb, gdp,
3949 ext4_free_clusters_after_init(sb, group, gdp));
3950 }
3951 if (state)
3952 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3953 else
3954 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3955
3956 ext4_free_group_clusters_set(sb, gdp, clen);
3957 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3958 ext4_group_desc_csum_set(sb, group, gdp);
3959
3960 ext4_unlock_group(sb, group);
3961
3962 if (sbi->s_log_groups_per_flex) {
3963 ext4_group_t flex_group = ext4_flex_group(sbi, group);
3964 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3965 s_flex_groups, flex_group);
3966
3967 if (state)
3968 atomic64_sub(clen_changed, &fg->free_clusters);
3969 else
3970 atomic64_add(clen_changed, &fg->free_clusters);
3971
3972 }
3973
3974 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3975 if (err)
3976 break;
3977 sync_dirty_buffer(bitmap_bh);
3978 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3979 sync_dirty_buffer(gdp_bh);
3980 if (err)
3981 break;
3982
3983 block += thisgrp_len;
3984 len -= thisgrp_len;
3985 brelse(bitmap_bh);
3986 BUG_ON(len < 0);
3987 }
3988
3989 if (err)
3990 brelse(bitmap_bh);
3991 }
3992
3993 /*
3994 * here we normalize the request for a locality group
3995 * Group requests are normalized to s_mb_group_prealloc, which is
3996 * rounded to a multiple of s_stripe if a stripe is set via mount option.
3997 * s_mb_group_prealloc can be configured via
3998 * /sys/fs/ext4/<partition>/mb_group_prealloc
3999 *
4000 * XXX: should we try to preallocate more than the group has now?
4001 */
4002 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4003 {
4004 struct super_block *sb = ac->ac_sb;
4005 struct ext4_locality_group *lg = ac->ac_lg;
4006
4007 BUG_ON(lg == NULL);
4008 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4009 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4010 }
4011
4012 /*
4013 * Normalization means making the request better in terms of
4014 * size and alignment
4015 */
4016 static noinline_for_stack void
4017 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4018 struct ext4_allocation_request *ar)
4019 {
4020 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4021 int bsbits, max;
4022 ext4_lblk_t end;
4023 loff_t size, start_off;
4024 loff_t orig_size __maybe_unused;
4025 ext4_lblk_t start;
4026 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4027 struct ext4_prealloc_space *pa;
4028
4029 /* only normalize data requests; metadata requests
4030 do not need preallocation */
4031 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4032 return;
4033
4034 /* sometimes the caller wants exact blocks */
4035 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4036 return;
4037
4038 /* caller may indicate that preallocation isn't
4039 * required (it's a tail, for example) */
4040 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4041 return;
4042
4043 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4044 ext4_mb_normalize_group_request(ac);
4045 return;
4046 }
4047
4048 bsbits = ac->ac_sb->s_blocksize_bits;
4049
4050 /* first, let's learn the actual file size
4051 * assuming the current request is allocated */
4052 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4053 size = size << bsbits;
4054 if (size < i_size_read(ac->ac_inode))
4055 size = i_size_read(ac->ac_inode);
4056 orig_size = size;
4057
4058 /* max size of free chunks */
4059 max = 2 << bsbits;
4060
4061 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4062 (req <= (size) || max <= (chunk_size))
4063
4064 /* first, try to predict filesize */
4065 /* XXX: should this table be tunable? */
4066 start_off = 0;
4067 if (size <= 16 * 1024) {
4068 size = 16 * 1024;
4069 } else if (size <= 32 * 1024) {
4070 size = 32 * 1024;
4071 } else if (size <= 64 * 1024) {
4072 size = 64 * 1024;
4073 } else if (size <= 128 * 1024) {
4074 size = 128 * 1024;
4075 } else if (size <= 256 * 1024) {
4076 size = 256 * 1024;
4077 } else if (size <= 512 * 1024) {
4078 size = 512 * 1024;
4079 } else if (size <= 1024 * 1024) {
4080 size = 1024 * 1024;
4081 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4082 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4083 (21 - bsbits)) << 21;
4084 size = 2 * 1024 * 1024;
4085 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4086 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4087 (22 - bsbits)) << 22;
4088 size = 4 * 1024 * 1024;
4089 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4090 (8<<20)>>bsbits, max, 8 * 1024)) {
4091 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4092 (23 - bsbits)) << 23;
4093 size = 8 * 1024 * 1024;
4094 } else {
4095 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4096 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4097 ac->ac_o_ex.fe_len) << bsbits;
4098 }
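/*
 * e.g. a file that would be 200k after this allocation falls into
 * the "size <= 256 * 1024" bucket above, so the request is rounded
 * up to 256k before being converted back to blocks below.
 */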
4099 size = size >> bsbits;
4100 start = start_off >> bsbits;
4101
4102 /* don't cover already allocated blocks in selected range */
4103 if (ar->pleft && start <= ar->lleft) {
4104 size -= ar->lleft + 1 - start;
4105 start = ar->lleft + 1;
4106 }
4107 if (ar->pright && start + size - 1 >= ar->lright)
4108 size -= start + size - ar->lright;
4109
4110 /*
4111 * Trim allocation request for filesystems with artificially small
4112 * groups.
4113 */
4114 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4115 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4116
4117 end = start + size;
4118
4119 /* check we don't cross already preallocated blocks */
4120 rcu_read_lock();
4121 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4122 ext4_lblk_t pa_end;
4123
4124 if (pa->pa_deleted)
4125 continue;
4126 spin_lock(&pa->pa_lock);
4127 if (pa->pa_deleted) {
4128 spin_unlock(&pa->pa_lock);
4129 continue;
4130 }
4131
4132 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4133 pa->pa_len);
4134
4135 /* PA must not overlap original request */
4136 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4137 ac->ac_o_ex.fe_logical < pa->pa_lstart));
4138
4139 /* skip PAs this normalized request doesn't overlap with */
4140 if (pa->pa_lstart >= end || pa_end <= start) {
4141 spin_unlock(&pa->pa_lock);
4142 continue;
4143 }
4144 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4145
4146 /* adjust start or end to be adjacent to this pa */
4147 if (pa_end <= ac->ac_o_ex.fe_logical) {
4148 BUG_ON(pa_end < start);
4149 start = pa_end;
4150 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4151 BUG_ON(pa->pa_lstart > end);
4152 end = pa->pa_lstart;
4153 }
4154 spin_unlock(&pa->pa_lock);
4155 }
4156 rcu_read_unlock();
4157 size = end - start;
4158
4159 /* XXX: extra loop to check we really don't overlap preallocations */
4160 rcu_read_lock();
4161 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4162 ext4_lblk_t pa_end;
4163
4164 spin_lock(&pa->pa_lock);
4165 if (pa->pa_deleted == 0) {
4166 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4167 pa->pa_len);
4168 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4169 }
4170 spin_unlock(&pa->pa_lock);
4171 }
4172 rcu_read_unlock();
4173
4174 if (start + size <= ac->ac_o_ex.fe_logical &&
4175 start > ac->ac_o_ex.fe_logical) {
4176 ext4_msg(ac->ac_sb, KERN_ERR,
4177 "start %lu, size %lu, fe_logical %lu",
4178 (unsigned long) start, (unsigned long) size,
4179 (unsigned long) ac->ac_o_ex.fe_logical);
4180 BUG();
4181 }
4182 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4183
4184 /* now prepare goal request */
4185
4186 /* XXX: is it better to align blocks w.r.t. logical
4187 * placement or to satisfy a big request as is */
4188 ac->ac_g_ex.fe_logical = start;
4189 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4190
4191 /* define goal start in order to merge */
4192 if (ar->pright && (ar->lright == (start + size))) {
4193 /* merge to the right */
4194 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4195 &ac->ac_f_ex.fe_group,
4196 &ac->ac_f_ex.fe_start);
4197 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4198 }
4199 if (ar->pleft && (ar->lleft + 1 == start)) {
4200 /* merge to the left */
4201 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4202 &ac->ac_f_ex.fe_group,
4203 &ac->ac_f_ex.fe_start);
4204 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4205 }
4206
4207 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4208 orig_size, start);
4209 }
4210
4211 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4212 {
4213 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4214
4215 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4216 atomic_inc(&sbi->s_bal_reqs);
4217 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4218 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4219 atomic_inc(&sbi->s_bal_success);
4220 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4221 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4222 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4223 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4224 atomic_inc(&sbi->s_bal_goals);
4225 if (ac->ac_found > sbi->s_mb_max_to_scan)
4226 atomic_inc(&sbi->s_bal_breaks);
4227 }
4228
4229 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4230 trace_ext4_mballoc_alloc(ac);
4231 else
4232 trace_ext4_mballoc_prealloc(ac);
4233 }
4234
4235 /*
4236 * Called on failure; free up any blocks from the inode PA for this
4237 * context. We don't need this for MB_GROUP_PA because we only change
4238 * pa_free in ext4_mb_release_context(), but on failure, we've already
4239 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4240 */
4241 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4242 {
4243 struct ext4_prealloc_space *pa = ac->ac_pa;
4244 struct ext4_buddy e4b;
4245 int err;
4246
4247 if (pa == NULL) {
4248 if (ac->ac_f_ex.fe_len == 0)
4249 return;
4250 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4251 if (err) {
4252 /*
4253 * This should never happen since we pin the
4254 * pages in the ext4_allocation_context so
4255 * ext4_mb_load_buddy() should never fail.
4256 */
4257 WARN(1, "mb_load_buddy failed (%d)", err);
4258 return;
4259 }
4260 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4261 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4262 ac->ac_f_ex.fe_len);
4263 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4264 ext4_mb_unload_buddy(&e4b);
4265 return;
4266 }
4267 if (pa->pa_type == MB_INODE_PA)
4268 pa->pa_free += ac->ac_b_ex.fe_len;
4269 }
4270
4271 /*
4272 * use blocks preallocated to inode
4273 */
4274 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4275 struct ext4_prealloc_space *pa)
4276 {
4277 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4278 ext4_fsblk_t start;
4279 ext4_fsblk_t end;
4280 int len;
4281
4282 /* found preallocated blocks, use them */
4283 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4284 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4285 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4286 len = EXT4_NUM_B2C(sbi, end - start);
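/*
 * Example, assuming cluster size == block size: a pa with
 * pa_lstart 100, pa_pstart 5000 and pa_len 16 serving a request
 * for 8 blocks at logical 104 gives start = 5004,
 * end = min(5016, 5012) = 5012 and len = 8.
 */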
4287 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4288 &ac->ac_b_ex.fe_start);
4289 ac->ac_b_ex.fe_len = len;
4290 ac->ac_status = AC_STATUS_FOUND;
4291 ac->ac_pa = pa;
4292
4293 BUG_ON(start < pa->pa_pstart);
4294 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4295 BUG_ON(pa->pa_free < len);
4296 pa->pa_free -= len;
4297
4298 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4299 }
4300
4301 /*
4302 * use blocks preallocated to locality group
4303 */
4304 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4305 struct ext4_prealloc_space *pa)
4306 {
4307 unsigned int len = ac->ac_o_ex.fe_len;
4308
4309 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4310 &ac->ac_b_ex.fe_group,
4311 &ac->ac_b_ex.fe_start);
4312 ac->ac_b_ex.fe_len = len;
4313 ac->ac_status = AC_STATUS_FOUND;
4314 ac->ac_pa = pa;
4315
4316 /* we don't correct pa_pstart or pa_len here to avoid
4317 * possible race when the group is being loaded concurrently
4318 * instead we correct pa later, after blocks are marked
4319 * in on-disk bitmap -- see ext4_mb_release_context()
4320 * Other CPUs are prevented from allocating from this pa by lg_mutex
4321 */
4322 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4323 pa->pa_lstart-len, len, pa);
4324 }
4325
4326 /*
4327 * Return the prealloc space that has the minimal distance
4328 * from the goal block. @cpa is the prealloc
4329 * space with the currently known minimal distance
4330 * from the goal block.
4331 */
4332 static struct ext4_prealloc_space *
4333 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4334 struct ext4_prealloc_space *pa,
4335 struct ext4_prealloc_space *cpa)
4336 {
4337 ext4_fsblk_t cur_distance, new_distance;
4338
4339 if (cpa == NULL) {
4340 atomic_inc(&pa->pa_count);
4341 return pa;
4342 }
4343 cur_distance = abs(goal_block - cpa->pa_pstart);
4344 new_distance = abs(goal_block - pa->pa_pstart);
4345
4346 if (cur_distance <= new_distance)
4347 return cpa;
4348
4349 /* drop the previous reference */
4350 atomic_dec(&cpa->pa_count);
4351 atomic_inc(&pa->pa_count);
4352 return pa;
4353 }
4354
4355 /*
4356 * search goal blocks in preallocated space
4357 */
4358 static noinline_for_stack bool
4359 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4360 {
4361 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4362 int order, i;
4363 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4364 struct ext4_locality_group *lg;
4365 struct ext4_prealloc_space *pa, *cpa = NULL;
4366 ext4_fsblk_t goal_block;
4367
4368 /* only data can be preallocated */
4369 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4370 return false;
4371
4372 /* first, try per-file preallocation */
4373 rcu_read_lock();
4374 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4375
4376 /* all fields in this condition don't change,
4377 * so we can skip locking for them */
4378 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4379 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4380 EXT4_C2B(sbi, pa->pa_len)))
4381 continue;
4382
4383 /* non-extent files can't have physical blocks past 2^32 */
4384 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4385 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4386 EXT4_MAX_BLOCK_FILE_PHYS))
4387 continue;
4388
4389 /* found preallocated blocks, use them */
4390 spin_lock(&pa->pa_lock);
4391 if (pa->pa_deleted == 0 && pa->pa_free) {
4392 atomic_inc(&pa->pa_count);
4393 ext4_mb_use_inode_pa(ac, pa);
4394 spin_unlock(&pa->pa_lock);
4395 ac->ac_criteria = 10;
4396 rcu_read_unlock();
4397 return true;
4398 }
4399 spin_unlock(&pa->pa_lock);
4400 }
4401 rcu_read_unlock();
4402
4403 /* can we use group allocation? */
4404 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4405 return false;
4406
4407 /* inode may have no locality group for some reason */
4408 lg = ac->ac_lg;
4409 if (lg == NULL)
4410 return false;
4411 order = fls(ac->ac_o_ex.fe_len) - 1;
4412 if (order > PREALLOC_TB_SIZE - 1)
4413 /* The max size of hash table is PREALLOC_TB_SIZE */
4414 order = PREALLOC_TB_SIZE - 1;
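/*
 * e.g. a request for 12 clusters gives order = fls(12) - 1 = 3, so
 * we scan the order-3 and larger buckets below.
 */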
4415
4416 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4417 /*
4418 * search for the prealloc space that has the
4419 * minimal distance from the goal block.
4420 */
4421 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4422 rcu_read_lock();
4423 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4424 pa_inode_list) {
4425 spin_lock(&pa->pa_lock);
4426 if (pa->pa_deleted == 0 &&
4427 pa->pa_free >= ac->ac_o_ex.fe_len) {
4428
4429 cpa = ext4_mb_check_group_pa(goal_block,
4430 pa, cpa);
4431 }
4432 spin_unlock(&pa->pa_lock);
4433 }
4434 rcu_read_unlock();
4435 }
4436 if (cpa) {
4437 ext4_mb_use_group_pa(ac, cpa);
4438 ac->ac_criteria = 20;
4439 return true;
4440 }
4441 return false;
4442 }
4443
4444 /*
4445 * the function goes through all blocks freed in the group
4446 * but not yet committed and marks them used in the in-core bitmap.
4447 * the buddy must be generated from this bitmap.
4448 * Needs to be called with the ext4 group lock held
4449 */
4450 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4451 ext4_group_t group)
4452 {
4453 struct rb_node *n;
4454 struct ext4_group_info *grp;
4455 struct ext4_free_data *entry;
4456
4457 grp = ext4_get_group_info(sb, group);
4458 n = rb_first(&(grp->bb_free_root));
4459
4460 while (n) {
4461 entry = rb_entry(n, struct ext4_free_data, efd_node);
4462 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4463 n = rb_next(n);
4464 }
4465 return;
4466 }
4467
4468 /*
4469 * the function goes through all preallocations in this group and marks them
4470 * used in the in-core bitmap. the buddy must be generated from this bitmap.
4471 * Needs to be called with the ext4 group lock held
4472 */
4473 static noinline_for_stack
4474 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4475 ext4_group_t group)
4476 {
4477 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4478 struct ext4_prealloc_space *pa;
4479 struct list_head *cur;
4480 ext4_group_t groupnr;
4481 ext4_grpblk_t start;
4482 int preallocated = 0;
4483 int len;
4484
4485 /* all forms of preallocation discard first load the group,
4486 * so the only competing code is preallocation use.
4487 * we don't need any locking here.
4488 * notice that we do NOT ignore preallocations with pa_deleted set;
4489 * otherwise we could leave used blocks available for
4490 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4491 * is dropping the preallocation
4492 */
4493 list_for_each(cur, &grp->bb_prealloc_list) {
4494 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4495 spin_lock(&pa->pa_lock);
4496 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4497 &groupnr, &start);
4498 len = pa->pa_len;
4499 spin_unlock(&pa->pa_lock);
4500 if (unlikely(len == 0))
4501 continue;
4502 BUG_ON(groupnr != group);
4503 ext4_set_bits(bitmap, start, len);
4504 preallocated += len;
4505 }
4506 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4507 }
4508
4509 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4510 struct ext4_prealloc_space *pa)
4511 {
4512 struct ext4_inode_info *ei;
4513
4514 if (pa->pa_deleted) {
4515 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4516 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4517 pa->pa_len);
4518 return;
4519 }
4520
4521 pa->pa_deleted = 1;
4522
4523 if (pa->pa_type == MB_INODE_PA) {
4524 ei = EXT4_I(pa->pa_inode);
4525 atomic_dec(&ei->i_prealloc_active);
4526 }
4527 }
4528
4529 static void ext4_mb_pa_callback(struct rcu_head *head)
4530 {
4531 struct ext4_prealloc_space *pa;
4532 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4533
4534 BUG_ON(atomic_read(&pa->pa_count));
4535 BUG_ON(pa->pa_deleted == 0);
4536 kmem_cache_free(ext4_pspace_cachep, pa);
4537 }
4538
4539 /*
4540 * drops a reference to preallocated space descriptor
4541 * if this was the last reference and the space is consumed
4542 */
4543 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4544 struct super_block *sb, struct ext4_prealloc_space *pa)
4545 {
4546 ext4_group_t grp;
4547 ext4_fsblk_t grp_blk;
4548
4549 /* in this short window concurrent discard can set pa_deleted */
4550 spin_lock(&pa->pa_lock);
4551 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4552 spin_unlock(&pa->pa_lock);
4553 return;
4554 }
4555
4556 if (pa->pa_deleted == 1) {
4557 spin_unlock(&pa->pa_lock);
4558 return;
4559 }
4560
4561 ext4_mb_mark_pa_deleted(sb, pa);
4562 spin_unlock(&pa->pa_lock);
4563
4564 grp_blk = pa->pa_pstart;
4565 /*
4566 * If doing group-based preallocation, pa_pstart may be in the
4567 * next group when pa is used up
4568 */
4569 if (pa->pa_type == MB_GROUP_PA)
4570 grp_blk--;
4571
4572 grp = ext4_get_group_number(sb, grp_blk);
4573
4574 /*
4575 * possible race:
4576 *
4577 * P1 (buddy init) P2 (regular allocation)
4578 * find block B in PA
4579 * copy on-disk bitmap to buddy
4580 * mark B in on-disk bitmap
4581 * drop PA from group
4582 * mark all PAs in buddy
4583 *
4584 * thus, P1 initializes buddy with B available. to prevent this
4585 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4586 * against that pair
4587 */
4588 ext4_lock_group(sb, grp);
4589 list_del(&pa->pa_group_list);
4590 ext4_unlock_group(sb, grp);
4591
4592 spin_lock(pa->pa_obj_lock);
4593 list_del_rcu(&pa->pa_inode_list);
4594 spin_unlock(pa->pa_obj_lock);
4595
4596 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4597 }
4598
4599 /*
4600 * creates new preallocated space for given inode
4601 */
4602 static noinline_for_stack void
4603 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4604 {
4605 struct super_block *sb = ac->ac_sb;
4606 struct ext4_sb_info *sbi = EXT4_SB(sb);
4607 struct ext4_prealloc_space *pa;
4608 struct ext4_group_info *grp;
4609 struct ext4_inode_info *ei;
4610
4611 /* preallocate only when the found space is larger than requested */
4612 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4613 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4614 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4615 BUG_ON(ac->ac_pa == NULL);
4616
4617 pa = ac->ac_pa;
4618
4619 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4620 int winl;
4621 int wins;
4622 int win;
4623 int offs;
4624
4625 /* we can't allocate as much as the normalizer wants.
4626 * so, the found space must get a proper lstart
4627 * to cover the original request */
4628 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4629 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4630
4631 /* we're limited by the original request in that
4632 * the logical block must be covered anyway.
4633 * winl is the window we can move our chunk within */
4634 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4635
4636 /* also, we should cover the whole original request */
4637 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4638
4639 /* the smallest one defines real window */
4640 win = min(winl, wins);
4641
4642 offs = ac->ac_o_ex.fe_logical %
4643 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4644 if (offs && offs < win)
4645 win = offs;
4646
4647 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4648 EXT4_NUM_B2C(sbi, win);
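/*
 * Worked example, assuming cluster size == block size: for a
 * request of 8 blocks at logical 100, a goal logical of 90 and 16
 * blocks found, winl = 10, wins = 8 and offs = 100 % 16 = 4 trims
 * the window to 4, so the pa covers logical blocks [96, 112).
 */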
4649 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4650 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4651 }
4652
4653 /* preallocation can change ac_b_ex, thus we store actually
4654 * allocated blocks for history */
4655 ac->ac_f_ex = ac->ac_b_ex;
4656
4657 pa->pa_lstart = ac->ac_b_ex.fe_logical;
4658 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4659 pa->pa_len = ac->ac_b_ex.fe_len;
4660 pa->pa_free = pa->pa_len;
4661 spin_lock_init(&pa->pa_lock);
4662 INIT_LIST_HEAD(&pa->pa_inode_list);
4663 INIT_LIST_HEAD(&pa->pa_group_list);
4664 pa->pa_deleted = 0;
4665 pa->pa_type = MB_INODE_PA;
4666
4667 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4668 pa->pa_len, pa->pa_lstart);
4669 trace_ext4_mb_new_inode_pa(ac, pa);
4670
4671 ext4_mb_use_inode_pa(ac, pa);
4672 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4673
4674 ei = EXT4_I(ac->ac_inode);
4675 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4676
4677 pa->pa_obj_lock = &ei->i_prealloc_lock;
4678 pa->pa_inode = ac->ac_inode;
4679
4680 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4681
4682 spin_lock(pa->pa_obj_lock);
4683 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4684 spin_unlock(pa->pa_obj_lock);
4685 atomic_inc(&ei->i_prealloc_active);
4686 }
4687
4688 /*
4689 * creates a new preallocated space for the locality group this inode belongs to
4690 */
4691 static noinline_for_stack void
4692 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4693 {
4694 struct super_block *sb = ac->ac_sb;
4695 struct ext4_locality_group *lg;
4696 struct ext4_prealloc_space *pa;
4697 struct ext4_group_info *grp;
4698
4699 /* preallocate only when the found space is larger than requested */
4700 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4701 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4702 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4703 BUG_ON(ac->ac_pa == NULL);
4704
4705 pa = ac->ac_pa;
4706
4707 /* preallocation can change ac_b_ex, thus we store actually
4708 * allocated blocks for history */
4709 ac->ac_f_ex = ac->ac_b_ex;
4710
4711 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4712 pa->pa_lstart = pa->pa_pstart;
4713 pa->pa_len = ac->ac_b_ex.fe_len;
4714 pa->pa_free = pa->pa_len;
4715 spin_lock_init(&pa->pa_lock);
4716 INIT_LIST_HEAD(&pa->pa_inode_list);
4717 INIT_LIST_HEAD(&pa->pa_group_list);
4718 pa->pa_deleted = 0;
4719 pa->pa_type = MB_GROUP_PA;
4720
4721 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4722 pa->pa_len, pa->pa_lstart);
4723 trace_ext4_mb_new_group_pa(ac, pa);
4724
4725 ext4_mb_use_group_pa(ac, pa);
4726 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4727
4728 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4729 lg = ac->ac_lg;
4730 BUG_ON(lg == NULL);
4731
4732 pa->pa_obj_lock = &lg->lg_prealloc_lock;
4733 pa->pa_inode = NULL;
4734
4735 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4736
4737 /*
4738 * We will later add the new pa to the right bucket
4739 * after updating the pa_free in ext4_mb_release_context
4740 */
4741 }
4742
4743 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4744 {
4745 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4746 ext4_mb_new_group_pa(ac);
4747 else
4748 ext4_mb_new_inode_pa(ac);
4749 }
4750
4751 /*
4752 * finds all unused blocks in on-disk bitmap, frees them in
4753 * in-core bitmap and buddy.
4754 * @pa must be unlinked from inode and group lists, so that
4755 * nobody else can find/use it.
4756 * the caller MUST hold group/inode locks.
4757 * TODO: optimize the case when there are no in-core structures yet
4758 */
4759 static noinline_for_stack int
4760 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4761 struct ext4_prealloc_space *pa)
4762 {
4763 struct super_block *sb = e4b->bd_sb;
4764 struct ext4_sb_info *sbi = EXT4_SB(sb);
4765 unsigned int end;
4766 unsigned int next;
4767 ext4_group_t group;
4768 ext4_grpblk_t bit;
4769 unsigned long long grp_blk_start;
4770 int free = 0;
4771
4772 BUG_ON(pa->pa_deleted == 0);
4773 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4774 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4775 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4776 end = bit + pa->pa_len;
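/*
 * Each maximal run of zero bits between bit and end is a chunk the
 * pa reserved but never handed out; e.g. if the pa spans bits 0..7
 * and only bits 2-3 were allocated, the runs 0-1 and 4-7 get freed.
 */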
4777
4778 while (bit < end) {
4779 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4780 if (bit >= end)
4781 break;
4782 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4783 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4784 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4785 (unsigned) next - bit, (unsigned) group);
4786 free += next - bit;
4787
4788 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4789 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4790 EXT4_C2B(sbi, bit)),
4791 next - bit);
4792 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4793 bit = next + 1;
4794 }
4795 if (free != pa->pa_free) {
4796 ext4_msg(e4b->bd_sb, KERN_CRIT,
4797 "pa %p: logic %lu, phys. %lu, len %d",
4798 pa, (unsigned long) pa->pa_lstart,
4799 (unsigned long) pa->pa_pstart,
4800 pa->pa_len);
4801 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4802 free, pa->pa_free);
4803 /*
4804 * pa is already deleted so we use the value obtained
4805 * from the bitmap and continue.
4806 */
4807 }
4808 atomic_add(free, &sbi->s_mb_discarded);
4809
4810 return 0;
4811 }
4812
4813 static noinline_for_stack int
4814 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4815 struct ext4_prealloc_space *pa)
4816 {
4817 struct super_block *sb = e4b->bd_sb;
4818 ext4_group_t group;
4819 ext4_grpblk_t bit;
4820
4821 trace_ext4_mb_release_group_pa(sb, pa);
4822 BUG_ON(pa->pa_deleted == 0);
4823 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4824 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4825 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4826 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4827 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4828
4829 return 0;
4830 }
4831
4832 /*
4833 * releases all preallocations in given group
4834 *
4835 * first, we need to decide discard policy:
4836 * - when do we discard
4837 * 1) ENOSPC
4838 * - how many do we discard
4839 * 1) how many requested
4840 */
4841 static noinline_for_stack int
4842 ext4_mb_discard_group_preallocations(struct super_block *sb,
4843 ext4_group_t group, int *busy)
4844 {
4845 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4846 struct buffer_head *bitmap_bh = NULL;
4847 struct ext4_prealloc_space *pa, *tmp;
4848 struct list_head list;
4849 struct ext4_buddy e4b;
4850 int err;
4851 int free = 0;
4852
4853 mb_debug(sb, "discard preallocation for group %u\n", group);
4854 if (list_empty(&grp->bb_prealloc_list))
4855 goto out_dbg;
4856
4857 bitmap_bh = ext4_read_block_bitmap(sb, group);
4858 if (IS_ERR(bitmap_bh)) {
4859 err = PTR_ERR(bitmap_bh);
4860 ext4_error_err(sb, -err,
4861 "Error %d reading block bitmap for %u",
4862 err, group);
4863 goto out_dbg;
4864 }
4865
4866 err = ext4_mb_load_buddy(sb, group, &e4b);
4867 if (err) {
4868 ext4_warning(sb, "Error %d loading buddy information for %u",
4869 err, group);
4870 put_bh(bitmap_bh);
4871 goto out_dbg;
4872 }
4873
4874 INIT_LIST_HEAD(&list);
4875 ext4_lock_group(sb, group);
4876 list_for_each_entry_safe(pa, tmp,
4877 &grp->bb_prealloc_list, pa_group_list) {
4878 spin_lock(&pa->pa_lock);
4879 if (atomic_read(&pa->pa_count)) {
4880 spin_unlock(&pa->pa_lock);
4881 *busy = 1;
4882 continue;
4883 }
4884 if (pa->pa_deleted) {
4885 spin_unlock(&pa->pa_lock);
4886 continue;
4887 }
4888
4889 /* seems this one can be freed ... */
4890 ext4_mb_mark_pa_deleted(sb, pa);
4891
4892 if (!free)
4893 this_cpu_inc(discard_pa_seq);
4894
4895 /* we can trust pa_free ... */
4896 free += pa->pa_free;
4897
4898 spin_unlock(&pa->pa_lock);
4899
4900 list_del(&pa->pa_group_list);
4901 list_add(&pa->u.pa_tmp_list, &list);
4902 }
4903
4904 /* now free all selected PAs */
4905 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4906
4907 /* remove from object (inode or locality group) */
4908 spin_lock(pa->pa_obj_lock);
4909 list_del_rcu(&pa->pa_inode_list);
4910 spin_unlock(pa->pa_obj_lock);
4911
4912 if (pa->pa_type == MB_GROUP_PA)
4913 ext4_mb_release_group_pa(&e4b, pa);
4914 else
4915 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4916
4917 list_del(&pa->u.pa_tmp_list);
4918 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4919 }
4920
4921 ext4_unlock_group(sb, group);
4922 ext4_mb_unload_buddy(&e4b);
4923 put_bh(bitmap_bh);
4924 out_dbg:
4925 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4926 free, group, grp->bb_free);
4927 return free;
4928 }
4929
4930 /*
4931 * releases all unused preallocated blocks for the given inode
4932 *
4933 * It's important to discard preallocations under i_data_sem:
4934 * we don't want another block to be served from the prealloc
4935 * space when we are discarding the inode prealloc space.
4936 *
4937 * FIXME!! Make sure it is valid at all the call sites
4938 */
4939 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4940 {
4941 struct ext4_inode_info *ei = EXT4_I(inode);
4942 struct super_block *sb = inode->i_sb;
4943 struct buffer_head *bitmap_bh = NULL;
4944 struct ext4_prealloc_space *pa, *tmp;
4945 ext4_group_t group = 0;
4946 struct list_head list;
4947 struct ext4_buddy e4b;
4948 int err;
4949
4950 if (!S_ISREG(inode->i_mode)) {
4951 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4952 return;
4953 }
4954
4955 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4956 return;
4957
4958 mb_debug(sb, "discard preallocation for inode %lu\n",
4959 inode->i_ino);
4960 trace_ext4_discard_preallocations(inode,
4961 atomic_read(&ei->i_prealloc_active), needed);
4962
4963 INIT_LIST_HEAD(&list);
4964
4965 if (needed == 0)
4966 needed = UINT_MAX;
4967
4968 repeat:
4969 /* first, collect all pa's in the inode */
4970 spin_lock(&ei->i_prealloc_lock);
4971 while (!list_empty(&ei->i_prealloc_list) && needed) {
4972 pa = list_entry(ei->i_prealloc_list.prev,
4973 struct ext4_prealloc_space, pa_inode_list);
4974 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4975 spin_lock(&pa->pa_lock);
4976 if (atomic_read(&pa->pa_count)) {
4977 /* this shouldn't happen often - nobody should
4978 * use preallocation while we're discarding it */
4979 spin_unlock(&pa->pa_lock);
4980 spin_unlock(&ei->i_prealloc_lock);
4981 ext4_msg(sb, KERN_ERR,
4982 "uh-oh! used pa while discarding");
4983 WARN_ON(1);
4984 schedule_timeout_uninterruptible(HZ);
4985 goto repeat;
4986
4987 }
4988 if (pa->pa_deleted == 0) {
4989 ext4_mb_mark_pa_deleted(sb, pa);
4990 spin_unlock(&pa->pa_lock);
4991 list_del_rcu(&pa->pa_inode_list);
4992 list_add(&pa->u.pa_tmp_list, &list);
4993 needed--;
4994 continue;
4995 }
4996
4997 /* someone is deleting pa right now */
4998 spin_unlock(&pa->pa_lock);
4999 spin_unlock(&ei->i_prealloc_lock);
5000
5001 /* we have to wait here because pa_deleted
5002 * doesn't mean pa is already unlinked from
5003 * the list. as we might be called from
5004 * ->clear_inode() the inode would get freed
5005 * and a concurrent thread which is unlinking
5006 * pa from the inode's list may access already
5007 * freed memory, bad-bad-bad */
5008
5009 /* XXX: if this happens too often, we can
5010 * add a flag to force wait only in case
5011 * of ->clear_inode(), but not in case of
5012 * regular truncate */
5013 schedule_timeout_uninterruptible(HZ);
5014 goto repeat;
5015 }
5016 spin_unlock(&ei->i_prealloc_lock);
5017
5018 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5019 BUG_ON(pa->pa_type != MB_INODE_PA);
5020 group = ext4_get_group_number(sb, pa->pa_pstart);
5021
5022 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5023 GFP_NOFS|__GFP_NOFAIL);
5024 if (err) {
5025 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5026 err, group);
5027 continue;
5028 }
5029
5030 bitmap_bh = ext4_read_block_bitmap(sb, group);
5031 if (IS_ERR(bitmap_bh)) {
5032 err = PTR_ERR(bitmap_bh);
5033 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5034 err, group);
5035 ext4_mb_unload_buddy(&e4b);
5036 continue;
5037 }
5038
5039 ext4_lock_group(sb, group);
5040 list_del(&pa->pa_group_list);
5041 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5042 ext4_unlock_group(sb, group);
5043
5044 ext4_mb_unload_buddy(&e4b);
5045 put_bh(bitmap_bh);
5046
5047 list_del(&pa->u.pa_tmp_list);
5048 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5049 }
5050 }
5051
5052 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5053 {
5054 struct ext4_prealloc_space *pa;
5055
5056 BUG_ON(ext4_pspace_cachep == NULL);
5057 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5058 if (!pa)
5059 return -ENOMEM;
5060 atomic_set(&pa->pa_count, 1);
5061 ac->ac_pa = pa;
5062 return 0;
5063 }
5064
5065 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5066 {
5067 struct ext4_prealloc_space *pa = ac->ac_pa;
5068
5069 BUG_ON(!pa);
5070 ac->ac_pa = NULL;
5071 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5072 kmem_cache_free(ext4_pspace_cachep, pa);
5073 }
5074
5075 #ifdef CONFIG_EXT4_DEBUG
5076 static inline void ext4_mb_show_pa(struct super_block *sb)
5077 {
5078 ext4_group_t i, ngroups;
5079
5080 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5081 return;
5082
5083 ngroups = ext4_get_groups_count(sb);
5084 mb_debug(sb, "groups: ");
5085 for (i = 0; i < ngroups; i++) {
5086 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5087 struct ext4_prealloc_space *pa;
5088 ext4_grpblk_t start;
5089 struct list_head *cur;
5090 ext4_lock_group(sb, i);
5091 list_for_each(cur, &grp->bb_prealloc_list) {
5092 pa = list_entry(cur, struct ext4_prealloc_space,
5093 pa_group_list);
5094 spin_lock(&pa->pa_lock);
5095 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5096 NULL, &start);
5097 spin_unlock(&pa->pa_lock);
5098 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5099 pa->pa_len);
5100 }
5101 ext4_unlock_group(sb, i);
5102 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5103 grp->bb_fragments);
5104 }
5105 }
5106
5107 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5108 {
5109 struct super_block *sb = ac->ac_sb;
5110
5111 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5112 return;
5113
5114 mb_debug(sb, "Can't allocate:"
5115 " Allocation context details:");
5116 mb_debug(sb, "status %u flags 0x%x",
5117 ac->ac_status, ac->ac_flags);
5118 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5119 "goal %lu/%lu/%lu@%lu, "
5120 "best %lu/%lu/%lu@%lu cr %d",
5121 (unsigned long)ac->ac_o_ex.fe_group,
5122 (unsigned long)ac->ac_o_ex.fe_start,
5123 (unsigned long)ac->ac_o_ex.fe_len,
5124 (unsigned long)ac->ac_o_ex.fe_logical,
5125 (unsigned long)ac->ac_g_ex.fe_group,
5126 (unsigned long)ac->ac_g_ex.fe_start,
5127 (unsigned long)ac->ac_g_ex.fe_len,
5128 (unsigned long)ac->ac_g_ex.fe_logical,
5129 (unsigned long)ac->ac_b_ex.fe_group,
5130 (unsigned long)ac->ac_b_ex.fe_start,
5131 (unsigned long)ac->ac_b_ex.fe_len,
5132 (unsigned long)ac->ac_b_ex.fe_logical,
5133 (int)ac->ac_criteria);
5134 mb_debug(sb, "%u found", ac->ac_found);
5135 ext4_mb_show_pa(sb);
5136 }
5137 #else
5138 static inline void ext4_mb_show_pa(struct super_block *sb)
5139 {
5140 return;
5141 }
5142 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5143 {
5144 ext4_mb_show_pa(ac->ac_sb);
5145 return;
5146 }
5147 #endif
5148
5149 /*
5150 * We use locality group preallocation for small files. The size of the
5151 * file is determined by the current size or the resulting size after
5152 * allocation, whichever is larger
5153 *
5154 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5155 */
5156 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5157 {
5158 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5159 int bsbits = ac->ac_sb->s_blocksize_bits;
5160 loff_t size, isize;
5161
5162 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5163 return;
5164
5165 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5166 return;
5167
5168 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5169 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5170 >> bsbits;
5171
5172 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5173 !inode_is_open_for_write(ac->ac_inode)) {
5174 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5175 return;
5176 }
5177
5178 if (sbi->s_mb_group_prealloc <= 0) {
5179 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5180 return;
5181 }
5182
5183 /* don't use group allocation for large files */
5184 size = max(size, isize);
5185 if (size > sbi->s_mb_stream_request) {
5186 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5187 return;
5188 }
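/*
 * e.g. with 4k blocks and the default mb_stream_req of 16 blocks,
 * any file larger than 64k takes the stream (inode) allocation
 * path above instead of group preallocation.
 */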
5189
5190 BUG_ON(ac->ac_lg != NULL);
5191 /*
5192 * locality group prealloc space is per cpu. The reason for having
5193 * a per-cpu locality group is to reduce the contention between block
5194 * requests from multiple CPUs.
5195 */
5196 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5197
5198 /* we're going to use group allocation */
5199 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5200
5201 /* serialize all allocations in the group */
5202 mutex_lock(&ac->ac_lg->lg_mutex);
5203 }
5204
5205 static noinline_for_stack int
5206 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5207 struct ext4_allocation_request *ar)
5208 {
5209 struct super_block *sb = ar->inode->i_sb;
5210 struct ext4_sb_info *sbi = EXT4_SB(sb);
5211 struct ext4_super_block *es = sbi->s_es;
5212 ext4_group_t group;
5213 unsigned int len;
5214 ext4_fsblk_t goal;
5215 ext4_grpblk_t block;
5216
5217 /* we can't allocate > group size */
5218 len = ar->len;
5219
5220 /* just a dirty hack to filter too big requests */
5221 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5222 len = EXT4_CLUSTERS_PER_GROUP(sb);
5223
5224 /* start searching from the goal */
5225 goal = ar->goal;
5226 if (goal < le32_to_cpu(es->s_first_data_block) ||
5227 goal >= ext4_blocks_count(es))
5228 goal = le32_to_cpu(es->s_first_data_block);
5229 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5230
5231 /* set up allocation goals */
5232 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5233 ac->ac_status = AC_STATUS_CONTINUE;
5234 ac->ac_sb = sb;
5235 ac->ac_inode = ar->inode;
5236 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5237 ac->ac_o_ex.fe_group = group;
5238 ac->ac_o_ex.fe_start = block;
5239 ac->ac_o_ex.fe_len = len;
5240 ac->ac_g_ex = ac->ac_o_ex;
5241 ac->ac_flags = ar->flags;
5242
5243 /* we have to define context: we'll work with a file or
5244 * locality group. this is a policy, actually */
5245 ext4_mb_group_or_file(ac);
5246
5247 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5248 "left: %u/%u, right %u/%u to %swritable\n",
5249 (unsigned) ar->len, (unsigned) ar->logical,
5250 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5251 (unsigned) ar->lleft, (unsigned) ar->pleft,
5252 (unsigned) ar->lright, (unsigned) ar->pright,
5253 inode_is_open_for_write(ar->inode) ? "" : "non-");
5254 return 0;
5255
5256 }
5257
5258 static noinline_for_stack void
5259 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5260 struct ext4_locality_group *lg,
5261 int order, int total_entries)
5262 {
5263 ext4_group_t group = 0;
5264 struct ext4_buddy e4b;
5265 struct list_head discard_list;
5266 struct ext4_prealloc_space *pa, *tmp;
5267
5268 mb_debug(sb, "discard locality group preallocation\n");
5269
5270 INIT_LIST_HEAD(&discard_list);
5271
5272 spin_lock(&lg->lg_prealloc_lock);
5273 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5274 pa_inode_list,
5275 lockdep_is_held(&lg->lg_prealloc_lock)) {
5276 spin_lock(&pa->pa_lock);
5277 if (atomic_read(&pa->pa_count)) {
5278 /*
5279 * This is the pa that we just used
5280 * for block allocation. So don't
5281 * free that
5282 */
5283 spin_unlock(&pa->pa_lock);
5284 continue;
5285 }
5286 if (pa->pa_deleted) {
5287 spin_unlock(&pa->pa_lock);
5288 continue;
5289 }
5290 /* only lg prealloc space */
5291 BUG_ON(pa->pa_type != MB_GROUP_PA);
5292
5293 /* seems this one can be freed ... */
5294 ext4_mb_mark_pa_deleted(sb, pa);
5295 spin_unlock(&pa->pa_lock);
5296
5297 list_del_rcu(&pa->pa_inode_list);
5298 list_add(&pa->u.pa_tmp_list, &discard_list);
5299
5300 total_entries--;
5301 if (total_entries <= 5) {
5302 /*
5303 * we want to keep only 5 entries,
5304 * allowing the list to grow to 8. This
5305 * makes sure we don't call discard
5306 * again soon for this list.
5307 */
5308 break;
5309 }
5310 }
5311 spin_unlock(&lg->lg_prealloc_lock);
5312
5313 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5314 int err;
5315
5316 group = ext4_get_group_number(sb, pa->pa_pstart);
5317 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5318 GFP_NOFS|__GFP_NOFAIL);
5319 if (err) {
5320 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5321 err, group);
5322 continue;
5323 }
5324 ext4_lock_group(sb, group);
5325 list_del(&pa->pa_group_list);
5326 ext4_mb_release_group_pa(&e4b, pa);
5327 ext4_unlock_group(sb, group);
5328
5329 ext4_mb_unload_buddy(&e4b);
5330 list_del(&pa->u.pa_tmp_list);
5331 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5332 }
5333 }
5334
5335 /*
5336 * We have incremented pa_count. So it cannot be freed at this
5337 * point. Also we hold lg_mutex. So no parallel allocation is
5338 * possible from this lg. That means pa_free cannot be updated.
5339 *
5340 * A parallel ext4_mb_discard_group_preallocations is possible,
5341 * however, which can cause the lg_prealloc_list to be updated.
5342 */
5343
5344 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5345 {
5346 int order, added = 0, lg_prealloc_count = 1;
5347 struct super_block *sb = ac->ac_sb;
5348 struct ext4_locality_group *lg = ac->ac_lg;
5349 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5350
5351 order = fls(pa->pa_free) - 1;
5352 if (order > PREALLOC_TB_SIZE - 1)
5353 /* The max size of hash table is PREALLOC_TB_SIZE */
5354 order = PREALLOC_TB_SIZE - 1;
5355 /* Add the prealloc space to lg */
5356 spin_lock(&lg->lg_prealloc_lock);
5357 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5358 pa_inode_list,
5359 lockdep_is_held(&lg->lg_prealloc_lock)) {
5360 spin_lock(&tmp_pa->pa_lock);
5361 if (tmp_pa->pa_deleted) {
5362 spin_unlock(&tmp_pa->pa_lock);
5363 continue;
5364 }
5365 if (!added && pa->pa_free < tmp_pa->pa_free) {
5366 /* Add to the tail of the previous entry */
5367 list_add_tail_rcu(&pa->pa_inode_list,
5368 &tmp_pa->pa_inode_list);
5369 added = 1;
5370 			/*
5371 			 * Don't break out here; we still want to
5372 			 * count the total number of entries in the list.
5373 			 */
5374 }
5375 spin_unlock(&tmp_pa->pa_lock);
5376 lg_prealloc_count++;
5377 }
5378 if (!added)
5379 list_add_tail_rcu(&pa->pa_inode_list,
5380 &lg->lg_prealloc_list[order]);
5381 spin_unlock(&lg->lg_prealloc_lock);
5382
5383 	/* Now trim the list so it contains no more than 8 elements */
5384 	if (lg_prealloc_count > 8) {
5385 		ext4_mb_discard_lg_preallocations(sb, lg,
5386 						order, lg_prealloc_count);
5387 		return;
5388 	}
5389 	return;
5390 }
5391
5392 /*
5393 * if per-inode prealloc list is too long, trim some PA
5394 */
5395 static void ext4_mb_trim_inode_pa(struct inode *inode)
5396 {
5397 struct ext4_inode_info *ei = EXT4_I(inode);
5398 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5399 int count, delta;
5400
5401 count = atomic_read(&ei->i_prealloc_active);
5402 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
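	/*
	 * delta adds ~25% of slack above s_mb_max_inode_prealloc, so we
	 * only trim once the list has clearly overgrown the limit; e.g.
	 * with the default limit of 512, delta is 129 and trimming starts
	 * only once the count exceeds 641.
	 */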
5403 if (count > sbi->s_mb_max_inode_prealloc + delta) {
5404 count -= sbi->s_mb_max_inode_prealloc;
5405 ext4_discard_preallocations(inode, count);
5406 }
5407 }
5408
5409 /*
5410  * release all resources used in the allocation
5411 */
5412 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5413 {
5414 struct inode *inode = ac->ac_inode;
5415 struct ext4_inode_info *ei = EXT4_I(inode);
5416 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5417 struct ext4_prealloc_space *pa = ac->ac_pa;
5418 if (pa) {
5419 if (pa->pa_type == MB_GROUP_PA) {
5420 /* see comment in ext4_mb_use_group_pa() */
5421 spin_lock(&pa->pa_lock);
5422 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5423 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5424 pa->pa_free -= ac->ac_b_ex.fe_len;
5425 pa->pa_len -= ac->ac_b_ex.fe_len;
5426 spin_unlock(&pa->pa_lock);
5427
5428 /*
5429 * We want to add the pa to the right bucket.
5430 			 * Remove it from the list, and while adding it
5431 			 * back make sure the list we are adding to
5432 			 * doesn't grow too big.
5433 */
5434 if (likely(pa->pa_free)) {
5435 spin_lock(pa->pa_obj_lock);
5436 list_del_rcu(&pa->pa_inode_list);
5437 spin_unlock(pa->pa_obj_lock);
5438 ext4_mb_add_n_trim(ac);
5439 }
5440 }
5441
5442 if (pa->pa_type == MB_INODE_PA) {
5443 /*
5444 		 * treat the per-inode prealloc list as an LRU list, then try
5445 * to trim the least recently used PA.
5446 */
5447 spin_lock(pa->pa_obj_lock);
5448 list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5449 spin_unlock(pa->pa_obj_lock);
5450 }
5451
5452 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5453 }
5454 if (ac->ac_bitmap_page)
5455 put_page(ac->ac_bitmap_page);
5456 if (ac->ac_buddy_page)
5457 put_page(ac->ac_buddy_page);
5458 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5459 mutex_unlock(&ac->ac_lg->lg_mutex);
5460 ext4_mb_collect_stats(ac);
5461 ext4_mb_trim_inode_pa(inode);
5462 return 0;
5463 }
5464
5465 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5466 {
5467 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5468 int ret;
5469 int freed = 0, busy = 0;
5470 int retry = 0;
5471
5472 trace_ext4_mb_discard_preallocations(sb, needed);
5473
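	/*
	 * needed == 0 means "no specific target": pick a value larger than
	 * any single group can satisfy so we keep walking groups instead
	 * of stopping after the first small discard.
	 */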
5474 if (needed == 0)
5475 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5476 repeat:
5477 for (i = 0; i < ngroups && needed > 0; i++) {
5478 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5479 freed += ret;
5480 needed -= ret;
5481 cond_resched();
5482 }
5483
5484 if (needed > 0 && busy && ++retry < 3) {
5485 busy = 0;
5486 goto repeat;
5487 }
5488
5489 return freed;
5490 }
5491
5492 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5493 struct ext4_allocation_context *ac, u64 *seq)
5494 {
5495 int freed;
5496 u64 seq_retry = 0;
5497 bool ret = false;
5498
5499 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5500 if (freed) {
5501 ret = true;
5502 goto out_dbg;
5503 }
5504 seq_retry = ext4_get_discard_pa_seq_sum();
5505 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5506 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5507 *seq = seq_retry;
5508 ret = true;
5509 }
5510
5511 out_dbg:
5512 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5513 return ret;
5514 }
5515
5516 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5517 struct ext4_allocation_request *ar, int *errp);
5518
5519 /*
5520  * Main entry point into mballoc to allocate blocks.
5521  * It tries to use preallocation first, then falls back
5522  * to the usual allocation path.
5523 */
5524 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5525 struct ext4_allocation_request *ar, int *errp)
5526 {
5527 struct ext4_allocation_context *ac = NULL;
5528 struct ext4_sb_info *sbi;
5529 struct super_block *sb;
5530 ext4_fsblk_t block = 0;
5531 unsigned int inquota = 0;
5532 unsigned int reserv_clstrs = 0;
5533 u64 seq;
5534
5535 might_sleep();
5536 sb = ar->inode->i_sb;
5537 sbi = EXT4_SB(sb);
5538
5539 trace_ext4_request_blocks(ar);
5540 if (sbi->s_mount_state & EXT4_FC_REPLAY)
5541 return ext4_mb_new_blocks_simple(handle, ar, errp);
5542
5543 	/* Allow the quota file to use the superuser reservation */
5544 if (ext4_is_quota_file(ar->inode))
5545 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5546
5547 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5548 /* Without delayed allocation we need to verify
5549 		 * there are enough free blocks to do the block allocation
5550 		 * and verify that the allocation doesn't exceed the quota limits.
5551 */
5552 while (ar->len &&
5553 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5554
5555 			/* let others free some space */
5556 cond_resched();
5557 ar->len = ar->len >> 1;
5558 }
5559 if (!ar->len) {
5560 ext4_mb_show_pa(sb);
5561 *errp = -ENOSPC;
5562 return 0;
5563 }
5564 reserv_clstrs = ar->len;
5565 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5566 dquot_alloc_block_nofail(ar->inode,
5567 EXT4_C2B(sbi, ar->len));
5568 } else {
5569 while (ar->len &&
5570 dquot_alloc_block(ar->inode,
5571 EXT4_C2B(sbi, ar->len))) {
5572
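				/*
				 * Quota ran out: shrink the request one
				 * block at a time and disable preallocation
				 * so we don't reserve more than the quota
				 * can cover.
				 */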
5573 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5574 ar->len--;
5575 }
5576 }
5577 inquota = ar->len;
5578 if (ar->len == 0) {
5579 *errp = -EDQUOT;
5580 goto out;
5581 }
5582 }
5583
5584 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5585 if (!ac) {
5586 ar->len = 0;
5587 *errp = -ENOMEM;
5588 goto out;
5589 }
5590
5591 *errp = ext4_mb_initialize_context(ac, ar);
5592 if (*errp) {
5593 ar->len = 0;
5594 goto out;
5595 }
5596
5597 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
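	/*
	 * Sample the discard sequence before trying preallocated space; if
	 * the allocation later fails,
	 * ext4_mb_discard_preallocations_should_retry() compares this
	 * against the current per-cpu sum to detect whether other CPUs
	 * discarded preallocations in the meantime.
	 */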
5598 seq = this_cpu_read(discard_pa_seq);
5599 if (!ext4_mb_use_preallocated(ac)) {
5600 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5601 ext4_mb_normalize_request(ac, ar);
5602
5603 *errp = ext4_mb_pa_alloc(ac);
5604 if (*errp)
5605 goto errout;
5606 repeat:
5607 /* allocate space in core */
5608 *errp = ext4_mb_regular_allocator(ac);
5609 /*
5610 		 * The pa allocated above is added to grp->bb_prealloc_list only
5611 		 * when we were able to allocate some blocks, i.e. when
5612 		 * ac->ac_status == AC_STATUS_FOUND.
5613 		 * An error from the call above means ac->ac_status != AC_STATUS_FOUND,
5614 		 * so we have to free the pa here ourselves.
5615 */
5616 if (*errp) {
5617 ext4_mb_pa_free(ac);
5618 ext4_discard_allocated_blocks(ac);
5619 goto errout;
5620 }
5621 if (ac->ac_status == AC_STATUS_FOUND &&
5622 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5623 ext4_mb_pa_free(ac);
5624 }
5625 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5626 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5627 if (*errp) {
5628 ext4_discard_allocated_blocks(ac);
5629 goto errout;
5630 } else {
5631 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5632 ar->len = ac->ac_b_ex.fe_len;
5633 }
5634 } else {
5635 if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5636 goto repeat;
5637 /*
5638 * If block allocation fails then the pa allocated above
5639 		 * needs to be freed right here.
5640 */
5641 ext4_mb_pa_free(ac);
5642 *errp = -ENOSPC;
5643 }
5644
5645 errout:
5646 if (*errp) {
5647 ac->ac_b_ex.fe_len = 0;
5648 ar->len = 0;
5649 ext4_mb_show_ac(ac);
5650 }
5651 ext4_mb_release_context(ac);
5652 out:
5653 if (ac)
5654 kmem_cache_free(ext4_ac_cachep, ac);
5655 if (inquota && ar->len < inquota)
5656 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5657 if (!ar->len) {
5658 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5659 			/* release all the reserved blocks if non-delalloc */
5660 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5661 reserv_clstrs);
5662 }
5663
5664 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5665
5666 return block;
5667 }
5668
5669 /*
5670 * We can merge two free data extents only if the physical blocks
5671 * are contiguous, AND the extents were freed by the same transaction,
5672 * AND the blocks are associated with the same group.
5673 */
5674 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5675 struct ext4_free_data *entry,
5676 struct ext4_free_data *new_entry,
5677 struct rb_root *entry_rb_root)
5678 {
5679 if ((entry->efd_tid != new_entry->efd_tid) ||
5680 (entry->efd_group != new_entry->efd_group))
5681 return;
5682 if (entry->efd_start_cluster + entry->efd_count ==
5683 new_entry->efd_start_cluster) {
5684 new_entry->efd_start_cluster = entry->efd_start_cluster;
5685 new_entry->efd_count += entry->efd_count;
5686 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5687 entry->efd_start_cluster) {
5688 new_entry->efd_count += entry->efd_count;
5689 } else
5690 return;
5691 spin_lock(&sbi->s_md_lock);
5692 list_del(&entry->efd_list);
5693 spin_unlock(&sbi->s_md_lock);
5694 rb_erase(&entry->efd_node, entry_rb_root);
5695 kmem_cache_free(ext4_free_data_cachep, entry);
5696 }
5697
5698 static noinline_for_stack int
5699 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5700 struct ext4_free_data *new_entry)
5701 {
5702 ext4_group_t group = e4b->bd_group;
5703 ext4_grpblk_t cluster;
5704 ext4_grpblk_t clusters = new_entry->efd_count;
5705 struct ext4_free_data *entry;
5706 struct ext4_group_info *db = e4b->bd_info;
5707 struct super_block *sb = e4b->bd_sb;
5708 struct ext4_sb_info *sbi = EXT4_SB(sb);
5709 struct rb_node **n = &db->bb_free_root.rb_node, *node;
5710 struct rb_node *parent = NULL, *new_node;
5711
5712 BUG_ON(!ext4_handle_valid(handle));
5713 BUG_ON(e4b->bd_bitmap_page == NULL);
5714 BUG_ON(e4b->bd_buddy_page == NULL);
5715
5716 new_node = &new_entry->efd_node;
5717 cluster = new_entry->efd_start_cluster;
5718
5719 if (!*n) {
5720 		/* first free block extent. We need to
5721 		 * protect the buddy cache from being freed,
5722 		 * otherwise we'll refresh it from the
5723 		 * on-disk bitmap and lose not-yet-available
5724 		 * blocks */
5725 get_page(e4b->bd_buddy_page);
5726 get_page(e4b->bd_bitmap_page);
5727 }
5728 while (*n) {
5729 parent = *n;
5730 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5731 if (cluster < entry->efd_start_cluster)
5732 n = &(*n)->rb_left;
5733 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5734 n = &(*n)->rb_right;
5735 else {
5736 ext4_grp_locked_error(sb, group, 0,
5737 ext4_group_first_block_no(sb, group) +
5738 EXT4_C2B(sbi, cluster),
5739 "Block already on to-be-freed list");
5740 kmem_cache_free(ext4_free_data_cachep, new_entry);
5741 return 0;
5742 }
5743 }
5744
5745 rb_link_node(new_node, parent, n);
5746 rb_insert_color(new_node, &db->bb_free_root);
5747
5748 	/* Now see whether the extent can be merged with its left and right neighbors */
5749 node = rb_prev(new_node);
5750 if (node) {
5751 entry = rb_entry(node, struct ext4_free_data, efd_node);
5752 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5753 &(db->bb_free_root));
5754 }
5755
5756 node = rb_next(new_node);
5757 if (node) {
5758 entry = rb_entry(node, struct ext4_free_data, efd_node);
5759 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5760 &(db->bb_free_root));
5761 }
5762
5763 spin_lock(&sbi->s_md_lock);
5764 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5765 sbi->s_mb_free_pending += clusters;
5766 spin_unlock(&sbi->s_md_lock);
5767 return 0;
5768 }
5769
5770 /*
5771  * Simple allocator for the ext4 fast commit replay path. It searches for blocks
5772 * linearly starting at the goal block and also excludes the blocks which
5773 * are going to be in use after fast commit replay.
5774 */
5775 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5776 struct ext4_allocation_request *ar, int *errp)
5777 {
5778 struct buffer_head *bitmap_bh;
5779 struct super_block *sb = ar->inode->i_sb;
5780 ext4_group_t group;
5781 ext4_grpblk_t blkoff;
5782 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5783 ext4_grpblk_t i = 0;
5784 ext4_fsblk_t goal, block;
5785 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5786
5787 goal = ar->goal;
5788 if (goal < le32_to_cpu(es->s_first_data_block) ||
5789 goal >= ext4_blocks_count(es))
5790 goal = le32_to_cpu(es->s_first_data_block);
5791
5792 ar->len = 0;
5793 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5794 for (; group < ext4_get_groups_count(sb); group++) {
5795 bitmap_bh = ext4_read_block_bitmap(sb, group);
5796 if (IS_ERR(bitmap_bh)) {
5797 *errp = PTR_ERR(bitmap_bh);
5798 pr_warn("Failed to read block bitmap\n");
5799 return 0;
5800 }
5801
5802 ext4_get_group_no_and_offset(sb,
5803 max(ext4_group_first_block_no(sb, group), goal),
5804 NULL, &blkoff);
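		/*
		 * Scan the group bitmap from blkoff for a free block that is
		 * not excluded by fast commit replay; excluded blocks will
		 * be in use once the replay finishes, so skip them here.
		 */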
5805 while (1) {
5806 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5807 blkoff);
5808 if (i >= max)
5809 break;
5810 if (ext4_fc_replay_check_excluded(sb,
5811 ext4_group_first_block_no(sb, group) + i)) {
5812 blkoff = i + 1;
5813 } else
5814 break;
5815 }
5816 brelse(bitmap_bh);
5817 if (i < max)
5818 break;
5819 }
5820
5821 if (group >= ext4_get_groups_count(sb) || i >= max) {
5822 *errp = -ENOSPC;
5823 return 0;
5824 }
5825
5826 block = ext4_group_first_block_no(sb, group) + i;
5827 ext4_mb_mark_bb(sb, block, 1, 1);
5828 ar->len = 1;
5829
5830 return block;
5831 }
5832
5833 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5834 unsigned long count)
5835 {
5836 struct buffer_head *bitmap_bh;
5837 struct super_block *sb = inode->i_sb;
5838 struct ext4_group_desc *gdp;
5839 struct buffer_head *gdp_bh;
5840 ext4_group_t group;
5841 ext4_grpblk_t blkoff;
5842 int already_freed = 0, err, i;
5843
5844 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5845 bitmap_bh = ext4_read_block_bitmap(sb, group);
5846 if (IS_ERR(bitmap_bh)) {
5847 err = PTR_ERR(bitmap_bh);
5848 pr_warn("Failed to read block bitmap\n");
5849 return;
5850 }
5851 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5852 if (!gdp)
5853 return;
5854
5855 for (i = 0; i < count; i++) {
5856 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5857 already_freed++;
5858 }
5859 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5860 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5861 if (err)
5862 return;
5863 ext4_free_group_clusters_set(
5864 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5865 count - already_freed);
5866 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5867 ext4_group_desc_csum_set(sb, group, gdp);
5868 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5869 sync_dirty_buffer(bitmap_bh);
5870 sync_dirty_buffer(gdp_bh);
5871 brelse(bitmap_bh);
5872 }
5873
5874 /**
5875 * ext4_free_blocks() -- Free given blocks and update quota
5876 * @handle: handle for this transaction
5877 * @inode: inode
5878 * @bh: optional buffer of the block to be freed
5879 * @block: starting physical block to be freed
5880 * @count: number of blocks to be freed
5881 * @flags: flags used by ext4_free_blocks
5882 */
5883 void ext4_free_blocks(handle_t *handle, struct inode *inode,
5884 struct buffer_head *bh, ext4_fsblk_t block,
5885 unsigned long count, int flags)
5886 {
5887 struct buffer_head *bitmap_bh = NULL;
5888 struct super_block *sb = inode->i_sb;
5889 struct ext4_group_desc *gdp;
5890 unsigned int overflow;
5891 ext4_grpblk_t bit;
5892 struct buffer_head *gd_bh;
5893 ext4_group_t block_group;
5894 struct ext4_sb_info *sbi;
5895 struct ext4_buddy e4b;
5896 unsigned int count_clusters;
5897 int err = 0;
5898 int ret;
5899
5900 sbi = EXT4_SB(sb);
5901
5902 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
5903 ext4_free_blocks_simple(inode, block, count);
5904 return;
5905 }
5906
5907 might_sleep();
5908 if (bh) {
5909 if (block)
5910 BUG_ON(block != bh->b_blocknr);
5911 else
5912 block = bh->b_blocknr;
5913 }
5914
5915 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5916 !ext4_inode_block_valid(inode, block, count)) {
5917 ext4_error(sb, "Freeing blocks not in datazone - "
5918 "block = %llu, count = %lu", block, count);
5919 goto error_return;
5920 }
5921
5922 ext4_debug("freeing block %llu\n", block);
5923 trace_ext4_free_blocks(inode, block, count, flags);
5924
5925 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5926 BUG_ON(count > 1);
5927
5928 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
5929 inode, bh, block);
5930 }
5931
5932 /*
5933 * If the extent to be freed does not begin on a cluster
5934 * boundary, we need to deal with partial clusters at the
5935 * beginning and end of the extent. Normally we will free
5936 * blocks at the beginning or the end unless we are explicitly
5937 * requested to avoid doing so.
5938 */
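	/*
	 * Example, assuming a cluster ratio of 16: freeing a range that
	 * starts at block 35 gives overflow = 3, so the range is widened
	 * down to block 32 (count += 3) unless NOFREE_FIRST_CLUSTER asks
	 * us to skip the partial leading cluster instead.
	 */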
5939 overflow = EXT4_PBLK_COFF(sbi, block);
5940 if (overflow) {
5941 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
5942 overflow = sbi->s_cluster_ratio - overflow;
5943 block += overflow;
5944 if (count > overflow)
5945 count -= overflow;
5946 else
5947 return;
5948 } else {
5949 block -= overflow;
5950 count += overflow;
5951 }
5952 }
5953 overflow = EXT4_LBLK_COFF(sbi, count);
5954 if (overflow) {
5955 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
5956 if (count > overflow)
5957 count -= overflow;
5958 else
5959 return;
5960 } else
5961 count += sbi->s_cluster_ratio - overflow;
5962 }
5963
5964 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5965 int i;
5966 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
5967
5968 for (i = 0; i < count; i++) {
5969 cond_resched();
5970 if (is_metadata)
5971 bh = sb_find_get_block(inode->i_sb, block + i);
5972 ext4_forget(handle, is_metadata, inode, bh, block + i);
5973 }
5974 }
5975
5976 do_more:
5977 overflow = 0;
5978 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5979
5980 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5981 ext4_get_group_info(sb, block_group))))
5982 return;
5983
5984 /*
5985 * Check to see if we are freeing blocks across a group
5986 * boundary.
5987 */
5988 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5989 overflow = EXT4_C2B(sbi, bit) + count -
5990 EXT4_BLOCKS_PER_GROUP(sb);
5991 count -= overflow;
5992 }
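	/*
	 * Any overflow into the next group is handled by a second pass:
	 * once this group is done we jump back to do_more with
	 * block += count and count = overflow.
	 */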
5993 count_clusters = EXT4_NUM_B2C(sbi, count);
5994 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5995 if (IS_ERR(bitmap_bh)) {
5996 err = PTR_ERR(bitmap_bh);
5997 bitmap_bh = NULL;
5998 goto error_return;
5999 }
6000 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
6001 if (!gdp) {
6002 err = -EIO;
6003 goto error_return;
6004 }
6005
6006 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
6007 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
6008 in_range(block, ext4_inode_table(sb, gdp),
6009 sbi->s_itb_per_group) ||
6010 in_range(block + count - 1, ext4_inode_table(sb, gdp),
6011 sbi->s_itb_per_group)) {
6012
6013 ext4_error(sb, "Freeing blocks in system zone - "
6014 "Block = %llu, count = %lu", block, count);
6015 		/* err = 0. ext4_std_error should be a no-op */
6016 goto error_return;
6017 }
6018
6019 BUFFER_TRACE(bitmap_bh, "getting write access");
6020 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6021 EXT4_JTR_NONE);
6022 if (err)
6023 goto error_return;
6024
6025 /*
6026 * We are about to modify some metadata. Call the journal APIs
6027 * to unshare ->b_data if a currently-committing transaction is
6028 * using it
6029 */
6030 BUFFER_TRACE(gd_bh, "get_write_access");
6031 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6032 if (err)
6033 goto error_return;
6034 #ifdef AGGRESSIVE_CHECK
6035 {
6036 int i;
6037 for (i = 0; i < count_clusters; i++)
6038 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6039 }
6040 #endif
6041 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6042
6043 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6044 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6045 GFP_NOFS|__GFP_NOFAIL);
6046 if (err)
6047 goto error_return;
6048
6049 /*
6050 * We need to make sure we don't reuse the freed block until after the
6051 * transaction is committed. We make an exception if the inode is to be
6052 * written in writeback mode since writeback mode has weak data
6053 * consistency guarantees.
6054 */
6055 if (ext4_handle_valid(handle) &&
6056 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6057 !ext4_should_writeback_data(inode))) {
6058 struct ext4_free_data *new_entry;
6059 /*
6060 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6061 * to fail.
6062 */
6063 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6064 GFP_NOFS|__GFP_NOFAIL);
6065 new_entry->efd_start_cluster = bit;
6066 new_entry->efd_group = block_group;
6067 new_entry->efd_count = count_clusters;
6068 new_entry->efd_tid = handle->h_transaction->t_tid;
6069
6070 ext4_lock_group(sb, block_group);
6071 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6072 ext4_mb_free_metadata(handle, &e4b, new_entry);
6073 } else {
6074 		/* We need to update group_info->bb_free and the bitmap
6075 		 * with the group lock held; generate_buddy looks at
6076 		 * them with the group lock held.
6077 */
6078 if (test_opt(sb, DISCARD)) {
6079 err = ext4_issue_discard(sb, block_group, bit, count,
6080 NULL);
6081 if (err && err != -EOPNOTSUPP)
6082 ext4_msg(sb, KERN_WARNING, "discard request in"
6083 " group:%d block:%d count:%lu failed"
6084 " with %d", block_group, bit, count,
6085 err);
6086 } else
6087 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6088
6089 ext4_lock_group(sb, block_group);
6090 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6091 mb_free_blocks(inode, &e4b, bit, count_clusters);
6092 }
6093
6094 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6095 ext4_free_group_clusters_set(sb, gdp, ret);
6096 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6097 ext4_group_desc_csum_set(sb, block_group, gdp);
6098 ext4_unlock_group(sb, block_group);
6099
6100 if (sbi->s_log_groups_per_flex) {
6101 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6102 atomic64_add(count_clusters,
6103 &sbi_array_rcu_deref(sbi, s_flex_groups,
6104 flex_group)->free_clusters);
6105 }
6106
6107 /*
6108 * on a bigalloc file system, defer the s_freeclusters_counter
6109 * update to the caller (ext4_remove_space and friends) so they
6110 * can determine if a cluster freed here should be rereserved
6111 */
6112 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6113 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6114 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6115 percpu_counter_add(&sbi->s_freeclusters_counter,
6116 count_clusters);
6117 }
6118
6119 ext4_mb_unload_buddy(&e4b);
6120
6121 /* We dirtied the bitmap block */
6122 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6123 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6124
6125 /* And the group descriptor block */
6126 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6127 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6128 if (!err)
6129 err = ret;
6130
6131 if (overflow && !err) {
6132 block += count;
6133 count = overflow;
6134 put_bh(bitmap_bh);
6135 goto do_more;
6136 }
6137 error_return:
6138 brelse(bitmap_bh);
6139 ext4_std_error(sb, err);
6140 return;
6141 }
6142
6143 /**
6144 * ext4_group_add_blocks() -- Add given blocks to an existing group
6145 * @handle: handle to this transaction
6146 * @sb: super block
6147 * @block: start physical block to add to the block group
6148  * @count: number of blocks to add
6149 *
6150 * This marks the blocks as free in the bitmap and buddy.
6151 */
6152 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6153 ext4_fsblk_t block, unsigned long count)
6154 {
6155 struct buffer_head *bitmap_bh = NULL;
6156 struct buffer_head *gd_bh;
6157 ext4_group_t block_group;
6158 ext4_grpblk_t bit;
6159 unsigned int i;
6160 struct ext4_group_desc *desc;
6161 struct ext4_sb_info *sbi = EXT4_SB(sb);
6162 struct ext4_buddy e4b;
6163 int err = 0, ret, free_clusters_count;
6164 ext4_grpblk_t clusters_freed;
6165 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6166 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6167 unsigned long cluster_count = last_cluster - first_cluster + 1;
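	/*
	 * Convert the block range to whole clusters; e.g. with an 8-block
	 * cluster ratio, adding blocks 10..25 spans clusters 1..3, so
	 * cluster_count = 3.
	 */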
6168
6169 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6170
6171 if (count == 0)
6172 return 0;
6173
6174 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6175 /*
6176 * Check to see if we are freeing blocks across a group
6177 * boundary.
6178 */
6179 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6180 ext4_warning(sb, "too many blocks added to group %u",
6181 block_group);
6182 err = -EINVAL;
6183 goto error_return;
6184 }
6185
6186 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6187 if (IS_ERR(bitmap_bh)) {
6188 err = PTR_ERR(bitmap_bh);
6189 bitmap_bh = NULL;
6190 goto error_return;
6191 }
6192
6193 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6194 if (!desc) {
6195 err = -EIO;
6196 goto error_return;
6197 }
6198
6199 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
6200 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
6201 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
6202 in_range(block + count - 1, ext4_inode_table(sb, desc),
6203 sbi->s_itb_per_group)) {
6204 ext4_error(sb, "Adding blocks in system zones - "
6205 "Block = %llu, count = %lu",
6206 block, count);
6207 err = -EINVAL;
6208 goto error_return;
6209 }
6210
6211 BUFFER_TRACE(bitmap_bh, "getting write access");
6212 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6213 EXT4_JTR_NONE);
6214 if (err)
6215 goto error_return;
6216
6217 /*
6218 * We are about to modify some metadata. Call the journal APIs
6219 * to unshare ->b_data if a currently-committing transaction is
6220 * using it
6221 */
6222 BUFFER_TRACE(gd_bh, "get_write_access");
6223 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6224 if (err)
6225 goto error_return;
6226
6227 for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6228 BUFFER_TRACE(bitmap_bh, "clear bit");
6229 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6230 ext4_error(sb, "bit already cleared for block %llu",
6231 (ext4_fsblk_t)(block + i));
6232 BUFFER_TRACE(bitmap_bh, "bit already cleared");
6233 } else {
6234 clusters_freed++;
6235 }
6236 }
6237
6238 err = ext4_mb_load_buddy(sb, block_group, &e4b);
6239 if (err)
6240 goto error_return;
6241
6242 /*
6243 	 * We need to update group_info->bb_free and the bitmap
6244 	 * with the group lock held; generate_buddy looks at
6245 	 * them with the group lock held.
6246 */
6247 ext4_lock_group(sb, block_group);
6248 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6249 mb_free_blocks(NULL, &e4b, bit, cluster_count);
6250 free_clusters_count = clusters_freed +
6251 ext4_free_group_clusters(sb, desc);
6252 ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6253 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6254 ext4_group_desc_csum_set(sb, block_group, desc);
6255 ext4_unlock_group(sb, block_group);
6256 percpu_counter_add(&sbi->s_freeclusters_counter,
6257 clusters_freed);
6258
6259 if (sbi->s_log_groups_per_flex) {
6260 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6261 atomic64_add(clusters_freed,
6262 &sbi_array_rcu_deref(sbi, s_flex_groups,
6263 flex_group)->free_clusters);
6264 }
6265
6266 ext4_mb_unload_buddy(&e4b);
6267
6268 /* We dirtied the bitmap block */
6269 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6270 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6271
6272 /* And the group descriptor block */
6273 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6274 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6275 if (!err)
6276 err = ret;
6277
6278 error_return:
6279 brelse(bitmap_bh);
6280 ext4_std_error(sb, err);
6281 return err;
6282 }
6283
6284 /**
6285 * ext4_trim_extent -- function to TRIM one single free extent in the group
6286 * @sb: super block for the file system
6287 * @start: starting block of the free extent in the alloc. group
6288 * @count: number of blocks to TRIM
6289 * @e4b: ext4 buddy for the group
6290 *
6291  * Trim "count" blocks starting at "start" in the "group". To ensure that no
6292  * one will allocate those blocks, mark them as used in the buddy bitmap. This
6293  * must be called under the group lock.
6294 */
6295 static int ext4_trim_extent(struct super_block *sb,
6296 int start, int count, struct ext4_buddy *e4b)
6297 __releases(bitlock)
6298 __acquires(bitlock)
6299 {
6300 struct ext4_free_extent ex;
6301 ext4_group_t group = e4b->bd_group;
6302 int ret = 0;
6303
6304 trace_ext4_trim_extent(sb, group, start, count);
6305
6306 assert_spin_locked(ext4_group_lock_ptr(sb, group));
6307
6308 ex.fe_start = start;
6309 ex.fe_group = group;
6310 ex.fe_len = count;
6311
6312 /*
6313 	 * Mark the blocks used, so no one can reuse them while
6314 	 * they are being trimmed.
6315 */
6316 mb_mark_used(e4b, &ex);
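	/*
	 * Drop the group lock while issuing the (potentially slow) discard;
	 * the extent was marked used above, so it cannot be allocated
	 * concurrently, and it is freed again once we retake the lock.
	 */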
6317 ext4_unlock_group(sb, group);
6318 ret = ext4_issue_discard(sb, group, start, count, NULL);
6319 ext4_lock_group(sb, group);
6320 mb_free_blocks(NULL, e4b, start, ex.fe_len);
6321 return ret;
6322 }
6323
6324 static int ext4_try_to_trim_range(struct super_block *sb,
6325 struct ext4_buddy *e4b, ext4_grpblk_t start,
6326 ext4_grpblk_t max, ext4_grpblk_t minblocks)
6327 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6328 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6329 {
6330 ext4_grpblk_t next, count, free_count;
6331 void *bitmap;
6332 int ret = 0;
6333
6334 bitmap = e4b->bd_bitmap;
6335 start = (e4b->bd_info->bb_first_free > start) ?
6336 e4b->bd_info->bb_first_free : start;
6337 count = 0;
6338 free_count = 0;
6339
6340 while (start <= max) {
6341 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6342 if (start > max)
6343 break;
6344 next = mb_find_next_bit(bitmap, max + 1, start);
6345
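		/* [start, next) is now a maximal run of free clusters */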
6346 if ((next - start) >= minblocks) {
6347 ret = ext4_trim_extent(sb, start, next - start, e4b);
6348 if (ret && ret != -EOPNOTSUPP)
6349 break;
6350 ret = 0;
6351 count += next - start;
6352 }
6353 free_count += next - start;
6354 start = next + 1;
6355
6356 if (fatal_signal_pending(current)) {
6357 count = -ERESTARTSYS;
6358 break;
6359 }
6360
6361 if (need_resched()) {
6362 ext4_unlock_group(sb, e4b->bd_group);
6363 cond_resched();
6364 ext4_lock_group(sb, e4b->bd_group);
6365 }
6366
6367 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6368 break;
6369 }
6370
6371 return count;
6372 }
6373
6374 /**
6375 * ext4_trim_all_free -- function to trim all free space in alloc. group
6376 * @sb: super block for file system
6377 * @group: group to be trimmed
6378 * @start: first group block to examine
6379 * @max: last group block to examine
6380 * @minblocks: minimum extent block count
6381 *
6382  * ext4_trim_all_free walks through the group's block bitmap searching for free
6383  * extents. When a free extent is found, it is marked as used in the group's
6384  * buddy bitmap, a TRIM command is issued on the extent, and the extent is then
6385  * freed again in the buddy bitmap.
6386 */
6387 static ext4_grpblk_t
6388 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6389 ext4_grpblk_t start, ext4_grpblk_t max,
6390 ext4_grpblk_t minblocks)
6391 {
6392 struct ext4_buddy e4b;
6393 int ret;
6394
6395 trace_ext4_trim_all_free(sb, group, start, max);
6396
6397 ret = ext4_mb_load_buddy(sb, group, &e4b);
6398 if (ret) {
6399 ext4_warning(sb, "Error %d loading buddy information for %u",
6400 ret, group);
6401 return ret;
6402 }
6403
6404 ext4_lock_group(sb, group);
6405
6406 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6407 minblocks < atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) {
6408 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6409 if (ret >= 0)
6410 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6411 } else {
6412 ret = 0;
6413 }
6414
6415 ext4_unlock_group(sb, group);
6416 ext4_mb_unload_buddy(&e4b);
6417
6418 ext4_debug("trimmed %d blocks in the group %d\n",
6419 ret, group);
6420
6421 return ret;
6422 }
6423
6424 /**
6425 * ext4_trim_fs() -- trim ioctl handle function
6426 * @sb: superblock for filesystem
6427 * @range: fstrim_range structure
6428 *
6429  * start: first byte to trim
6430  * len: number of bytes to trim from start
6431  * minlen: minimum extent length in bytes
6432  * ext4_trim_fs goes through all allocation groups containing bytes from
6433  * start to start+len. For each such group the ext4_trim_all_free function
6434  * is invoked to trim all free space.
6435 */
6436 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6437 {
6438 struct request_queue *q = bdev_get_queue(sb->s_bdev);
6439 struct ext4_group_info *grp;
6440 ext4_group_t group, first_group, last_group;
6441 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6442 uint64_t start, end, minlen, trimmed = 0;
6443 ext4_fsblk_t first_data_blk =
6444 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6445 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6446 int ret = 0;
6447
6448 start = range->start >> sb->s_blocksize_bits;
6449 end = start + (range->len >> sb->s_blocksize_bits) - 1;
6450 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6451 range->minlen >> sb->s_blocksize_bits);
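	/*
	 * Example, assuming 4K blocks and a 1:1 cluster ratio: a
	 * range->minlen of 1MB converts to 256 blocks and therefore
	 * 256 clusters.
	 */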
6452
6453 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6454 start >= max_blks ||
6455 range->len < sb->s_blocksize)
6456 return -EINVAL;
6457 	/* No point in trying to trim less than the discard granularity */
6458 if (range->minlen < q->limits.discard_granularity) {
6459 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6460 q->limits.discard_granularity >> sb->s_blocksize_bits);
6461 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6462 goto out;
6463 }
6464 if (end >= max_blks)
6465 end = max_blks - 1;
6466 if (end <= first_data_blk)
6467 goto out;
6468 if (start < first_data_blk)
6469 start = first_data_blk;
6470
6471 /* Determine first and last group to examine based on start and end */
6472 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6473 &first_group, &first_cluster);
6474 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6475 &last_group, &last_cluster);
6476
6477 /* end now represents the last cluster to discard in this group */
6478 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6479
6480 for (group = first_group; group <= last_group; group++) {
6481 grp = ext4_get_group_info(sb, group);
6482 /* We only do this if the grp has never been initialized */
6483 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6484 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6485 if (ret)
6486 break;
6487 }
6488
6489 /*
6490 		 * For all the groups except the last one, the last cluster will
6491 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6492 		 * change it for the last group; note that last_cluster was
6493 		 * already computed earlier by ext4_get_group_no_and_offset().
6494 */
6495 if (group == last_group)
6496 end = last_cluster;
6497
6498 if (grp->bb_free >= minlen) {
6499 cnt = ext4_trim_all_free(sb, group, first_cluster,
6500 end, minlen);
6501 if (cnt < 0) {
6502 ret = cnt;
6503 break;
6504 }
6505 trimmed += cnt;
6506 }
6507
6508 /*
6509 * For every group except the first one, we are sure
6510 * that the first cluster to discard will be cluster #0.
6511 */
6512 first_cluster = 0;
6513 }
6514
6515 if (!ret)
6516 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
6517
6518 out:
6519 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6520 return ret;
6521 }
6522
6523 /* Iterate all the free extents in the group. */
6524 int
6525 ext4_mballoc_query_range(
6526 struct super_block *sb,
6527 ext4_group_t group,
6528 ext4_grpblk_t start,
6529 ext4_grpblk_t end,
6530 ext4_mballoc_query_range_fn formatter,
6531 void *priv)
6532 {
6533 void *bitmap;
6534 ext4_grpblk_t next;
6535 struct ext4_buddy e4b;
6536 int error;
6537
6538 error = ext4_mb_load_buddy(sb, group, &e4b);
6539 if (error)
6540 return error;
6541 bitmap = e4b.bd_bitmap;
6542
6543 ext4_lock_group(sb, group);
6544
6545 start = (e4b.bd_info->bb_first_free > start) ?
6546 e4b.bd_info->bb_first_free : start;
6547 if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6548 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6549
6550 while (start <= end) {
6551 start = mb_find_next_zero_bit(bitmap, end + 1, start);
6552 if (start > end)
6553 break;
6554 next = mb_find_next_bit(bitmap, end + 1, start);
6555
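		/*
		 * Drop the group lock around the formatter callback, which
		 * may sleep (e.g. while copying to user space); the bitmap
		 * can change underneath us, so callers get a best-effort
		 * snapshot.
		 */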
6556 ext4_unlock_group(sb, group);
6557 error = formatter(sb, group, start, next - start, priv);
6558 if (error)
6559 goto out_unload;
6560 ext4_lock_group(sb, group);
6561
6562 start = next + 1;
6563 }
6564
6565 ext4_unlock_group(sb, group);
6566 out_unload:
6567 ext4_mb_unload_buddy(&e4b);
6568
6569 return error;
6570 }