/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "mballoc.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and the buddy information, which are stored in the
 * inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and the buddy information. So for each group
 * we take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both the prealloc spaces are getting populated as above. So for the first
 * request we will hit the buddy cache which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent requests.
 */
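
/*
 * For illustration (derived from the description above): with the
 * default tunables, a write that grows a file to 12 blocks stays below
 * s_mb_stream_request (16 blocks), so it is served from the per-CPU
 * locality group preallocation and normalized towards
 * s_mb_group_prealloc. A file whose resulting size would be, say,
 * 64 blocks exceeds the threshold and uses inode preallocation
 * instead, normalized by the file-size heuristics in
 * ext4_mb_normalize_request().
 */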

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 * and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA:                 buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:            on-disk += N; PA -= N
 *  - discard locality group PA:        buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actually
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, it's OK. IOW, one
 *     can set a bit in the on-disk bitmap if the buddy has the same bit set
 *     and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - a PA is referenced and while it is, no discard is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */
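
/*
 * A worked example of the accounting rules above: suppose a group's
 * on-disk bitmap has 100 bits set and a new inode PA of N = 16 blocks
 * is created. "new PA" marks 16 more bits used in the buddy
 * (buddy += 16) while pa_free = 16. Writing 4 of those blocks is
 * "use inode PA": 4 bits get set in the on-disk bitmap and pa_free
 * drops to 12. Discarding the PA at that point frees the 12
 * still-unused bits in the buddy (buddy -= on-disk - PA), and the
 * structures agree again: in-core buddy = on-disk bitmap + PAs.
 */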

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
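
/*
 * For example, discarding all PAs of an inode (per the path above)
 * first takes the object (inode) side to walk i_prealloc_list, then
 * each pa lock to mark the PA deleted, and finally the group bitlock
 * to return the unused blocks to the in-core bitmap.
 */
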
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_ext_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
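
/*
 * e.g. on a 64-bit machine, addr == base + 5 (base 8-byte aligned) with
 * *bit == 2 becomes addr == base and *bit == 2 + 5 * 8 == 42: the same
 * bit, now counted from an address aligned as the bitops require.
 */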

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

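/*
 * e.g. if addr is 2 bytes past an 8-byte boundary, fix becomes 16, so a
 * search over [start, max) is rewritten as a search over
 * [start + 16, max + 16) from the aligned base; the result then has fix
 * subtracted again and is clamped to max, so callers never see the shift.
 */
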
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return EXT4_MB_BITMAP(e4b);
	}

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
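
/*
 * For a 4k block size (bd_blkbits == 12), order 0 maps to the block
 * bitmap itself with *max == 1 << 15 == 32768 bits, one per block in
 * the group; each higher order halves *max, and that order's bits start
 * s_mb_offsets[order] bytes into the buddy block.
 */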

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
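
/*
 * Worked example: first = 20, len = 11 splits as 20-23 (order 2),
 * 24-27 (order 2), 28-29 (order 1) and 30 (order 0): each round takes
 * min(ffs(first | border) - 1, fls(len) - 1) as the chunk order, bumps
 * bb_counters[order] and, for order > 0, clears bit (first >> order)
 * in that order's buddy bitmap.
 */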

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u clusters in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group
		 * descriptor corrupt and update bb_free using the
		 * bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}
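
/*
 * e.g. a bitmap fragment 1 1 0 0 1 0 0 0 (0 == free) yields two free
 * extents: blocks 2-3 (a single order-1 chunk) and blocks 5-7 (block 5
 * at order 0, blocks 6-7 as an order-1 chunk), so free == 5,
 * bb_fragments == 2 and bb_first_free == 2.
 */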

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and the buddy information, which are
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and the buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page groups,
 * which is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
			err = -ENOMEM;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
			err = -EIO;
			goto out;
		}
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}
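
/*
 * e.g. with 1k blocks and 4k pages, blocks_per_page == 4 and
 * groups_per_page == 2, so page 0 of the buddy cache holds
 * [g0 bitmap][g0 buddy][g1 bitmap][g1 buddy]; with 4k blocks and 4k
 * pages each page holds exactly one block, so group g's bitmap lives
 * on page 2*g and its buddy on page 2*g + 1.
 */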

/*
 * Lock the buddy and bitmap pages. This makes sure another parallel
 * init_group on the same buddy page doesn't happen while holding the buddy
 * page lock. Return locked buddy and bitmap pages on the e4b struct. If
 * buddy and bitmap are on the same page e4b->bd_buddy_page is NULL and the
 * return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * has yet to initialize it. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
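
/*
 * e.g. if block 5 sits in a free chunk of order 2 (blocks 4-7), then
 * bit 2 (5 >> 1) is still set at order 1 but bit 1 (5 >> 2) is clear in
 * the order-2 buddy, so the loop returns 2 on its second iteration.
 */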

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), block);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block "
					      "(bit %u)", block);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}
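
/*
 * e.g. freeing block 5 when block 4 is already free: bit 5 is cleared
 * at order 0, its buddy bit 4 is found clear too, so bb_counters[0] is
 * decremented twice, bit 2 is cleared in the order-1 buddy and
 * bb_counters[1] is incremented: blocks 4-5 now count as one order-1
 * chunk. (Only for order > 0 are the two child bits set again, since
 * order 0 lives in the block bitmap itself, where free blocks must
 * stay cleared.)
 */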

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME drop order completely ? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
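
/*
 * e.g. allocating blocks 8-10 (start == 8, len == 3) from a free
 * order-2 chunk 8-11: the chunk is first split into two order-1
 * chunks, 8-9 is consumed whole, then 10-11 is split again so that
 * block 10 can be consumed at order 0, leaving block 11 free. The
 * returned value encodes the first split (len | (ord << 16)) for
 * allocation history.
 */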

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache call for this
	 * group until we update the bitmap. That would mean we
	 * could double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}
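
/*
 * e.g. with a goal length of 8: finding a 6-block extent stores it as
 * best; a later 12-block extent replaces it (goal not yet satisfied,
 * bigger is better); once satisfied, a 9-block extent would replace
 * the 12-block one (still >= 8, less left over), and an exact 8-block
 * extent would be used immediately.
 */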

static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, the caller must pass the number of
 * free blocks in the group, so the routine can know the upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storage like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;
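	/*
	 * Illustrative example (hypothetical numbers): if the group
	 * starts at block 100 and s_stripe == 16, then a == 7 after the
	 * round-up division and i == 7 * 16 - 100 == 12, the offset of
	 * the first stripe-aligned block within the group.
	 */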

	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}

/* This is now called BEFORE we load the buddy bitmap. */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		int ret = ext4_mb_init_group(ac->ac_sb, group);
		if (ret)
			return 0;
	}

	free = grp->bb_free;
	fragments = grp->bb_fragments;
	if (free == 0)
		return 0;
	if (fragments == 0)
		return 0;

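	/*
	 * Descriptive note on the criteria below: cr 0 accepts a group
	 * only if its largest free order can satisfy an exact
	 * power-of-two request; cr 1 requires the average free extent
	 * (free / fragments) to cover the request; cr 2 merely requires
	 * enough total free clusters; cr 3 accepts any group with free
	 * space.
	 */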
	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		if (grp->bb_largest_free_order < ac->ac_2order)
			return 0;

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		return 1;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups, group, i;
	int cr;
	int err = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac_2order is set only if the fe_len is a power of 2;
	 * if ac_2order is set we also set criteria to 0 so that we
	 * try exact allocation using the buddy data.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than or equal to sbi->s_mb_order2_reqs.
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 */
	if (i >= sbi->s_mb_order2_reqs) {
		/*
		 * This should tell if fe_len is exactly a power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = i - 1;
	}
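
	/*
	 * Illustrative example (hypothetical numbers): for a goal of
	 * fe_len == 64, fls() returns 7 and 64 & ~(1 << 6) == 0, so
	 * ac_2order becomes 6; for fe_len == 65 the mask test fails
	 * and ac_2order stays 0, forcing the non-exact scan paths.
	 */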

	/* if stream allocation is enabled, use the global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be a hot point */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more or less suitable blocks */
	cr = ac->ac_2order ? 0 : 1;
	/*
	 * cr == 0 try to get exact allocation,
	 * cr == 3 try to get anything
	 */
repeat:
	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
		ac->ac_criteria = cr;
		/*
		 * searching for the right group starts
		 * from the goal value specified
		 */
		group = ac->ac_g_ex.fe_group;

		for (i = 0; i < ngroups; group++, i++) {
			if (group == ngroups)
				group = 0;

			/* This now checks without needing the buddy page */
			if (!ext4_mb_good_group(ac, group, cr))
				continue;

			err = ext4_mb_load_buddy(sb, group, &e4b);
			if (err)
				goto out;

			ext4_lock_group(sb, group);

			/*
			 * We need to check again after locking the
			 * block group
			 */
			if (!ext4_mb_good_group(ac, group, cr)) {
				ext4_unlock_group(sb, group);
				ext4_mb_unload_buddy(&e4b);
				continue;
			}

			ac->ac_groups_scanned++;
			if (cr == 0)
				ext4_mb_simple_scan_group(ac, &e4b);
			else if (cr == 1 && sbi->s_stripe &&
					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
				ext4_mb_scan_aligned(ac, &e4b);
			else
				ext4_mb_complex_scan_group(ac, &e4b);

			ext4_unlock_group(sb, group);
			ext4_mb_unload_buddy(&e4b);

			if (ac->ac_status != AC_STATUS_CONTINUE)
				break;
		}
	}

	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		/*
		 * We've been searching too long. Let's try to allocate
		 * the best chunk we've found so far
		 */

		ext4_mb_try_best_found(ac, &e4b);
		if (ac->ac_status != AC_STATUS_FOUND) {
			/*
			 * Someone more lucky has already allocated it.
			 * The only thing we can do is just take the first
			 * found block(s)
			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
			 */
			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			ac->ac_flags |= EXT4_MB_HINT_FIRST;
			cr = 3;
			atomic_inc(&sbi->s_mb_lost_chunks);
			goto repeat;
		}
	}
out:
	return err;
}

static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = seq->private;
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i;
	int err;
	struct ext4_buddy e4b;
	struct sg {
		struct ext4_group_info info;
		ext4_grpblk_t counters[16];
	} sg;

	group--;
	if (group == 0)
		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
			   "group", "free", "frags", "first",
			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");

	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
		sizeof(struct ext4_group_info);
	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		seq_printf(seq, "#%-5u: I/O error\n", group);
		return 0;
	}
	ext4_lock_group(sb, group);
	memcpy(&sg, ext4_get_group_info(sb, group), i);
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
			sg.info.bb_fragments, sg.info.bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
				sg.info.bb_counters[i] : 0);
	seq_printf(seq, " ]\n");

	return 0;
}
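
/*
 * Illustrative mb_groups output (hypothetical numbers), one line per
 * group after the header:
 *
 * #group: free  frags first [ 2^0 2^1 2^2 ... 2^13 ]
 * #0    : 24543 5     515   [ 1   1   1   ... 0    ]
 */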

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_mb_seq_groups_ops = {
	.start = ext4_mb_seq_groups_start,
	.next = ext4_mb_seq_groups_next,
	.stop = ext4_mb_seq_groups_stop,
	.show = ext4_mb_seq_groups_show,
};

static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
{
	struct super_block *sb = PDE(inode)->data;
	int rc;

	rc = seq_open(file, &ext4_mb_seq_groups_ops);
	if (rc == 0) {
		struct seq_file *m = file->private_data;
		m->private = sb;
	}
	return rc;
}

static const struct file_operations ext4_mb_seq_groups_fops = {
	.owner = THIS_MODULE,
	.open = ext4_mb_seq_groups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
{
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];

	BUG_ON(!cachep);
	return cachep;
}

/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i;
	int metalen = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_KERNEL);
		if (meta_group_info == NULL) {
			ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate mem "
				 "for a buddy group");
			goto exit_meta_group_info;
		}
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
			meta_group_info;
	}

	meta_group_info =
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (meta_group_info[i] == NULL) {
		ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate buddy mem");
		goto exit_group_info;
	}
	memset(meta_group_info[i], 0, kmem_cache_size(cachep));
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		meta_group_info[i]->bb_free =
			ext4_free_clusters_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_group_clusters(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */

#ifdef DOUBLE_CHECK
	{
		struct buffer_head *bh;
		meta_group_info[i]->bb_bitmap =
			kmalloc(sb->s_blocksize, GFP_KERNEL);
		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
		bh = ext4_read_block_bitmap(sb, group);
		BUG_ON(bh == NULL);
		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
		       sb->s_blocksize);
		put_bh(bh);
	}
#endif

	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
	}
exit_meta_group_info:
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */

static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int num_meta_group_infos;
	int num_meta_group_infos_max;
	int array_size;
	struct ext4_group_desc *desc;
	struct kmem_cache *cachep;

	/* This is the number of blocks used by GDT */
	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);

	/*
	 * This is the total number of blocks used by GDT including
	 * the number of reserved blocks for GDT.
	 * The s_group_info array is allocated with this value
	 * to allow a clean online resize without a complex
	 * manipulation of pointers.
	 * The drawback is the unused memory when no resize
	 * occurs but it's very low in terms of pages
	 * (see comments below)
	 * Need to handle this properly when META_BG resizing is allowed
	 */
	num_meta_group_infos_max = num_meta_group_infos +
		le16_to_cpu(es->s_reserved_gdt_blocks);

	/*
	 * array_size is the size of s_group_info array. We round it
	 * to the next power of two because this approximation is done
	 * internally by kmalloc so we can have some more memory
	 * for free here (e.g. may be used for META_BG resize).
	 */
	array_size = 1;
	while (array_size < sizeof(*sbi->s_group_info) *
			num_meta_group_infos_max)
		array_size = array_size << 1;
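	/*
	 * Illustrative example (hypothetical numbers): with 8-byte
	 * pointers and num_meta_group_infos_max == 100, 800 bytes are
	 * needed and array_size is rounded up to 1024.
	 */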
	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
	 * So a two level scheme suffices for now. */
	sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
	if (sbi->s_group_info == NULL) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
		return -ENOMEM;
	}
	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		ext4_msg(sb, KERN_ERR, "can't get new inode");
		goto err_freesgi;
	}
	/* To avoid potentially colliding with a valid on-disk inode number,
	 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
	 * not in the inode hash, so it should never be found by iget(), but
	 * this will avoid confusion if it ever shows up during debugging. */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	return 0;

err_freebuddy:
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0)
		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
	i = num_meta_group_infos;
	while (i-- > 0)
		kfree(sbi->s_group_info[i]);
	iput(sbi->s_buddy_cache);
err_freesgi:
	ext4_kvfree(sbi->s_group_info);
	return -ENOMEM;
}

static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		if (ext4_groupinfo_caches[i])
			kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	slab_size = offsetof(struct ext4_group_info,
				bb_counters[blocksize_bits + 2]);

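	/*
	 * Illustrative note: for a 4k block size, blocksize_bits == 12,
	 * so each slab object carries bb_counters[0..13] -- one counter
	 * per buddy order up to blocksize_bits + 1.
	 */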
	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
					NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
	if (!cachep) {
		printk(KERN_EMERG
		       "EXT4-fs: no memory for groupinfo slab cache\n");
		return -ENOMEM;
	}

	return 0;
}

int ext4_mb_init(struct super_block *sb, int needs_recovery)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset;
	unsigned max;
	int ret;

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
	if (ret < 0)
		goto out;

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	i = 1;
	offset = 0;
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += 1 << (sb->s_blocksize_bits - i);
		max = max >> 1;
		i++;
	} while (i <= sb->s_blocksize_bits + 1);
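
	/*
	 * Illustrative example (4k blocks, so blocksize_bits == 12):
	 * s_mb_maxs[0] == 32768 bits for the on-disk bitmap, and inside
	 * the buddy block s_mb_offsets[1] == 0 with s_mb_maxs[1] == 16384,
	 * s_mb_offsets[2] == 2048 with s_mb_maxs[2] == 8192, and so on,
	 * halving the bit count at each order up to order 13.
	 */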

	spin_lock_init(&sbi->s_md_lock);
	spin_lock_init(&sbi->s_bal_lock);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes. However for bigalloc file
	 * systems, this is probably too big (i.e., if the cluster size
	 * is 1 megabyte, then the group preallocation size becomes half
	 * a gigabyte!). As a default, we will keep a two megabyte
	 * group prealloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters. This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
	 */
	if (sbi->s_stripe > 1) {
		sbi->s_mb_group_prealloc = roundup(
			sbi->s_mb_group_prealloc, sbi->s_stripe);
	}
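
	/*
	 * Illustrative example (hypothetical geometry): with 4k blocks
	 * and 64k clusters, s_cluster_bits == 4, so 512 >> 4 == 32
	 * clusters, i.e. 2 megabytes; if s_stripe == 24, the 32 above
	 * is then rounded up to 48 clusters.
	 */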

	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
	if (sbi->s_locality_groups == NULL) {
		ret = -ENOMEM;
		goto out_free_groupinfo_slab;
	}
	for_each_possible_cpu(i) {
		struct ext4_locality_group *lg;
		lg = per_cpu_ptr(sbi->s_locality_groups, i);
		mutex_init(&lg->lg_mutex);
		for (j = 0; j < PREALLOC_TB_SIZE; j++)
			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
		spin_lock_init(&lg->lg_prealloc_lock);
	}

	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0)
		goto out_free_locality_groups;

	if (sbi->s_proc)
		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
				 &ext4_mb_seq_groups_fops, sb);

	if (sbi->s_journal)
		sbi->s_journal->j_commit_callback = release_blocks_on_commit;

	return 0;

out_free_locality_groups:
	free_percpu(sbi->s_locality_groups);
	sbi->s_locality_groups = NULL;
out_free_groupinfo_slab:
	ext4_groupinfo_destroy_slabs();
out:
	kfree(sbi->s_mb_offsets);
	sbi->s_mb_offsets = NULL;
	kfree(sbi->s_mb_maxs);
	sbi->s_mb_maxs = NULL;
	return ret;
}

/* needs to be called with the ext4 group lock held */
static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
	struct ext4_prealloc_space *pa;
	struct list_head *cur, *tmp;
	int count = 0;

	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		list_del(&pa->pa_group_list);
		count++;
		kmem_cache_free(ext4_pspace_cachep, pa);
	}
	if (count)
		mb_debug(1, "mballoc: %u PAs left\n", count);
}

int ext4_mb_release(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	int num_meta_group_infos;
	struct ext4_group_info *grinfo;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	if (sbi->s_group_info) {
		for (i = 0; i < ngroups; i++) {
			grinfo = ext4_get_group_info(sb, i);
#ifdef DOUBLE_CHECK
			kfree(grinfo->bb_bitmap);
#endif
			ext4_lock_group(sb, i);
			ext4_mb_cleanup_pa(grinfo);
			ext4_unlock_group(sb, i);
			kmem_cache_free(cachep, grinfo);
		}
		num_meta_group_infos = (ngroups +
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
			EXT4_DESC_PER_BLOCK_BITS(sb);
		for (i = 0; i < num_meta_group_infos; i++)
			kfree(sbi->s_group_info[i]);
		ext4_kvfree(sbi->s_group_info);
	}
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	if (sbi->s_buddy_cache)
		iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u blocks %u reqs (%u success)",
			 atomic_read(&sbi->s_bal_allocated),
			 atomic_read(&sbi->s_bal_reqs),
			 atomic_read(&sbi->s_bal_success));
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u extents scanned, %u goal hits, "
			 "%u 2^N hits, %u breaks, %u lost",
			 atomic_read(&sbi->s_bal_ex_scanned),
			 atomic_read(&sbi->s_bal_goals),
			 atomic_read(&sbi->s_bal_2orders),
			 atomic_read(&sbi->s_bal_breaks),
			 atomic_read(&sbi->s_mb_lost_chunks));
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %lu generated and it took %Lu",
			 sbi->s_mb_buddies_generated,
			 sbi->s_mb_generation_time);
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u preallocated, %u discarded",
			 atomic_read(&sbi->s_mb_preallocated),
			 atomic_read(&sbi->s_mb_discarded));
	}

	free_percpu(sbi->s_locality_groups);
	if (sbi->s_proc)
		remove_proc_entry("mb_groups", sbi->s_proc);

	return 0;
}

static inline int ext4_issue_discard(struct super_block *sb,
		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
{
	ext4_fsblk_t discard_block;

	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
			 ext4_group_first_block_no(sb, block_group));
	count = EXT4_C2B(EXT4_SB(sb), count);
	trace_ext4_discard_blocks(sb,
			(unsigned long long) discard_block, count);
	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}

/*
 * This function is called by the jbd2 layer once the commit has finished,
 * so we know we can free the blocks that were released with that commit.
 */
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0, count2 = 0;
	struct ext4_free_data *entry;
	struct list_head *l, *ltmp;

	list_for_each_safe(l, ltmp, &txn->t_private_list) {
		entry = list_entry(l, struct ext4_free_data, list);

		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
			 entry->count, entry->group, entry);

		if (test_opt(sb, DISCARD))
			ext4_issue_discard(sb, entry->group,
					   entry->start_cluster, entry->count);

		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
		/* we expect to find existing buddy because it's pinned */
		BUG_ON(err != 0);

		db = e4b.bd_info;
		/* there are blocks to put in buddy to make them really free */
		count += entry->count;
		count2++;
		ext4_lock_group(sb, entry->group);
		/* Take it out of per group rb tree */
		rb_erase(&entry->node, &(db->bb_free_root));
		mb_free_blocks(NULL, &e4b, entry->start_cluster, entry->count);

		/*
		 * Clear the trimmed flag for the group so that the next
		 * ext4_trim_fs can trim it.
		 * If the volume is mounted with -o discard, online discard
		 * is supported and the free blocks will be trimmed online.
		 */
		if (!test_opt(sb, DISCARD))
			EXT4_MB_GRP_CLEAR_TRIMMED(db);

		if (!db->bb_free_root.rb_node) {
			/* No more items in the per group rb tree
			 * balance refcounts from ext4_mb_free_metadata()
			 */
			page_cache_release(e4b.bd_buddy_page);
			page_cache_release(e4b.bd_bitmap_page);
		}
		ext4_unlock_group(sb, entry->group);
		kmem_cache_free(ext4_free_ext_cachep, entry);
		ext4_mb_unload_buddy(&e4b);
	}

	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}

#ifdef CONFIG_EXT4_DEBUG
u8 mb_enable_debug __read_mostly;

static struct dentry *debugfs_dir;
static struct dentry *debugfs_debug;

static void __init ext4_create_debugfs_entry(void)
{
	debugfs_dir = debugfs_create_dir("ext4", NULL);
	if (debugfs_dir)
		debugfs_debug = debugfs_create_u8("mballoc-debug",
						  S_IRUGO | S_IWUSR,
						  debugfs_dir,
						  &mb_enable_debug);
}

static void ext4_remove_debugfs_entry(void)
{
	debugfs_remove(debugfs_debug);
	debugfs_remove(debugfs_dir);
}

#else

static void __init ext4_create_debugfs_entry(void)
{
}

static void ext4_remove_debugfs_entry(void)
{
}

#endif

int __init ext4_init_mballoc(void)
{
	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
					SLAB_RECLAIM_ACCOUNT);
	if (ext4_pspace_cachep == NULL)
		return -ENOMEM;

	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
				    SLAB_RECLAIM_ACCOUNT);
	if (ext4_ac_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		return -ENOMEM;
	}

	ext4_free_ext_cachep = KMEM_CACHE(ext4_free_data,
					  SLAB_RECLAIM_ACCOUNT);
	if (ext4_free_ext_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		kmem_cache_destroy(ext4_ac_cachep);
		return -ENOMEM;
	}
	ext4_create_debugfs_entry();
	return 0;
}

void ext4_exit_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_ext_cachep);
	ext4_groupinfo_destroy_slabs();
	ext4_remove_debugfs_entry();
}


/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
 * Returns 0 if success or error code
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_clstrs)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	err = -EIO;
	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (!bitmap_bh)
		goto out_err;

	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	err = ext4_journal_get_write_access(handle, gdp_bh);
	if (err)
		goto out_err;

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_data_block_valid(sbi, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata\n", block, block+len);
		/* The filesystem is mounted not to panic on error,
		 * so fix the bitmap and repeat the block allocation.
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			      ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			err = -EAGAIN;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		      ac->ac_b_ex.fe_len);
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
					     ext4_free_clusters_after_init(sb,
						ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_group_clusters_set(sb, gdp, len);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty cluster count as well. It should not go
	 * negative.
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic_sub(ac->ac_b_ex.fe_len,
			   &sbi->s_flex_groups[flex_group].free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	ext4_mark_super_dirty(sb);
	brelse(bitmap_bh);
	return err;
}

/*
 * here we normalize the request for a locality group
 * Group requests are normalized to s_mb_group_prealloc, which is
 * rounded up to a multiple of s_stripe when a stripe size is set
 * via the mount option. s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(1, "#%u: goal %u blocks for locality group\n",
		current->pid, ac->ac_g_ex.fe_len);
}

/*
 * Normalization means making the request better in terms of
 * size and alignment
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, orig_size, start_off;
	ext4_lblk_t start;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *pa;

	/* do normalize only for data requests, metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometimes the caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* the caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return ;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn the actual file size
	 * given the current request is allocated */
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
		size = ac->ac_o_ex.fe_len << bsbits;
	}
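
	/*
	 * Illustrative example (hypothetical sizes): a file whose
	 * resulting size is 100k is normalized to a 128k goal; a 3M
	 * file falls into the first NRL_CHECK_SIZE bucket and gets a
	 * 2M goal with start_off aligned on a 2M boundary.
	 */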
	size = size >> bsbits;
	start = start_off >> bsbits;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	end = start + size;

	/* check we don't cross already preallocated blocks */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		if (pa->pa_deleted)
			continue;
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
						  pa->pa_len);

		/* PA must not overlap original request */
		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
			ac->ac_o_ex.fe_logical < pa->pa_lstart));

		/* skip PAs this normalized request doesn't overlap with */
		if (pa->pa_lstart >= end || pa_end <= start) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		BUG_ON(pa->pa_lstart <= start && pa_end >= end);

		/* adjust start or end to be adjacent to this pa */
		if (pa_end <= ac->ac_o_ex.fe_logical) {
			BUG_ON(pa_end < start);
			start = pa_end;
		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
			BUG_ON(pa->pa_lstart > end);
			end = pa->pa_lstart;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();
	size = end - start;

	/* XXX: extra loop to check we really don't overlap preallocations */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0) {
			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
							  pa->pa_len);
			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	if (start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
	}
	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical);
	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));

	/* now prepare goal request */

	/* XXX: is it better to align blocks WRT to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size))) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
		(unsigned) orig_size, (unsigned) start);
}

static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}

/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;
	int len;

	if (pa && pa->pa_type == MB_INODE_PA) {
		len = ac->ac_b_ex.fe_len;
		pa->pa_free += len;
	}
}

/*
 * use blocks preallocated to inode
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	pa->pa_free -= len;

	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
}
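
/*
 * Illustrative example (hypothetical numbers, cluster size == block
 * size): if pa_lstart == 100, pa_pstart == 5000, pa_len == 50 and the
 * request is 8 blocks at logical 130, the allocation starts at
 * physical block 5030 with len == 8; had the request run past the PA,
 * end would have been clipped to pa_pstart + pa_len.
 */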

/*
 * use blocks preallocated to locality group
 */
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	unsigned int len = ac->ac_o_ex.fe_len;

	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
					&ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* we don't correct pa_pstart or pa_len here to avoid
	 * a possible race when the group is being loaded concurrently;
	 * instead we correct pa later, after blocks are marked
	 * in the on-disk bitmap -- see ext4_mb_release_context()
	 * Other CPUs are prevented from allocating from this pa by lg_mutex
	 */
	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
}

/*
 * Return the prealloc space that has the minimal distance
 * from the goal block. @cpa is the prealloc
 * space that has the currently known minimal distance
 * from the goal block.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}

/*
 * search goal blocks in preallocated space
 */
static noinline_for_stack int
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int order, i;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa, *cpa = NULL;
	ext4_fsblk_t goal_block;

	/* only data can be preallocated */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return 0;

	/* first, try per-file preallocation */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {

		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
			ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
					       EXT4_C2B(sbi, pa->pa_len)))
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
			(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
			 EXT4_MAX_BLOCK_FILE_PHYS))
			continue;

		/* found preallocated blocks, use them */
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0 && pa->pa_free) {
			atomic_inc(&pa->pa_count);
			ext4_mb_use_inode_pa(ac, pa);
			spin_unlock(&pa->pa_lock);
			ac->ac_criteria = 10;
			rcu_read_unlock();
			return 1;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	/* can we use group allocation? */
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
		return 0;

	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	if (lg == NULL)
		return 0;
	order = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;

	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
	/*
	 * search for the prealloc space that is having
	 * minimal distance from the goal block.
	 */
	for (i = order; i < PREALLOC_TB_SIZE; i++) {
		rcu_read_lock();
		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
					pa_inode_list) {
			spin_lock(&pa->pa_lock);
			if (pa->pa_deleted == 0 &&
					pa->pa_free >= ac->ac_o_ex.fe_len) {

				cpa = ext4_mb_check_group_pa(goal_block,
								pa, cpa);
			}
			spin_unlock(&pa->pa_lock);
		}
		rcu_read_unlock();
	}
	if (cpa) {
		ext4_mb_use_group_pa(ac, cpa);
		ac->ac_criteria = 20;
		return 1;
	}
	return 0;
}

/*
 * the function goes through all blocks freed in the group
 * but not yet committed and marks them used in the in-core bitmap.
 * the buddy must be generated from this bitmap
 * Needs to be called with the ext4 group lock held
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	n = rb_first(&(grp->bb_free_root));

	while (n) {
		entry = rb_entry(n, struct ext4_free_data, node);
		ext4_set_bits(bitmap, entry->start_cluster, entry->count);
		n = rb_next(n);
	}
	return;
}

/*
 * the function goes through all preallocations in this group and marks them
 * used in the in-core bitmap. the buddy must be generated from this bitmap
 * Needs to be called with the ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int len;

	/* all forms of preallocation discard first load the group,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here
	 * notice we do NOT ignore preallocations with pa_deleted
	 * otherwise we could leave used blocks available for
	 * allocation in buddy when concurrent ext4_mb_put_pa()
	 * is dropping preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
					     &groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		ext4_set_bits(bitmap, start, len);
		preallocated += len;
	}
	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
}

static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;
	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
	kmem_cache_free(ext4_pspace_cachep, pa);
}

/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
		return;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	pa->pa_deleted = 1;
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)			P2 (regular allocation)
	 *					find block B in PA
	 *  copy on-disk bitmap to buddy
	 *					mark B in on-disk bitmap
	 *					drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available. to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	spin_lock(pa->pa_obj_lock);
	list_del_rcu(&pa->pa_inode_list);
	spin_unlock(pa->pa_obj_lock);

	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
3387
3388/*
3389 * creates new preallocated space for given inode
3390 */
4ddfef7b
ES
3391static noinline_for_stack int
3392ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3393{
3394 struct super_block *sb = ac->ac_sb;
53accfa9 3395 struct ext4_sb_info *sbi = EXT4_SB(sb);
c9de560d
AT
3396 struct ext4_prealloc_space *pa;
3397 struct ext4_group_info *grp;
3398 struct ext4_inode_info *ei;
3399
3400 /* preallocate only when found space is larger than requested */
3401 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3402 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3403 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3404
3405 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3406 if (pa == NULL)
3407 return -ENOMEM;
3408
3409 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3410 int winl;
3411 int wins;
3412 int win;
3413 int offs;
3414
3415 /* we can't allocate as much as the normalizer wants,
3416 * so the found space must get a proper lstart
3417 * to cover the original request */
3418 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3419 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3420
3421 /* we're limited by the original request in that
3422 * the logical block must be covered anyway;
3423 * winl is the window we can move our chunk within */
3424 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3425
3426 /* also, we should cover whole original request */
53accfa9 3427 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
c9de560d
AT
3428
3429 /* the smallest one defines real window */
3430 win = min(winl, wins);
3431
53accfa9
TT
3432 offs = ac->ac_o_ex.fe_logical %
3433 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
c9de560d
AT
3434 if (offs && offs < win)
3435 win = offs;
3436
53accfa9
TT
3437 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3438 EXT4_B2C(sbi, win);
c9de560d
AT
3439 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3440 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3441 }
3442
3443 /* preallocation can change ac_b_ex, thus we store the actually
3444 * allocated blocks for history */
3445 ac->ac_f_ex = ac->ac_b_ex;
3446
3447 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3448 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3449 pa->pa_len = ac->ac_b_ex.fe_len;
3450 pa->pa_free = pa->pa_len;
3451 atomic_set(&pa->pa_count, 1);
3452 spin_lock_init(&pa->pa_lock);
d794bf8e
AK
3453 INIT_LIST_HEAD(&pa->pa_inode_list);
3454 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 3455 pa->pa_deleted = 0;
cc0fb9ad 3456 pa->pa_type = MB_INODE_PA;
c9de560d 3457
6ba495e9 3458 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
c9de560d 3459 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
9bffad1e 3460 trace_ext4_mb_new_inode_pa(ac, pa);
c9de560d
AT
3461
3462 ext4_mb_use_inode_pa(ac, pa);
53accfa9 3463 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
c9de560d
AT
3464
3465 ei = EXT4_I(ac->ac_inode);
3466 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3467
3468 pa->pa_obj_lock = &ei->i_prealloc_lock;
3469 pa->pa_inode = ac->ac_inode;
3470
3471 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3472 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3473 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3474
3475 spin_lock(pa->pa_obj_lock);
3476 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3477 spin_unlock(pa->pa_obj_lock);
3478
3479 return 0;
3480}
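
The window arithmetic above slides the preallocated extent's logical start left so it still covers the original request while keeping the chunk aligned. A simplified userspace sketch of that computation; pa_lstart() is a hypothetical name, and the EXT4_C2B/EXT4_B2C cluster conversions are omitted, so this assumes a 1:1 block/cluster ratio.

#include <stdio.h>

static long pa_lstart(long o_logical, long g_logical, long o_len, long b_len)
{
        long winl = o_logical - g_logical;      /* room to move left */
        long wins = b_len - o_len;              /* must still cover the request */
        long win = winl < wins ? winl : wins;   /* the smaller one is the real window */
        long offs = o_logical % b_len;          /* keep chunk alignment */

        if (offs && offs < win)
                win = offs;
        return o_logical - win;
}

int main(void)
{
        /* original request: 4 blocks at logical 100; goal started at 90;
         * best extent found: 8 blocks -> lstart slides back to 96 */
        printf("pa_lstart = %ld\n", pa_lstart(100, 90, 4, 8));
        return 0;
}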
3481
3482/*
3483 * creates new preallocated space for the locality group the inode belongs to
3484 */
4ddfef7b
ES
3485static noinline_for_stack int
3486ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3487{
3488 struct super_block *sb = ac->ac_sb;
3489 struct ext4_locality_group *lg;
3490 struct ext4_prealloc_space *pa;
3491 struct ext4_group_info *grp;
3492
3493 /* preallocate only when found space is larger than requested */
3494 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3495 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3496 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3497
3498 BUG_ON(ext4_pspace_cachep == NULL);
3499 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3500 if (pa == NULL)
3501 return -ENOMEM;
3502
3503 /* preallocation can change ac_b_ex, thus we store the actually
3504 * allocated blocks for history */
3505 ac->ac_f_ex = ac->ac_b_ex;
3506
3507 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3508 pa->pa_lstart = pa->pa_pstart;
3509 pa->pa_len = ac->ac_b_ex.fe_len;
3510 pa->pa_free = pa->pa_len;
3511 atomic_set(&pa->pa_count, 1);
3512 spin_lock_init(&pa->pa_lock);
6be2ded1 3513 INIT_LIST_HEAD(&pa->pa_inode_list);
d794bf8e 3514 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 3515 pa->pa_deleted = 0;
cc0fb9ad 3516 pa->pa_type = MB_GROUP_PA;
c9de560d 3517
6ba495e9 3518 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
9bffad1e
TT
3519 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3520 trace_ext4_mb_new_group_pa(ac, pa);
c9de560d
AT
3521
3522 ext4_mb_use_group_pa(ac, pa);
3523 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3524
3525 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3526 lg = ac->ac_lg;
3527 BUG_ON(lg == NULL);
3528
3529 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3530 pa->pa_inode = NULL;
3531
3532 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3533 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3534 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3535
6be2ded1
AK
3536 /*
3537 * We will later add the new pa to the right bucket
3538 * after updating the pa_free in ext4_mb_release_context
3539 */
c9de560d
AT
3540 return 0;
3541}
3542
3543static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3544{
3545 int err;
3546
3547 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3548 err = ext4_mb_new_group_pa(ac);
3549 else
3550 err = ext4_mb_new_inode_pa(ac);
3551 return err;
3552}
3553
3554/*
3555 * finds all unused blocks in the on-disk bitmap, frees them in the
3556 * in-core bitmap and buddy.
3557 * @pa must be unlinked from inode and group lists, so that
3558 * nobody else can find/use it.
3559 * the caller MUST hold group/inode locks.
3560 * TODO: optimize the case when there are no in-core structures yet
3561 */
4ddfef7b
ES
3562static noinline_for_stack int
3563ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3e1e5f50 3564 struct ext4_prealloc_space *pa)
c9de560d 3565{
c9de560d
AT
3566 struct super_block *sb = e4b->bd_sb;
3567 struct ext4_sb_info *sbi = EXT4_SB(sb);
498e5f24
TT
3568 unsigned int end;
3569 unsigned int next;
c9de560d
AT
3570 ext4_group_t group;
3571 ext4_grpblk_t bit;
ba80b101 3572 unsigned long long grp_blk_start;
c9de560d
AT
3573 int err = 0;
3574 int free = 0;
3575
3576 BUG_ON(pa->pa_deleted == 0);
3577 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
53accfa9 3578 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
c9de560d
AT
3579 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3580 end = bit + pa->pa_len;
3581
c9de560d 3582 while (bit < end) {
ffad0a44 3583 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
3584 if (bit >= end)
3585 break;
ffad0a44 3586 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
6ba495e9 3587 mb_debug(1, " free preallocated %u/%u in group %u\n",
5a0790c2
AK
3588 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3589 (unsigned) next - bit, (unsigned) group);
c9de560d
AT
3590 free += next - bit;
3591
3e1e5f50 3592 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
53accfa9
TT
3593 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3594 EXT4_C2B(sbi, bit)),
a9c667f8 3595 next - bit);
c9de560d
AT
3596 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3597 bit = next + 1;
3598 }
3599 if (free != pa->pa_free) {
9d8b9ec4
TT
3600 ext4_msg(e4b->bd_sb, KERN_CRIT,
3601 "pa %p: logic %lu, phys. %lu, len %lu",
3602 pa, (unsigned long) pa->pa_lstart,
3603 (unsigned long) pa->pa_pstart,
3604 (unsigned long) pa->pa_len);
e29136f8 3605 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5d1b1b3f 3606 free, pa->pa_free);
e56eb659
AK
3607 /*
3608 * pa is already deleted so we use the value obtained
3609 * from the bitmap and continue.
3610 */
c9de560d 3611 }
c9de560d
AT
3612 atomic_add(free, &sbi->s_mb_discarded);
3613
3614 return err;
3615}
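
The release loop above alternates between find-next-zero-bit and find-next-bit so that each unused run of the preallocation is freed with a single mb_free_blocks() call. A minimal userspace sketch of that scan; next_zero/next_set are toy stand-ins for the mb_find_next_* helpers.

#include <stdio.h>

static int next_zero(const unsigned char *bm, int end, int from)
{
        while (from < end && ((bm[from / 8] >> (from % 8)) & 1))
                from++;
        return from;
}

static int next_set(const unsigned char *bm, int end, int from)
{
        while (from < end && !((bm[from / 8] >> (from % 8)) & 1))
                from++;
        return from;
}

int main(void)
{
        unsigned char bm[4] = { 0x0F, 0xF0, 0x00, 0xFF };  /* set bits = used */
        int bit = 0, end = 32;

        while (bit < end) {
                bit = next_zero(bm, end, bit);  /* start of a free run */
                if (bit >= end)
                        break;
                int next = next_set(bm, end, bit);  /* end of the free run */
                printf("free run: [%d, %d) len %d\n", bit, next, next - bit);
                bit = next + 1;
        }
        return 0;
}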
3616
4ddfef7b
ES
3617static noinline_for_stack int
3618ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3e1e5f50 3619 struct ext4_prealloc_space *pa)
c9de560d 3620{
c9de560d
AT
3621 struct super_block *sb = e4b->bd_sb;
3622 ext4_group_t group;
3623 ext4_grpblk_t bit;
3624
60e07cf5 3625 trace_ext4_mb_release_group_pa(sb, pa);
c9de560d
AT
3626 BUG_ON(pa->pa_deleted == 0);
3627 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3628 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3629 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3630 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3e1e5f50 3631 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
c9de560d
AT
3632
3633 return 0;
3634}
3635
3636/*
3637 * releases all preallocations in the given group
3638 *
3639 * first, we need to decide the discard policy:
3640 * - when do we discard
3641 * 1) ENOSPC
3642 * - how many do we discard
3643 * 1) as many as were requested
3644 */
4ddfef7b
ES
3645static noinline_for_stack int
3646ext4_mb_discard_group_preallocations(struct super_block *sb,
c9de560d
AT
3647 ext4_group_t group, int needed)
3648{
3649 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3650 struct buffer_head *bitmap_bh = NULL;
3651 struct ext4_prealloc_space *pa, *tmp;
3652 struct list_head list;
3653 struct ext4_buddy e4b;
3654 int err;
3655 int busy = 0;
3656 int free = 0;
3657
6ba495e9 3658 mb_debug(1, "discard preallocation for group %u\n", group);
c9de560d
AT
3659
3660 if (list_empty(&grp->bb_prealloc_list))
3661 return 0;
3662
574ca174 3663 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 3664 if (bitmap_bh == NULL) {
12062ddd 3665 ext4_error(sb, "Error reading block bitmap for %u", group);
ce89f46c 3666 return 0;
c9de560d
AT
3667 }
3668
3669 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c 3670 if (err) {
12062ddd 3671 ext4_error(sb, "Error loading buddy information for %u", group);
ce89f46c
AK
3672 put_bh(bitmap_bh);
3673 return 0;
3674 }
c9de560d
AT
3675
3676 if (needed == 0)
7137d7a4 3677 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
c9de560d 3678
c9de560d 3679 INIT_LIST_HEAD(&list);
c9de560d
AT
3680repeat:
3681 ext4_lock_group(sb, group);
3682 list_for_each_entry_safe(pa, tmp,
3683 &grp->bb_prealloc_list, pa_group_list) {
3684 spin_lock(&pa->pa_lock);
3685 if (atomic_read(&pa->pa_count)) {
3686 spin_unlock(&pa->pa_lock);
3687 busy = 1;
3688 continue;
3689 }
3690 if (pa->pa_deleted) {
3691 spin_unlock(&pa->pa_lock);
3692 continue;
3693 }
3694
3695 /* seems this one can be freed ... */
3696 pa->pa_deleted = 1;
3697
3698 /* we can trust pa_free ... */
3699 free += pa->pa_free;
3700
3701 spin_unlock(&pa->pa_lock);
3702
3703 list_del(&pa->pa_group_list);
3704 list_add(&pa->u.pa_tmp_list, &list);
3705 }
3706
3707 /* if we still need more blocks and some PAs were used, try again */
3708 if (free < needed && busy) {
3709 busy = 0;
3710 ext4_unlock_group(sb, group);
3711 /*
3712 * Yield the CPU here so that we don't get soft lockup
3713 * in non preempt case.
3714 */
3715 yield();
3716 goto repeat;
3717 }
3718
3719 /* found anything to free? */
3720 if (list_empty(&list)) {
3721 BUG_ON(free != 0);
3722 goto out;
3723 }
3724
3725 /* now free all selected PAs */
3726 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3727
3728 /* remove from object (inode or locality group) */
3729 spin_lock(pa->pa_obj_lock);
3730 list_del_rcu(&pa->pa_inode_list);
3731 spin_unlock(pa->pa_obj_lock);
3732
cc0fb9ad 3733 if (pa->pa_type == MB_GROUP_PA)
3e1e5f50 3734 ext4_mb_release_group_pa(&e4b, pa);
c9de560d 3735 else
3e1e5f50 3736 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
c9de560d
AT
3737
3738 list_del(&pa->u.pa_tmp_list);
3739 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3740 }
3741
3742out:
3743 ext4_unlock_group(sb, group);
e39e07fd 3744 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
3745 put_bh(bitmap_bh);
3746 return free;
3747}
3748
3749/*
3750 * releases all unused preallocated blocks for the given inode
3751 *
3752 * It's important to discard preallocations under i_data_sem
3753 * We don't want another block to be served from the prealloc
3754 * space when we are discarding the inode prealloc space.
3755 *
3756 * FIXME!! Make sure it is valid at all the call sites
3757 */
c2ea3fde 3758void ext4_discard_preallocations(struct inode *inode)
c9de560d
AT
3759{
3760 struct ext4_inode_info *ei = EXT4_I(inode);
3761 struct super_block *sb = inode->i_sb;
3762 struct buffer_head *bitmap_bh = NULL;
3763 struct ext4_prealloc_space *pa, *tmp;
3764 ext4_group_t group = 0;
3765 struct list_head list;
3766 struct ext4_buddy e4b;
3767 int err;
3768
c2ea3fde 3769 if (!S_ISREG(inode->i_mode)) {
c9de560d
AT
3770 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3771 return;
3772 }
3773
6ba495e9 3774 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
9bffad1e 3775 trace_ext4_discard_preallocations(inode);
c9de560d
AT
3776
3777 INIT_LIST_HEAD(&list);
3778
3779repeat:
3780 /* first, collect all pa's in the inode */
3781 spin_lock(&ei->i_prealloc_lock);
3782 while (!list_empty(&ei->i_prealloc_list)) {
3783 pa = list_entry(ei->i_prealloc_list.next,
3784 struct ext4_prealloc_space, pa_inode_list);
3785 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3786 spin_lock(&pa->pa_lock);
3787 if (atomic_read(&pa->pa_count)) {
3788 /* this shouldn't happen often - nobody should
3789 * use preallocation while we're discarding it */
3790 spin_unlock(&pa->pa_lock);
3791 spin_unlock(&ei->i_prealloc_lock);
9d8b9ec4
TT
3792 ext4_msg(sb, KERN_ERR,
3793 "uh-oh! used pa while discarding");
c9de560d
AT
3794 WARN_ON(1);
3795 schedule_timeout_uninterruptible(HZ);
3796 goto repeat;
3797
3798 }
3799 if (pa->pa_deleted == 0) {
3800 pa->pa_deleted = 1;
3801 spin_unlock(&pa->pa_lock);
3802 list_del_rcu(&pa->pa_inode_list);
3803 list_add(&pa->u.pa_tmp_list, &list);
3804 continue;
3805 }
3806
3807 /* someone is deleting pa right now */
3808 spin_unlock(&pa->pa_lock);
3809 spin_unlock(&ei->i_prealloc_lock);
3810
3811 /* we have to wait here because pa_deleted
3812 * doesn't mean pa is already unlinked from
3813 * the list. as we might be called from
3814 * ->clear_inode() the inode will get freed,
3815 * and a concurrent thread which is unlinking
3816 * pa from the inode's list may access already
3817 * freed memory, bad-bad-bad */
3818
3819 /* XXX: if this happens too often, we can
3820 * add a flag to force wait only in case
3821 * of ->clear_inode(), but not in case of
3822 * regular truncate */
3823 schedule_timeout_uninterruptible(HZ);
3824 goto repeat;
3825 }
3826 spin_unlock(&ei->i_prealloc_lock);
3827
3828 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
cc0fb9ad 3829 BUG_ON(pa->pa_type != MB_INODE_PA);
c9de560d
AT
3830 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3831
3832 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c 3833 if (err) {
12062ddd
ES
3834 ext4_error(sb, "Error loading buddy information for %u",
3835 group);
ce89f46c
AK
3836 continue;
3837 }
c9de560d 3838
574ca174 3839 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 3840 if (bitmap_bh == NULL) {
12062ddd
ES
3841 ext4_error(sb, "Error reading block bitmap for %u",
3842 group);
e39e07fd 3843 ext4_mb_unload_buddy(&e4b);
ce89f46c 3844 continue;
c9de560d
AT
3845 }
3846
3847 ext4_lock_group(sb, group);
3848 list_del(&pa->pa_group_list);
3e1e5f50 3849 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
c9de560d
AT
3850 ext4_unlock_group(sb, group);
3851
e39e07fd 3852 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
3853 put_bh(bitmap_bh);
3854
3855 list_del(&pa->u.pa_tmp_list);
3856 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3857 }
3858}
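
Both discard paths above follow the same shape: unlink candidates onto a private list while holding the lock, then do the expensive per-entry release with the lock dropped. A hedged pthreads sketch of that pattern; struct pa, discard_all and the single mutex are illustrative, standing in for the spinlock/RCU machinery the kernel actually uses.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pa { struct pa *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pa *inode_list;

static void discard_all(void)
{
        struct pa *todo, *pa;

        pthread_mutex_lock(&list_lock);
        todo = inode_list;              /* grab the whole list privately */
        inode_list = NULL;
        pthread_mutex_unlock(&list_lock);

        while ((pa = todo)) {           /* expensive work, lock dropped */
                todo = pa->next;
                printf("releasing pa %d\n", pa->id);
                free(pa);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct pa *pa = malloc(sizeof(*pa));
                pa->id = i;
                pa->next = inode_list;
                inode_list = pa;
        }
        discard_all();
        return 0;
}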
3859
6ba495e9 3860#ifdef CONFIG_EXT4_DEBUG
c9de560d
AT
3861static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3862{
3863 struct super_block *sb = ac->ac_sb;
8df9675f 3864 ext4_group_t ngroups, i;
c9de560d 3865
4dd89fc6
TT
3866 if (!mb_enable_debug ||
3867 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
e3570639
ES
3868 return;
3869
9d8b9ec4
TT
3870 ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: Can't allocate:"
3871 " Allocation context details:");
3872 ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: status %d flags %d",
c9de560d 3873 ac->ac_status, ac->ac_flags);
9d8b9ec4
TT
3874 ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: orig %lu/%lu/%lu@%lu, "
3875 "goal %lu/%lu/%lu@%lu, "
3876 "best %lu/%lu/%lu@%lu cr %d",
c9de560d
AT
3877 (unsigned long)ac->ac_o_ex.fe_group,
3878 (unsigned long)ac->ac_o_ex.fe_start,
3879 (unsigned long)ac->ac_o_ex.fe_len,
3880 (unsigned long)ac->ac_o_ex.fe_logical,
3881 (unsigned long)ac->ac_g_ex.fe_group,
3882 (unsigned long)ac->ac_g_ex.fe_start,
3883 (unsigned long)ac->ac_g_ex.fe_len,
3884 (unsigned long)ac->ac_g_ex.fe_logical,
3885 (unsigned long)ac->ac_b_ex.fe_group,
3886 (unsigned long)ac->ac_b_ex.fe_start,
3887 (unsigned long)ac->ac_b_ex.fe_len,
3888 (unsigned long)ac->ac_b_ex.fe_logical,
3889 (int)ac->ac_criteria);
9d8b9ec4
TT
3890 ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: %lu scanned, %d found",
3891 ac->ac_ex_scanned, ac->ac_found);
3892 ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: groups: ");
8df9675f
TT
3893 ngroups = ext4_get_groups_count(sb);
3894 for (i = 0; i < ngroups; i++) {
c9de560d
AT
3895 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3896 struct ext4_prealloc_space *pa;
3897 ext4_grpblk_t start;
3898 struct list_head *cur;
3899 ext4_lock_group(sb, i);
3900 list_for_each(cur, &grp->bb_prealloc_list) {
3901 pa = list_entry(cur, struct ext4_prealloc_space,
3902 pa_group_list);
3903 spin_lock(&pa->pa_lock);
3904 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3905 NULL, &start);
3906 spin_unlock(&pa->pa_lock);
1c718505
AF
3907 printk(KERN_ERR "PA:%u:%d:%u \n", i,
3908 start, pa->pa_len);
c9de560d 3909 }
60bd63d1 3910 ext4_unlock_group(sb, i);
c9de560d
AT
3911
3912 if (grp->bb_free == 0)
3913 continue;
1c718505 3914 printk(KERN_ERR "%u: %d/%d \n",
c9de560d
AT
3915 i, grp->bb_free, grp->bb_fragments);
3916 }
3917 printk(KERN_ERR "\n");
3918}
3919#else
3920static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3921{
3922 return;
3923}
3924#endif
3925
3926/*
3927 * We use locality group preallocation for small files. The size of the
3928 * file is determined by the current size or the resulting size after
3929 * allocation, whichever is larger.
3930 *
b713a5ec 3931 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
c9de560d
AT
3932 */
3933static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3934{
3935 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3936 int bsbits = ac->ac_sb->s_blocksize_bits;
3937 loff_t size, isize;
3938
3939 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3940 return;
3941
4ba74d00
TT
3942 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3943 return;
3944
53accfa9 3945 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
50797481
TT
3946 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3947 >> bsbits;
c9de560d 3948
50797481
TT
3949 if ((size == isize) &&
3950 !ext4_fs_is_busy(sbi) &&
3951 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3952 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3953 return;
3954 }
3955
ebbe0277
RD
3956 if (sbi->s_mb_group_prealloc <= 0) {
3957 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3958 return;
3959 }
3960
c9de560d 3961 /* don't use group allocation for large files */
71780577 3962 size = max(size, isize);
cc483f10 3963 if (size > sbi->s_mb_stream_request) {
4ba74d00 3964 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
c9de560d 3965 return;
4ba74d00 3966 }
c9de560d
AT
3967
3968 BUG_ON(ac->ac_lg != NULL);
3969 /*
3970 * locality group prealloc space are per cpu. The reason for having
3971 * per cpu locality group is to reduce the contention between block
3972 * request from multiple CPUs.
3973 */
ca0c9584 3974 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
c9de560d
AT
3975
3976 /* we're going to use group allocation */
3977 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3978
3979 /* serialize all allocations in the group */
3980 mutex_lock(&ac->ac_lg->lg_mutex);
3981}
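
The policy above boils down to a size threshold: files at or below the stream request go through the per-CPU locality group, larger ones get inode preallocation. A userspace sketch, assuming the default s_mb_stream_request of 16 blocks; alloc_policy is an illustrative name.

#include <stdio.h>

#define MB_STREAM_REQUEST 16    /* blocks; tunable via mb_stream_req */

static const char *alloc_policy(long cur_size, long size_after_alloc)
{
        /* the relevant size is the larger of the two, as the comment says */
        long size = cur_size > size_after_alloc ? cur_size : size_after_alloc;

        return size > MB_STREAM_REQUEST ? "stream (inode PA)"
                                        : "group (locality group PA)";
}

int main(void)
{
        printf("4-block file:  %s\n", alloc_policy(2, 4));
        printf("64-block file: %s\n", alloc_policy(40, 64));
        return 0;
}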
3982
4ddfef7b
ES
3983static noinline_for_stack int
3984ext4_mb_initialize_context(struct ext4_allocation_context *ac,
c9de560d
AT
3985 struct ext4_allocation_request *ar)
3986{
3987 struct super_block *sb = ar->inode->i_sb;
3988 struct ext4_sb_info *sbi = EXT4_SB(sb);
3989 struct ext4_super_block *es = sbi->s_es;
3990 ext4_group_t group;
498e5f24
TT
3991 unsigned int len;
3992 ext4_fsblk_t goal;
c9de560d
AT
3993 ext4_grpblk_t block;
3994
3995 /* we can't allocate > group size */
3996 len = ar->len;
3997
3998 /* just a dirty hack to filter too big requests */
7137d7a4
TT
3999 if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
4000 len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
c9de560d
AT
4001
4002 /* start searching from the goal */
4003 goal = ar->goal;
4004 if (goal < le32_to_cpu(es->s_first_data_block) ||
4005 goal >= ext4_blocks_count(es))
4006 goal = le32_to_cpu(es->s_first_data_block);
4007 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4008
4009 /* set up allocation goals */
833576b3 4010 memset(ac, 0, sizeof(struct ext4_allocation_context));
53accfa9 4011 ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
c9de560d 4012 ac->ac_status = AC_STATUS_CONTINUE;
c9de560d
AT
4013 ac->ac_sb = sb;
4014 ac->ac_inode = ar->inode;
53accfa9 4015 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
c9de560d
AT
4016 ac->ac_o_ex.fe_group = group;
4017 ac->ac_o_ex.fe_start = block;
4018 ac->ac_o_ex.fe_len = len;
53accfa9 4019 ac->ac_g_ex = ac->ac_o_ex;
c9de560d 4020 ac->ac_flags = ar->flags;
c9de560d
AT
4021
4022 /* we have to define context: will we work with a file or a
4023 * locality group. this is a policy, actually */
4024 ext4_mb_group_or_file(ac);
4025
6ba495e9 4026 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
c9de560d
AT
4027 "left: %u/%u, right %u/%u to %swritable\n",
4028 (unsigned) ar->len, (unsigned) ar->logical,
4029 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4030 (unsigned) ar->lleft, (unsigned) ar->pleft,
4031 (unsigned) ar->lright, (unsigned) ar->pright,
4032 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4033 return 0;
4034
4035}
4036
6be2ded1
AK
4037static noinline_for_stack void
4038ext4_mb_discard_lg_preallocations(struct super_block *sb,
4039 struct ext4_locality_group *lg,
4040 int order, int total_entries)
4041{
4042 ext4_group_t group = 0;
4043 struct ext4_buddy e4b;
4044 struct list_head discard_list;
4045 struct ext4_prealloc_space *pa, *tmp;
6be2ded1 4046
6ba495e9 4047 mb_debug(1, "discard locality group preallocation\n");
6be2ded1
AK
4048
4049 INIT_LIST_HEAD(&discard_list);
6be2ded1
AK
4050
4051 spin_lock(&lg->lg_prealloc_lock);
4052 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4053 pa_inode_list) {
4054 spin_lock(&pa->pa_lock);
4055 if (atomic_read(&pa->pa_count)) {
4056 /*
4057 * This is the pa that we just used
4058 * for block allocation. So don't
4059 * free that
4060 */
4061 spin_unlock(&pa->pa_lock);
4062 continue;
4063 }
4064 if (pa->pa_deleted) {
4065 spin_unlock(&pa->pa_lock);
4066 continue;
4067 }
4068 /* only lg prealloc space */
cc0fb9ad 4069 BUG_ON(pa->pa_type != MB_GROUP_PA);
6be2ded1
AK
4070
4071 /* seems this one can be freed ... */
4072 pa->pa_deleted = 1;
4073 spin_unlock(&pa->pa_lock);
4074
4075 list_del_rcu(&pa->pa_inode_list);
4076 list_add(&pa->u.pa_tmp_list, &discard_list);
4077
4078 total_entries--;
4079 if (total_entries <= 5) {
4080 /*
4081 * we want to keep only 5 entries,
4082 * allowing it to grow to 8. This
4083 * makes sure we don't call discard
4084 * again soon for this list.
4085 */
4086 break;
4087 }
4088 }
4089 spin_unlock(&lg->lg_prealloc_lock);
4090
4091 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4092
4093 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4094 if (ext4_mb_load_buddy(sb, group, &e4b)) {
12062ddd
ES
4095 ext4_error(sb, "Error loading buddy information for %u",
4096 group);
6be2ded1
AK
4097 continue;
4098 }
4099 ext4_lock_group(sb, group);
4100 list_del(&pa->pa_group_list);
3e1e5f50 4101 ext4_mb_release_group_pa(&e4b, pa);
6be2ded1
AK
4102 ext4_unlock_group(sb, group);
4103
e39e07fd 4104 ext4_mb_unload_buddy(&e4b);
6be2ded1
AK
4105 list_del(&pa->u.pa_tmp_list);
4106 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4107 }
6be2ded1
AK
4108}
4109
4110/*
4111 * We have incremented pa_count. So it cannot be freed at this
4112 * point. Also we hold lg_mutex. So no parallel allocation is
4113 * possible from this lg. That means pa_free cannot be updated.
4114 *
4115 * A parallel ext4_mb_discard_group_preallocations is possible,
4116 * which can cause the lg_prealloc_list to be updated.
4117 */
4118
4119static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4120{
4121 int order, added = 0, lg_prealloc_count = 1;
4122 struct super_block *sb = ac->ac_sb;
4123 struct ext4_locality_group *lg = ac->ac_lg;
4124 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4125
4126 order = fls(pa->pa_free) - 1;
4127 if (order > PREALLOC_TB_SIZE - 1)
4128 /* The max size of hash table is PREALLOC_TB_SIZE */
4129 order = PREALLOC_TB_SIZE - 1;
4130 /* Add the prealloc space to lg */
4131 rcu_read_lock();
4132 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4133 pa_inode_list) {
4134 spin_lock(&tmp_pa->pa_lock);
4135 if (tmp_pa->pa_deleted) {
e7c9e3e9 4136 spin_unlock(&tmp_pa->pa_lock);
6be2ded1
AK
4137 continue;
4138 }
4139 if (!added && pa->pa_free < tmp_pa->pa_free) {
4140 /* Add to the tail of the previous entry */
4141 list_add_tail_rcu(&pa->pa_inode_list,
4142 &tmp_pa->pa_inode_list);
4143 added = 1;
4144 /*
4145 * we want to count the total
4146 * number of entries in the list
4147 */
4148 }
4149 spin_unlock(&tmp_pa->pa_lock);
4150 lg_prealloc_count++;
4151 }
4152 if (!added)
4153 list_add_tail_rcu(&pa->pa_inode_list,
4154 &lg->lg_prealloc_list[order]);
4155 rcu_read_unlock();
4156
4157 /* Now trim the list to be not more than 8 elements */
4158 if (lg_prealloc_count > 8) {
4159 ext4_mb_discard_lg_preallocations(sb, lg,
4160 order, lg_prealloc_count);
4161 return;
4162 }
4163 return;
4164}
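
The bucket selection above takes the position of the highest set bit of pa_free and caps it at PREALLOC_TB_SIZE - 1, so preallocations of similar size land on the same list. A userspace sketch; fls_user is a portable stand-in for the kernel's fls(), and the PREALLOC_TB_SIZE value here is an assumption for illustration.

#include <stdio.h>

#define PREALLOC_TB_SIZE 10

static int fls_user(unsigned v)
{
        int r = 0;

        while (v) { v >>= 1; r++; }     /* position of highest set bit, 1-based */
        return r;
}

static int pa_bucket(unsigned pa_free)
{
        int order = fls_user(pa_free) - 1;

        if (order > PREALLOC_TB_SIZE - 1)       /* cap at the table size */
                order = PREALLOC_TB_SIZE - 1;
        return order;
}

int main(void)
{
        printf("pa_free=7    -> bucket %d\n", pa_bucket(7));    /* 2 */
        printf("pa_free=64   -> bucket %d\n", pa_bucket(64));   /* 6 */
        printf("pa_free=4096 -> bucket %d\n", pa_bucket(4096)); /* capped: 9 */
        return 0;
}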
4165
c9de560d
AT
4166/*
4167 * release all resources we used in allocation
4168 */
4169static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4170{
53accfa9 4171 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
6be2ded1
AK
4172 struct ext4_prealloc_space *pa = ac->ac_pa;
4173 if (pa) {
cc0fb9ad 4174 if (pa->pa_type == MB_GROUP_PA) {
c9de560d 4175 /* see comment in ext4_mb_use_group_pa() */
6be2ded1 4176 spin_lock(&pa->pa_lock);
53accfa9
TT
4177 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4178 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6be2ded1
AK
4179 pa->pa_free -= ac->ac_b_ex.fe_len;
4180 pa->pa_len -= ac->ac_b_ex.fe_len;
4181 spin_unlock(&pa->pa_lock);
c9de560d 4182 }
c9de560d 4183 }
ba443916
AK
4184 if (pa) {
4185 /*
4186 * We want to add the pa to the right bucket.
4187 * Remove it from the list and while adding
4188 * make sure the list to which we are adding
44183d42 4189 * doesn't grow big.
ba443916 4190 */
cc0fb9ad 4191 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
ba443916
AK
4192 spin_lock(pa->pa_obj_lock);
4193 list_del_rcu(&pa->pa_inode_list);
4194 spin_unlock(pa->pa_obj_lock);
4195 ext4_mb_add_n_trim(ac);
4196 }
4197 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4198 }
c9de560d
AT
4199 if (ac->ac_bitmap_page)
4200 page_cache_release(ac->ac_bitmap_page);
4201 if (ac->ac_buddy_page)
4202 page_cache_release(ac->ac_buddy_page);
4203 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4204 mutex_unlock(&ac->ac_lg->lg_mutex);
4205 ext4_mb_collect_stats(ac);
4206 return 0;
4207}
4208
4209static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4210{
8df9675f 4211 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
c9de560d
AT
4212 int ret;
4213 int freed = 0;
4214
9bffad1e 4215 trace_ext4_mb_discard_preallocations(sb, needed);
8df9675f 4216 for (i = 0; i < ngroups && needed > 0; i++) {
c9de560d
AT
4217 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4218 freed += ret;
4219 needed -= ret;
4220 }
4221
4222 return freed;
4223}
4224
4225/*
4226 * Main entry point into mballoc to allocate blocks
4227 * it tries to use preallocation first, then falls back
4228 * to usual allocation
4229 */
4230ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6c7a120a 4231 struct ext4_allocation_request *ar, int *errp)
c9de560d 4232{
6bc6e63f 4233 int freed;
256bdb49 4234 struct ext4_allocation_context *ac = NULL;
c9de560d
AT
4235 struct ext4_sb_info *sbi;
4236 struct super_block *sb;
4237 ext4_fsblk_t block = 0;
60e58e0f 4238 unsigned int inquota = 0;
53accfa9 4239 unsigned int reserv_clstrs = 0;
c9de560d
AT
4240
4241 sb = ar->inode->i_sb;
4242 sbi = EXT4_SB(sb);
4243
9bffad1e 4244 trace_ext4_request_blocks(ar);
ba80b101 4245
45dc63e7
DM
4246 /* Allow the quota file to use the superuser reservation */
4247 if (IS_NOQUOTA(ar->inode))
4248 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4249
60e58e0f
MC
4250 /*
4251 * For delayed allocation, we could skip the ENOSPC and
4252 * EDQUOT check, as blocks and quota have already been
4253 * reserved when the data was copied into the pagecache.
4254 */
f2321097 4255 if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
60e58e0f
MC
4256 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4257 else {
4258 /* Without delayed allocation we need to verify
4259 * there are enough free blocks to do the block allocation
4260 * and verify the allocation doesn't exceed the quota limits.
d2a17637 4261 */
55f020db 4262 while (ar->len &&
e7d5f315 4263 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
55f020db 4264
030ba6bc
AK
4265 /* let others free the space */
4266 yield();
4267 ar->len = ar->len >> 1;
4268 }
4269 if (!ar->len) {
a30d542a
AK
4270 *errp = -ENOSPC;
4271 return 0;
4272 }
53accfa9 4273 reserv_clstrs = ar->len;
55f020db 4274 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
53accfa9
TT
4275 dquot_alloc_block_nofail(ar->inode,
4276 EXT4_C2B(sbi, ar->len));
55f020db
AH
4277 } else {
4278 while (ar->len &&
53accfa9
TT
4279 dquot_alloc_block(ar->inode,
4280 EXT4_C2B(sbi, ar->len))) {
55f020db
AH
4281
4282 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4283 ar->len--;
4284 }
60e58e0f
MC
4285 }
4286 inquota = ar->len;
4287 if (ar->len == 0) {
4288 *errp = -EDQUOT;
6c7a120a 4289 goto out;
60e58e0f 4290 }
07031431 4291 }
d2a17637 4292
256bdb49 4293 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
833576b3 4294 if (!ac) {
363d4251 4295 ar->len = 0;
256bdb49 4296 *errp = -ENOMEM;
6c7a120a 4297 goto out;
256bdb49
ES
4298 }
4299
256bdb49 4300 *errp = ext4_mb_initialize_context(ac, ar);
c9de560d
AT
4301 if (*errp) {
4302 ar->len = 0;
6c7a120a 4303 goto out;
c9de560d
AT
4304 }
4305
256bdb49
ES
4306 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4307 if (!ext4_mb_use_preallocated(ac)) {
256bdb49
ES
4308 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4309 ext4_mb_normalize_request(ac, ar);
c9de560d
AT
4310repeat:
4311 /* allocate space in core */
6c7a120a
AK
4312 *errp = ext4_mb_regular_allocator(ac);
4313 if (*errp)
4314 goto errout;
c9de560d
AT
4315
4316 /* as we've just preallocated more space than
4317 * the user originally requested, we store the
4318 * allocated space in a special descriptor */
256bdb49
ES
4319 if (ac->ac_status == AC_STATUS_FOUND &&
4320 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4321 ext4_mb_new_preallocation(ac);
c9de560d 4322 }
256bdb49 4323 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
53accfa9 4324 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6c7a120a 4325 if (*errp == -EAGAIN) {
8556e8f3
AK
4326 /*
4327 * drop the reference that we took
4328 * in ext4_mb_use_best_found
4329 */
4330 ext4_mb_release_context(ac);
519deca0
AK
4331 ac->ac_b_ex.fe_group = 0;
4332 ac->ac_b_ex.fe_start = 0;
4333 ac->ac_b_ex.fe_len = 0;
4334 ac->ac_status = AC_STATUS_CONTINUE;
4335 goto repeat;
6c7a120a
AK
4336 } else if (*errp)
4337 errout:
b844167e 4338 ext4_discard_allocated_blocks(ac);
6c7a120a 4339 else {
519deca0
AK
4340 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4341 ar->len = ac->ac_b_ex.fe_len;
4342 }
c9de560d 4343 } else {
256bdb49 4344 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
c9de560d
AT
4345 if (freed)
4346 goto repeat;
4347 *errp = -ENOSPC;
6c7a120a
AK
4348 }
4349
4350 if (*errp) {
256bdb49 4351 ac->ac_b_ex.fe_len = 0;
c9de560d 4352 ar->len = 0;
256bdb49 4353 ext4_mb_show_ac(ac);
c9de560d 4354 }
256bdb49 4355 ext4_mb_release_context(ac);
6c7a120a
AK
4356out:
4357 if (ac)
4358 kmem_cache_free(ext4_ac_cachep, ac);
60e58e0f 4359 if (inquota && ar->len < inquota)
53accfa9 4360 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
0087d9fb 4361 if (!ar->len) {
f2321097
TT
4362 if (!ext4_test_inode_state(ar->inode,
4363 EXT4_STATE_DELALLOC_RESERVED))
0087d9fb 4364 /* release all the reserved blocks if non delalloc */
57042651 4365 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
53accfa9 4366 reserv_clstrs);
0087d9fb 4367 }
c9de560d 4368
9bffad1e 4369 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
ba80b101 4370
c9de560d
AT
4371 return block;
4372}
c9de560d 4373
c894058d
AK
4374/*
4375 * We can merge two free data extents only if the physical blocks
4376 * are contiguous, AND the extents were freed by the same transaction,
4377 * AND the blocks are associated with the same group.
4378 */
4379static int can_merge(struct ext4_free_data *entry1,
4380 struct ext4_free_data *entry2)
4381{
4382 if ((entry1->t_tid == entry2->t_tid) &&
4383 (entry1->group == entry2->group) &&
84130193 4384 ((entry1->start_cluster + entry1->count) == entry2->start_cluster))
c894058d
AK
4385 return 1;
4386 return 0;
4387}
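
A self-contained restatement of the merge rule above, handy for quick experimentation: two free extents coalesce only when they are physically adjacent, belong to the same group, and were freed by the same transaction. struct free_extent and can_merge_user are illustrative userspace names.

#include <stdio.h>

struct free_extent { unsigned tid, group, start, count; };

static int can_merge_user(const struct free_extent *a,
                          const struct free_extent *b)
{
        return a->tid == b->tid && a->group == b->group &&
               a->start + a->count == b->start;
}

int main(void)
{
        struct free_extent a = { .tid = 7, .group = 3, .start = 100, .count = 8 };
        struct free_extent b = { .tid = 7, .group = 3, .start = 108, .count = 4 };
        struct free_extent c = { .tid = 8, .group = 3, .start = 112, .count = 4 };

        printf("a+b: %d\n", can_merge_user(&a, &b));  /* 1: adjacent, same tid */
        printf("b+c: %d\n", can_merge_user(&b, &c));  /* 0: different transaction */
        return 0;
}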
4388
4ddfef7b
ES
4389static noinline_for_stack int
4390ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
7a2fcbf7 4391 struct ext4_free_data *new_entry)
c9de560d 4392{
e29136f8 4393 ext4_group_t group = e4b->bd_group;
84130193 4394 ext4_grpblk_t cluster;
7a2fcbf7 4395 struct ext4_free_data *entry;
c9de560d
AT
4396 struct ext4_group_info *db = e4b->bd_info;
4397 struct super_block *sb = e4b->bd_sb;
4398 struct ext4_sb_info *sbi = EXT4_SB(sb);
c894058d
AK
4399 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4400 struct rb_node *parent = NULL, *new_node;
4401
0390131b 4402 BUG_ON(!ext4_handle_valid(handle));
c9de560d
AT
4403 BUG_ON(e4b->bd_bitmap_page == NULL);
4404 BUG_ON(e4b->bd_buddy_page == NULL);
4405
c894058d 4406 new_node = &new_entry->node;
84130193 4407 cluster = new_entry->start_cluster;
c894058d 4408
c894058d
AK
4409 if (!*n) {
4410 /* first free block extent. We need to
4411 * protect the buddy cache from being freed,
4412 * otherwise we'll refresh it from the
4413 * on-disk bitmap and lose not-yet-available
4414 * blocks */
4415 page_cache_get(e4b->bd_buddy_page);
4416 page_cache_get(e4b->bd_bitmap_page);
4417 }
4418 while (*n) {
4419 parent = *n;
4420 entry = rb_entry(parent, struct ext4_free_data, node);
84130193 4421 if (cluster < entry->start_cluster)
c894058d 4422 n = &(*n)->rb_left;
84130193 4423 else if (cluster >= (entry->start_cluster + entry->count))
c894058d
AK
4424 n = &(*n)->rb_right;
4425 else {
e29136f8 4426 ext4_grp_locked_error(sb, group, 0,
84130193
TT
4427 ext4_group_first_block_no(sb, group) +
4428 EXT4_C2B(sbi, cluster),
e29136f8 4429 "Block already on to-be-freed list");
c894058d 4430 return 0;
c9de560d 4431 }
c894058d 4432 }
c9de560d 4433
c894058d
AK
4434 rb_link_node(new_node, parent, n);
4435 rb_insert_color(new_node, &db->bb_free_root);
4436
4437 /* Now try to see if the extent can be merged to the left and right */
4438 node = rb_prev(new_node);
4439 if (node) {
4440 entry = rb_entry(node, struct ext4_free_data, node);
4441 if (can_merge(entry, new_entry)) {
84130193 4442 new_entry->start_cluster = entry->start_cluster;
c894058d
AK
4443 new_entry->count += entry->count;
4444 rb_erase(node, &(db->bb_free_root));
4445 spin_lock(&sbi->s_md_lock);
4446 list_del(&entry->list);
4447 spin_unlock(&sbi->s_md_lock);
4448 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d 4449 }
c894058d 4450 }
c9de560d 4451
c894058d
AK
4452 node = rb_next(new_node);
4453 if (node) {
4454 entry = rb_entry(node, struct ext4_free_data, node);
4455 if (can_merge(new_entry, entry)) {
4456 new_entry->count += entry->count;
4457 rb_erase(node, &(db->bb_free_root));
4458 spin_lock(&sbi->s_md_lock);
4459 list_del(&entry->list);
4460 spin_unlock(&sbi->s_md_lock);
4461 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d
AT
4462 }
4463 }
3e624fc7 4464 /* Add the extent to transaction's private list */
c894058d 4465 spin_lock(&sbi->s_md_lock);
3e624fc7 4466 list_add(&new_entry->list, &handle->h_transaction->t_private_list);
c894058d 4467 spin_unlock(&sbi->s_md_lock);
c9de560d
AT
4468 return 0;
4469}
4470
44338711
TT
4471/**
4472 * ext4_free_blocks() -- Free given blocks and update quota
4473 * @handle: handle for this transaction
4474 * @inode: inode
4475 * @block: start physical block to free
4476 * @count: number of blocks to free
5def1360 4477 * @flags: flags used by ext4_free_blocks
c9de560d 4478 */
44338711 4479void ext4_free_blocks(handle_t *handle, struct inode *inode,
e6362609
TT
4480 struct buffer_head *bh, ext4_fsblk_t block,
4481 unsigned long count, int flags)
c9de560d 4482{
26346ff6 4483 struct buffer_head *bitmap_bh = NULL;
c9de560d 4484 struct super_block *sb = inode->i_sb;
c9de560d 4485 struct ext4_group_desc *gdp;
44338711 4486 unsigned long freed = 0;
498e5f24 4487 unsigned int overflow;
c9de560d
AT
4488 ext4_grpblk_t bit;
4489 struct buffer_head *gd_bh;
4490 ext4_group_t block_group;
4491 struct ext4_sb_info *sbi;
4492 struct ext4_buddy e4b;
84130193 4493 unsigned int count_clusters;
c9de560d
AT
4494 int err = 0;
4495 int ret;
4496
e6362609
TT
4497 if (bh) {
4498 if (block)
4499 BUG_ON(block != bh->b_blocknr);
4500 else
4501 block = bh->b_blocknr;
4502 }
c9de560d 4503
c9de560d 4504 sbi = EXT4_SB(sb);
1f2acb60
TT
4505 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4506 !ext4_data_block_valid(sbi, block, count)) {
12062ddd 4507 ext4_error(sb, "Freeing blocks not in datazone - "
1f2acb60 4508 "block = %llu, count = %lu", block, count);
c9de560d
AT
4509 goto error_return;
4510 }
4511
0610b6e9 4512 ext4_debug("freeing block %llu\n", block);
e6362609
TT
4513 trace_ext4_free_blocks(inode, block, count, flags);
4514
4515 if (flags & EXT4_FREE_BLOCKS_FORGET) {
4516 struct buffer_head *tbh = bh;
4517 int i;
4518
4519 BUG_ON(bh && (count > 1));
4520
4521 for (i = 0; i < count; i++) {
4522 if (!bh)
4523 tbh = sb_find_get_block(inode->i_sb,
4524 block + i);
87783690
NK
4525 if (unlikely(!tbh))
4526 continue;
60e6679e 4527 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
e6362609
TT
4528 inode, tbh, block + i);
4529 }
4530 }
4531
60e6679e 4532 /*
e6362609
TT
4533 * We need to make sure we don't reuse the freed block until
4534 * after the transaction is committed, which we can do by
4535 * treating the block as metadata, below. We make an
4536 * exception if the inode is to be written in writeback mode
4537 * since writeback mode has weak data consistency guarantees.
4538 */
4539 if (!ext4_should_writeback_data(inode))
4540 flags |= EXT4_FREE_BLOCKS_METADATA;
c9de560d 4541
84130193
TT
4542 /*
4543 * If the extent to be freed does not begin on a cluster
4544 * boundary, we need to deal with partial clusters at the
4545 * beginning and end of the extent. Normally we will free
4546 * blocks at the beginning or the end unless we are explicitly
4547 * requested to avoid doing so.
4548 */
4549 overflow = block & (sbi->s_cluster_ratio - 1);
4550 if (overflow) {
4551 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4552 overflow = sbi->s_cluster_ratio - overflow;
4553 block += overflow;
4554 if (count > overflow)
4555 count -= overflow;
4556 else
4557 return;
4558 } else {
4559 block -= overflow;
4560 count += overflow;
4561 }
4562 }
4563 overflow = count & (sbi->s_cluster_ratio - 1);
4564 if (overflow) {
4565 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4566 if (count > overflow)
4567 count -= overflow;
4568 else
4569 return;
4570 } else
4571 count += sbi->s_cluster_ratio - overflow;
4572 }
4573
c9de560d
AT
4574do_more:
4575 overflow = 0;
4576 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4577
4578 /*
4579 * Check to see if we are freeing blocks across a group
4580 * boundary.
4581 */
84130193
TT
4582 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4583 overflow = EXT4_C2B(sbi, bit) + count -
4584 EXT4_BLOCKS_PER_GROUP(sb);
c9de560d
AT
4585 count -= overflow;
4586 }
84130193 4587 count_clusters = EXT4_B2C(sbi, count);
574ca174 4588 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
ce89f46c
AK
4589 if (!bitmap_bh) {
4590 err = -EIO;
c9de560d 4591 goto error_return;
ce89f46c 4592 }
c9de560d 4593 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
ce89f46c
AK
4594 if (!gdp) {
4595 err = -EIO;
c9de560d 4596 goto error_return;
ce89f46c 4597 }
c9de560d
AT
4598
4599 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4600 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4601 in_range(block, ext4_inode_table(sb, gdp),
84130193 4602 EXT4_SB(sb)->s_itb_per_group) ||
c9de560d 4603 in_range(block + count - 1, ext4_inode_table(sb, gdp),
84130193 4604 EXT4_SB(sb)->s_itb_per_group)) {
c9de560d 4605
12062ddd 4606 ext4_error(sb, "Freeing blocks in system zone - "
0610b6e9 4607 "Block = %llu, count = %lu", block, count);
519deca0
AK
4608 /* err = 0. ext4_std_error should be a no op */
4609 goto error_return;
c9de560d
AT
4610 }
4611
4612 BUFFER_TRACE(bitmap_bh, "getting write access");
4613 err = ext4_journal_get_write_access(handle, bitmap_bh);
4614 if (err)
4615 goto error_return;
4616
4617 /*
4618 * We are about to modify some metadata. Call the journal APIs
4619 * to unshare ->b_data if a currently-committing transaction is
4620 * using it
4621 */
4622 BUFFER_TRACE(gd_bh, "get_write_access");
4623 err = ext4_journal_get_write_access(handle, gd_bh);
4624 if (err)
4625 goto error_return;
c9de560d
AT
4626#ifdef AGGRESSIVE_CHECK
4627 {
4628 int i;
84130193 4629 for (i = 0; i < count_clusters; i++)
c9de560d
AT
4630 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4631 }
4632#endif
84130193 4633 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
c9de560d 4634
920313a7
AK
4635 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4636 if (err)
4637 goto error_return;
e6362609
TT
4638
4639 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
7a2fcbf7
AK
4640 struct ext4_free_data *new_entry;
4641 /*
4642 * blocks being freed are metadata. these blocks shouldn't
4643 * be used until this transaction is committed
4644 */
b72143ab
TT
4645 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4646 if (!new_entry) {
4647 err = -ENOMEM;
4648 goto error_return;
4649 }
84130193 4650 new_entry->start_cluster = bit;
7a2fcbf7 4651 new_entry->group = block_group;
84130193 4652 new_entry->count = count_clusters;
7a2fcbf7 4653 new_entry->t_tid = handle->h_transaction->t_tid;
955ce5f5 4654
7a2fcbf7 4655 ext4_lock_group(sb, block_group);
84130193 4656 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
7a2fcbf7 4657 ext4_mb_free_metadata(handle, &e4b, new_entry);
c9de560d 4658 } else {
7a2fcbf7
AK
4659 /* need to update group_info->bb_free and bitmap
4660 * with group lock held. generate_buddy looks at
4661 * them with the group lock held
4662 */
955ce5f5 4663 ext4_lock_group(sb, block_group);
84130193
TT
4664 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4665 mb_free_blocks(inode, &e4b, bit, count_clusters);
c9de560d
AT
4666 }
4667
021b65bb
TT
4668 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4669 ext4_free_group_clusters_set(sb, gdp, ret);
c9de560d 4670 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
955ce5f5 4671 ext4_unlock_group(sb, block_group);
57042651 4672 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
c9de560d 4673
772cb7c8
JS
4674 if (sbi->s_log_groups_per_flex) {
4675 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
24aaa8ef
TT
4676 atomic_add(count_clusters,
4677 &sbi->s_flex_groups[flex_group].free_clusters);
772cb7c8
JS
4678 }
4679
e39e07fd 4680 ext4_mb_unload_buddy(&e4b);
c9de560d 4681
44338711 4682 freed += count;
c9de560d 4683
7b415bf6
AK
4684 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4685 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4686
7a2fcbf7
AK
4687 /* We dirtied the bitmap block */
4688 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4689 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4690
c9de560d
AT
4691 /* And the group descriptor block */
4692 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
0390131b 4693 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
c9de560d
AT
4694 if (!err)
4695 err = ret;
4696
4697 if (overflow && !err) {
4698 block += count;
4699 count = overflow;
4700 put_bh(bitmap_bh);
4701 goto do_more;
4702 }
a0375156 4703 ext4_mark_super_dirty(sb);
c9de560d
AT
4704error_return:
4705 brelse(bitmap_bh);
4706 ext4_std_error(sb, err);
4707 return;
4708}
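
The overflow handling near the top of ext4_free_blocks() widens a block extent outward to cluster boundaries unless the caller explicitly asked to leave a partial head or tail cluster alone. A sketch of the unconditional widening case; align_to_clusters is a hypothetical helper, and it assumes the cluster ratio is a power of two, as in bigalloc ext4.

#include <stdio.h>

static void align_to_clusters(unsigned long *block, unsigned long *count,
                              unsigned ratio)
{
        unsigned long head = *block & (ratio - 1);

        if (head) {                     /* extend down to the cluster start */
                *block -= head;
                *count += head;
        }
        unsigned long tail = *count & (ratio - 1);
        if (tail)                       /* extend up to the cluster end */
                *count += ratio - tail;
}

int main(void)
{
        unsigned long block = 1001, count = 14;

        align_to_clusters(&block, &count, 8);   /* 8 blocks per cluster */
        printf("freeing blocks [%lu, %lu)\n", block, block + count);
        return 0;
}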
7360d173 4709
2846e820 4710/**
0529155e 4711 * ext4_group_add_blocks() -- Add given blocks to an existing group
2846e820
AG
4712 * @handle: handle to this transaction
4713 * @sb: super block
4714 * @block: start physical block to add to the block group
4715 * @count: number of blocks to add
4716 *
e73a347b 4717 * This marks the blocks as free in the bitmap and buddy.
2846e820 4718 */
cc7365df 4719int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
2846e820
AG
4720 ext4_fsblk_t block, unsigned long count)
4721{
4722 struct buffer_head *bitmap_bh = NULL;
4723 struct buffer_head *gd_bh;
4724 ext4_group_t block_group;
4725 ext4_grpblk_t bit;
4726 unsigned int i;
4727 struct ext4_group_desc *desc;
4728 struct ext4_sb_info *sbi = EXT4_SB(sb);
e73a347b 4729 struct ext4_buddy e4b;
2846e820
AG
4730 int err = 0, ret, blk_free_count;
4731 ext4_grpblk_t blocks_freed;
2846e820
AG
4732
4733 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4734
4740b830
YY
4735 if (count == 0)
4736 return 0;
4737
2846e820 4738 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
2846e820
AG
4739 /*
4740 * Check to see if we are freeing blocks across a group
4741 * boundary.
4742 */
cc7365df
YY
4743 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4744 ext4_warning(sb, "too much blocks added to group %u\n",
4745 block_group);
4746 err = -EINVAL;
2846e820 4747 goto error_return;
cc7365df 4748 }
2cd05cc3 4749
2846e820 4750 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
cc7365df
YY
4751 if (!bitmap_bh) {
4752 err = -EIO;
2846e820 4753 goto error_return;
cc7365df
YY
4754 }
4755
2846e820 4756 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
cc7365df
YY
4757 if (!desc) {
4758 err = -EIO;
2846e820 4759 goto error_return;
cc7365df 4760 }
2846e820
AG
4761
4762 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4763 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4764 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4765 in_range(block + count - 1, ext4_inode_table(sb, desc),
4766 sbi->s_itb_per_group)) {
4767 ext4_error(sb, "Adding blocks in system zones - "
4768 "Block = %llu, count = %lu",
4769 block, count);
cc7365df 4770 err = -EINVAL;
2846e820
AG
4771 goto error_return;
4772 }
4773
2cd05cc3
TT
4774 BUFFER_TRACE(bitmap_bh, "getting write access");
4775 err = ext4_journal_get_write_access(handle, bitmap_bh);
2846e820
AG
4776 if (err)
4777 goto error_return;
4778
4779 /*
4780 * We are about to modify some metadata. Call the journal APIs
4781 * to unshare ->b_data if a currently-committing transaction is
4782 * using it
4783 */
4784 BUFFER_TRACE(gd_bh, "get_write_access");
4785 err = ext4_journal_get_write_access(handle, gd_bh);
4786 if (err)
4787 goto error_return;
e73a347b 4788
2846e820
AG
4789 for (i = 0, blocks_freed = 0; i < count; i++) {
4790 BUFFER_TRACE(bitmap_bh, "clear bit");
e73a347b 4791 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
2846e820
AG
4792 ext4_error(sb, "bit already cleared for block %llu",
4793 (ext4_fsblk_t)(block + i));
4794 BUFFER_TRACE(bitmap_bh, "bit already cleared");
4795 } else {
4796 blocks_freed++;
4797 }
4798 }
e73a347b
AG
4799
4800 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4801 if (err)
4802 goto error_return;
4803
4804 /*
4805 * need to update group_info->bb_free and bitmap
4806 * with group lock held. generate_buddy looks at
4807 * them with the group lock held
4808 */
2846e820 4809 ext4_lock_group(sb, block_group);
e73a347b
AG
4810 mb_clear_bits(bitmap_bh->b_data, bit, count);
4811 mb_free_blocks(NULL, &e4b, bit, count);
021b65bb
TT
4812 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4813 ext4_free_group_clusters_set(sb, desc, blk_free_count);
2846e820
AG
4814 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
4815 ext4_unlock_group(sb, block_group);
57042651
TT
4816 percpu_counter_add(&sbi->s_freeclusters_counter,
4817 EXT4_B2C(sbi, blocks_freed));
2846e820
AG
4818
4819 if (sbi->s_log_groups_per_flex) {
4820 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
24aaa8ef
TT
4821 atomic_add(EXT4_B2C(sbi, blocks_freed),
4822 &sbi->s_flex_groups[flex_group].free_clusters);
2846e820 4823 }
e73a347b
AG
4824
4825 ext4_mb_unload_buddy(&e4b);
2846e820
AG
4826
4827 /* We dirtied the bitmap block */
4828 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4829 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4830
4831 /* And the group descriptor block */
4832 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4833 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4834 if (!err)
4835 err = ret;
4836
4837error_return:
4838 brelse(bitmap_bh);
4839 ext4_std_error(sb, err);
cc7365df 4840 return err;
2846e820
AG
4841}
4842
7360d173
LC
4843/**
4844 * ext4_trim_extent -- function to TRIM one single free extent in the group
4845 * @sb: super block for the file system
4846 * @start: starting block of the free extent in the alloc. group
4847 * @count: number of blocks to TRIM
4848 * @group: alloc. group we are working with
4849 * @e4b: ext4 buddy for the group
4850 *
4851 * Trim "count" blocks starting at "start" in the "group". To assure that no
4852 * one will allocate those blocks, mark it as used in buddy bitmap. This must
4853 * be called with under the group lock.
4854 */
d9f34504
TT
4855static void ext4_trim_extent(struct super_block *sb, int start, int count,
4856 ext4_group_t group, struct ext4_buddy *e4b)
7360d173
LC
4857{
4858 struct ext4_free_extent ex;
7360d173 4859
b3d4c2b1
TM
4860 trace_ext4_trim_extent(sb, group, start, count);
4861
7360d173
LC
4862 assert_spin_locked(ext4_group_lock_ptr(sb, group));
4863
4864 ex.fe_start = start;
4865 ex.fe_group = group;
4866 ex.fe_len = count;
4867
4868 /*
4869 * Mark blocks used, so no one can reuse them while
4870 * being trimmed.
4871 */
4872 mb_mark_used(e4b, &ex);
4873 ext4_unlock_group(sb, group);
d9f34504 4874 ext4_issue_discard(sb, group, start, count);
7360d173
LC
4875 ext4_lock_group(sb, group);
4876 mb_free_blocks(NULL, e4b, start, ex.fe_len);
7360d173
LC
4877}
4878
4879/**
4880 * ext4_trim_all_free -- function to trim all free space in alloc. group
4881 * @sb: super block for file system
22612283 4882 * @group: group to be trimmed
7360d173
LC
4883 * @start: first group block to examine
4884 * @max: last group block to examine
4885 * @minblocks: minimum extent block count
4886 *
4887 * ext4_trim_all_free walks through the group's buddy bitmap searching for
4888 * free extents. When a free extent is found, it is marked as used in the
4889 * group buddy bitmap, a TRIM command is issued on the extent, and then the
4890 * extent is freed again in the buddy bitmap. This is done until the whole
4891 * group is scanned.
4896 */
0b75a840 4897static ext4_grpblk_t
78944086
LC
4898ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4899 ext4_grpblk_t start, ext4_grpblk_t max,
4900 ext4_grpblk_t minblocks)
7360d173
LC
4901{
4902 void *bitmap;
169ddc3e 4903 ext4_grpblk_t next, count = 0, free_count = 0;
78944086
LC
4904 struct ext4_buddy e4b;
4905 int ret;
7360d173 4906
b3d4c2b1
TM
4907 trace_ext4_trim_all_free(sb, group, start, max);
4908
78944086
LC
4909 ret = ext4_mb_load_buddy(sb, group, &e4b);
4910 if (ret) {
4911 ext4_error(sb, "Error in loading buddy "
4912 "information for %u", group);
4913 return ret;
4914 }
78944086 4915 bitmap = e4b.bd_bitmap;
28739eea
LC
4916
4917 ext4_lock_group(sb, group);
3d56b8d2
TM
4918 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4919 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4920 goto out;
4921
78944086
LC
4922 start = (e4b.bd_info->bb_first_free > start) ?
4923 e4b.bd_info->bb_first_free : start;
7360d173
LC
4924
4925 while (start < max) {
4926 start = mb_find_next_zero_bit(bitmap, max, start);
4927 if (start >= max)
4928 break;
4929 next = mb_find_next_bit(bitmap, max, start);
4930
4931 if ((next - start) >= minblocks) {
d9f34504 4932 ext4_trim_extent(sb, start,
78944086 4933 next - start, group, &e4b);
7360d173
LC
4934 count += next - start;
4935 }
169ddc3e 4936 free_count += next - start;
7360d173
LC
4937 start = next + 1;
4938
4939 if (fatal_signal_pending(current)) {
4940 count = -ERESTARTSYS;
4941 break;
4942 }
4943
4944 if (need_resched()) {
4945 ext4_unlock_group(sb, group);
4946 cond_resched();
4947 ext4_lock_group(sb, group);
4948 }
4949
169ddc3e 4950 if ((e4b.bd_info->bb_free - free_count) < minblocks)
7360d173
LC
4951 break;
4952 }
3d56b8d2
TM
4953
4954 if (!ret)
4955 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4956out:
7360d173 4957 ext4_unlock_group(sb, group);
78944086 4958 ext4_mb_unload_buddy(&e4b);
7360d173
LC
4959
4960 ext4_debug("trimmed %d blocks in the group %d\n",
4961 count, group);
4962
7360d173
LC
4963 return count;
4964}
4965
4966/**
4967 * ext4_trim_fs() -- trim ioctl handle function
4968 * @sb: superblock for filesystem
4969 * @range: fstrim_range structure
4970 *
4971 * start: First Byte to trim
4972 * len: number of Bytes to trim from start
4973 * minlen: minimum extent length in Bytes
4974 * ext4_trim_fs goes through all allocation groups containing Bytes from
4975 * start to start+len. For each such a group ext4_trim_all_free function
4976 * is invoked to trim all free space.
4977 */
4978int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4979{
78944086 4980 struct ext4_group_info *grp;
7360d173
LC
4981 ext4_group_t first_group, last_group;
4982 ext4_group_t group, ngroups = ext4_get_groups_count(sb);
7137d7a4 4983 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
78944086 4984 uint64_t start, len, minlen, trimmed = 0;
0f0a25bf
JK
4985 ext4_fsblk_t first_data_blk =
4986 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
7360d173
LC
4987 int ret = 0;
4988
4989 start = range->start >> sb->s_blocksize_bits;
4990 len = range->len >> sb->s_blocksize_bits;
4991 minlen = range->minlen >> sb->s_blocksize_bits;
7360d173 4992
7137d7a4 4993 if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)))
7360d173 4994 return -EINVAL;
22f10457
TM
4995 if (start + len <= first_data_blk)
4996 goto out;
0f0a25bf
JK
4997 if (start < first_data_blk) {
4998 len -= first_data_blk - start;
4999 start = first_data_blk;
5000 }
7360d173
LC
5001
5002 /* Determine first and last group to examine based on start and len */
5003 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
7137d7a4 5004 &first_group, &first_cluster);
7360d173 5005 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) (start + len),
7137d7a4 5006 &last_group, &last_cluster);
7360d173 5007 last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
7137d7a4 5008 last_cluster = EXT4_CLUSTERS_PER_GROUP(sb);
7360d173
LC
5009
5010 if (first_group > last_group)
5011 return -EINVAL;
5012
5013 for (group = first_group; group <= last_group; group++) {
78944086
LC
5014 grp = ext4_get_group_info(sb, group);
5015 /* We only do this if the grp has never been initialized */
5016 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5017 ret = ext4_mb_init_group(sb, group);
5018 if (ret)
5019 break;
7360d173
LC
5020 }
5021
0ba08517
TM
5022 /*
5023 * For all the groups except the last one, last block will
5024 * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to
5025 * change it for the last group in which case start +
5026 * len < EXT4_BLOCKS_PER_GROUP(sb).
5027 */
7137d7a4
TT
5028 if (first_cluster + len < EXT4_CLUSTERS_PER_GROUP(sb))
5029 last_cluster = first_cluster + len;
5030 len -= last_cluster - first_cluster;
7360d173 5031
78944086 5032 if (grp->bb_free >= minlen) {
7137d7a4
TT
5033 cnt = ext4_trim_all_free(sb, group, first_cluster,
5034 last_cluster, minlen);
7360d173
LC
5035 if (cnt < 0) {
5036 ret = cnt;
7360d173
LC
5037 break;
5038 }
5039 }
7360d173 5040 trimmed += cnt;
7137d7a4 5041 first_cluster = 0;
7360d173
LC
5042 }
5043 range->len = trimmed * sb->s_blocksize;
5044
3d56b8d2
TM
5045 if (!ret)
5046 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5047
22f10457 5048out:
7360d173
LC
5049 return ret;
5050}
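
ext4_trim_fs() begins by converting the byte-based fstrim parameters to block units and clamping the range so trimming never starts before the first data block. A userspace sketch of that setup; fstrim_to_blocks and struct trim_blocks are illustrative names, not kernel API.

#include <stdio.h>

struct trim_blocks { unsigned long long start, len, minlen; };

static int fstrim_to_blocks(unsigned long long start_b,
                            unsigned long long len_b,
                            unsigned long long minlen_b,
                            unsigned blocksize_bits,
                            unsigned long long first_data_blk,
                            struct trim_blocks *out)
{
        out->start = start_b >> blocksize_bits;     /* bytes -> blocks */
        out->len = len_b >> blocksize_bits;
        out->minlen = minlen_b >> blocksize_bits;
        if (out->start + out->len <= first_data_blk)
                return -1;                          /* nothing to do */
        if (out->start < first_data_blk) {          /* clamp the range start */
                out->len -= first_data_blk - out->start;
                out->start = first_data_blk;
        }
        return 0;
}

int main(void)
{
        struct trim_blocks t;

        /* 4k blocks: trim bytes [0, 1 MiB), minlen 64 KiB, first data block 1 */
        if (!fstrim_to_blocks(0, 1 << 20, 64 << 10, 12, 1, &t))
                printf("trim blocks [%llu, %llu), minlen %llu\n",
                       t.start, t.start + t.len, t.minlen);
        return 0;
}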