/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "mballoc.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in a few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This
 * makes sure that we have contiguous physical blocks representing the
 * file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we needed. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses a buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria of whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both of the prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
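
/*
 * Worked example of the policy above (illustrative only; numbers assume
 * default tunables): a write that would leave a file at 8 blocks is below
 * s_mb_stream_request (16), so the request is marked with
 * EXT4_MB_HINT_GROUP_ALLOC and served from the per-CPU locality group
 * preallocation; a 64-block file exceeds the threshold, so the request is
 * normalized in ext4_mb_normalize_request() and served through the inode
 * preallocation path instead.
 */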

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know actual used
 *        bits from PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - a PA is referenced and, while it is, no discard is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've completely used up a PA. no need to modify
 * the buddy in this case, but we should care about concurrent init
 */
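
/*
 * Numeric sketch of the accounting above (illustrative): a new 16-block
 * inode PA starts as buddy += 16, pa_free = 16. Using 4 blocks from it
 * gives on-disk += 4, pa_free = 12; the in-core buddy is untouched since
 * those blocks were already marked used when the PA was created.
 * Discarding the PA then returns the 12 unused blocks to the buddy and
 * sets pa_free = 0.
 */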

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_ext_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
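
/*
 * Example (illustrative, 64-bit): for a bitmap at base + 5 (base being
 * unsigned-long aligned) and bit = 2, the bits covered by the misaligned
 * 5 bytes are folded into the bit index:
 *
 *	*bit += (5 & 7) << 3;	-> bit becomes 2 + 40 = 42
 *	addr = base;		-> now unsigned-long aligned
 *
 * so mb_test_bit(2, base + 5) is ext4_test_bit(42, base).
 */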

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return EXT4_MB_BITMAP(e4b);
	}

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += first + i;
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %u "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 0 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
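
/*
 * Worked example (illustrative; border is too large to matter for these
 * small offsets): marking first = 6, len = 6 free splits the range into
 * power-of-two chunks aligned to their own size:
 *
 *	pass 1: max = ffs(6) - 1 = 1, min = fls(6) - 1 = 2 -> chunk = 2
 *		bb_counters[1]++, clear bit 6 >> 1 = 3 in the order-1 map
 *	pass 2: first = 8, len = 4: max = 3, min = 2 -> chunk = 4
 *		bb_counters[2]++, clear bit 8 >> 2 = 2 in the order-2 map
 *
 * i.e. blocks 6-7 become one order-1 chunk and blocks 8-11 one order-2
 * chunk.
 */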

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u blocks in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group
		 * descriptor corrupt and update bb_free using the
		 * bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
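
/*
 * Layout example (illustrative, 1k block size and 4k pages):
 * blocks_per_page = 4 and groups_per_page = 2, so page 0 of the buddy
 * cache inode holds
 *
 *	[group 0 bitmap][group 0 buddy][group 1 bitmap][group 1 buddy]
 *
 * and, in general, group g's bitmap lives at cache block 2g (page
 * 2g / blocks_per_page, offset 2g % blocks_per_page) and its buddy at
 * cache block 2g + 1.
 */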

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, first_group + i);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (bitmap_uptodate(bh[i]))
			continue;

		lock_buffer(bh[i]);
		if (bitmap_uptodate(bh[i])) {
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_lock_group(sb, first_group + i);
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_bitmap_uptodate(bh[i]);
			set_buffer_uptodate(bh[i]);
			ext4_unlock_group(sb, first_group + i);
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_unlock_group(sb, first_group + i);
		if (buffer_uptodate(bh[i])) {
			/*
			 * if the group is not uninit and bh is uptodate,
			 * the bitmap is also uptodate
			 */
			set_bitmap_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			continue;
		}
		get_bh(bh[i]);
		/*
		 * submit the buffer_head for reading. We can
		 * safely mark the bitmap as uptodate now.
		 * We do it here so the bitmap uptodate bit
		 * gets set with the buffer lock held.
		 */
		set_bitmap_uptodate(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug(1, "read bitmap for group %u\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page; i++)
		if (bh[i])
			wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page; i++)
		if (bh[i] && !buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages on the e4b struct. If buddy and bitmap
 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

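	/* e.g. (illustrative, 1k blocks and 4k pages => blocks_per_page = 4):
	 * group 7's bitmap is cache block 14, i.e. page 3, offset 2 */
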
	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}

static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
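
/*
 * Example (illustrative): if block 12 lies inside a free order-2 chunk,
 * the walk tests bit 12 >> 1 = 6 in the order-1 map (set, since only the
 * top order of a free chunk stays clear) and then bit 12 >> 2 = 3 in the
 * order-2 map, which is clear, so the function returns 2.
 */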

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

static void mb_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}
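
/*
 * Example (illustrative): mb_set_bits(bm, 30, 40) sets bits 30-31
 * individually, then bits 32-63 with a single 32-bit store
 * (*addr = 0xffffffff), then bits 64-69 individually again.
 */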

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += block;
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block "
					      "(bit %u)", block);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}
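
/*
 * Coalescing example (illustrative): freeing block 5 while block 4 is
 * already free clears bit 5 in the order-0 bitmap; both halves of the
 * pair (4,5) are then free, so bb_counters[0] -= 2, bb_counters[1] += 1
 * and bit 2 is cleared in the order-1 buddy. At orders above 0 the two
 * half bits are set again first, since only the top order of a free
 * chunk stays clear.
 */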

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME: drop order completely? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}
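
/*
 * Example (illustrative): with blocks 8-15 free as one order-3 chunk,
 * mb_find_extent(e4b, 0, 10, 4, &ex) finds the enclosing chunk
 * (fe_start = 8, fe_len = 8) and then trims the part before the
 * requested start, returning fe_start = 10, fe_len = 6.
 */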

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache() call for this
	 * group until we update the bitmap. That would mean we
	 * could double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and the flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than the previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than the previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}
1703
089ceecc
ES
1704static noinline_for_stack
1705int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
c9de560d
AT
1706 struct ext4_buddy *e4b)
1707{
1708 ext4_group_t group = ac->ac_g_ex.fe_group;
1709 int max;
1710 int err;
1711 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
c9de560d
AT
1712 struct ext4_free_extent ex;
1713
1714 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1715 return 0;
1716
1717 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1718 if (err)
1719 return err;
1720
1721 ext4_lock_group(ac->ac_sb, group);
1722 max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1723 ac->ac_g_ex.fe_len, &ex);
1724
1725 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1726 ext4_fsblk_t start;
1727
5661bd68
AM
1728 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1729 ex.fe_start;
c9de560d
AT
1730 /* use do_div to get remainder (would be 64-bit modulo) */
1731 if (do_div(start, sbi->s_stripe) == 0) {
1732 ac->ac_found++;
1733 ac->ac_b_ex = ex;
1734 ext4_mb_use_best_found(ac, e4b);
1735 }
1736 } else if (max >= ac->ac_g_ex.fe_len) {
1737 BUG_ON(ex.fe_len <= 0);
1738 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1739 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1740 ac->ac_found++;
1741 ac->ac_b_ex = ex;
1742 ext4_mb_use_best_found(ac, e4b);
1743 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1744 /* Sometimes, caller may want to merge even small
1745 * number of blocks to an existing extent */
1746 BUG_ON(ex.fe_len <= 0);
1747 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1748 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1749 ac->ac_found++;
1750 ac->ac_b_ex = ex;
1751 ext4_mb_use_best_found(ac, e4b);
1752 }
1753 ext4_unlock_group(ac->ac_sb, group);
1754 ext4_mb_unload_buddy(e4b);
1755
1756 return 0;
1757}
1758
1759/*
1760 * The routine scans buddy structures (not bitmap!) from given order
1761 * to max order and tries to find a big enough chunk to satisfy the request
1762 */
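/*
 * Rough sketch of what follows: for a 16-block request ac_2order is 4,
 * so we walk bb_counters[4], bb_counters[5], ... until an order has a
 * free buddy chunk; mb_find_next_zero_bit() locates the first free
 * chunk at that order and k << i converts the buddy index back to a
 * block offset within the group.
 */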
1763static noinline_for_stack
1764void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1765 struct ext4_buddy *e4b)
1766{
1767 struct super_block *sb = ac->ac_sb;
1768 struct ext4_group_info *grp = e4b->bd_info;
1769 void *buddy;
1770 int i;
1771 int k;
1772 int max;
1773
1774 BUG_ON(ac->ac_2order <= 0);
1775 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1776 if (grp->bb_counters[i] == 0)
1777 continue;
1778
1779 buddy = mb_find_buddy(e4b, i, &max);
1780 BUG_ON(buddy == NULL);
1781
1782 k = mb_find_next_zero_bit(buddy, max, 0);
1783 BUG_ON(k >= max);
1784
1785 ac->ac_found++;
1786
1787 ac->ac_b_ex.fe_len = 1 << i;
1788 ac->ac_b_ex.fe_start = k << i;
1789 ac->ac_b_ex.fe_group = e4b->bd_group;
1790
1791 ext4_mb_use_best_found(ac, e4b);
1792
1793 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1794
1795 if (EXT4_SB(sb)->s_mb_stats)
1796 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1797
1798 break;
1799 }
1800}
1801
1802/*
1803 * The routine scans the group and measures all found extents.
1804 * In order to optimize scanning, the caller must pass the number of
1805 * free blocks in the group, so the routine knows the upper limit.
1806 */
1807static noinline_for_stack
1808void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1809 struct ext4_buddy *e4b)
1810{
1811 struct super_block *sb = ac->ac_sb;
1812 void *bitmap = EXT4_MB_BITMAP(e4b);
1813 struct ext4_free_extent ex;
1814 int i;
1815 int free;
1816
1817 free = e4b->bd_info->bb_free;
1818 BUG_ON(free <= 0);
1819
1820 i = e4b->bd_info->bb_first_free;
1821
1822 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1823 i = mb_find_next_zero_bit(bitmap,
1824 EXT4_BLOCKS_PER_GROUP(sb), i);
1825 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1826 /*
1827 * If we have a corrupt bitmap, we won't find any
1828 * free blocks even though group info says we
1829 * have free blocks
1830 */
1831 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1832 "%d free blocks as per "
1833 "group info. But bitmap says 0",
1834 free);
1835 break;
1836 }
1837
1838 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1839 BUG_ON(ex.fe_len <= 0);
1840 if (free < ex.fe_len) {
1841 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1842 "%d free blocks as per "
1843 "group info. But got %d blocks",
1844 free, ex.fe_len);
1845 /*
1846 * The number of free blocks differs. This mostly
1847 * indicates that the bitmap is corrupt. So exit
1848 * without claiming the space.
1849 */
1850 break;
1851 }
1852
1853 ext4_mb_measure_extent(ac, &ex, e4b);
1854
1855 i += ex.fe_len;
1856 free -= ex.fe_len;
1857 }
1858
1859 ext4_mb_check_limits(ac, e4b, 1);
1860}
1861
1862/*
1863 * This is a special case for storage like RAID5:
1864 * we try to find stripe-aligned chunks for stripe-size-multiple requests
1865 */
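/*
 * Illustrative numbers for the alignment arithmetic below: with
 * s_stripe = 16 and first_group_block = 100, a = (100 + 15) / 16 = 7,
 * so the scan starts at i = 7 * 16 - 100 = 12, the first offset whose
 * absolute block number (112) is a multiple of the stripe size.
 */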
1866static noinline_for_stack
1867void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1868 struct ext4_buddy *e4b)
1869{
1870 struct super_block *sb = ac->ac_sb;
1871 struct ext4_sb_info *sbi = EXT4_SB(sb);
1872 void *bitmap = EXT4_MB_BITMAP(e4b);
1873 struct ext4_free_extent ex;
1874 ext4_fsblk_t first_group_block;
1875 ext4_fsblk_t a;
1876 ext4_grpblk_t i;
1877 int max;
1878
1879 BUG_ON(sbi->s_stripe == 0);
1880
1881 /* find first stripe-aligned block in group */
1882 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1883
1884 a = first_group_block + sbi->s_stripe - 1;
1885 do_div(a, sbi->s_stripe);
1886 i = (a * sbi->s_stripe) - first_group_block;
1887
1888 while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1889 if (!mb_test_bit(i, bitmap)) {
1890 max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1891 if (max >= sbi->s_stripe) {
1892 ac->ac_found++;
1893 ac->ac_b_ex = ex;
1894 ext4_mb_use_best_found(ac, e4b);
1895 break;
1896 }
1897 }
1898 i += sbi->s_stripe;
1899 }
1900}
1901
1902/* This is now called BEFORE we load the buddy bitmap. */
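/*
 * The cr levels checked below: cr == 0 accepts a group only if its
 * buddy has a free chunk of at least the requested order, cr == 1
 * uses the average fragment size (free / fragments), cr == 2 only
 * requires enough total free blocks, and cr == 3 takes any group
 * with free space.
 */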
1903static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1904 ext4_group_t group, int cr)
1905{
1906 unsigned free, fragments;
1907 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1908 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1909
1910 BUG_ON(cr < 0 || cr >= 4);
1911
1912 /* We only do this if the grp has never been initialized */
1913 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1914 int ret = ext4_mb_init_group(ac->ac_sb, group);
1915 if (ret)
1916 return 0;
1917 }
1918
1919 free = grp->bb_free;
1920 fragments = grp->bb_fragments;
1921 if (free == 0)
1922 return 0;
1923 if (fragments == 0)
1924 return 0;
1925
1926 switch (cr) {
1927 case 0:
1928 BUG_ON(ac->ac_2order == 0);
1929
1930 if (grp->bb_largest_free_order < ac->ac_2order)
1931 return 0;
1932
1933 /* Avoid using the first bg of a flexgroup for data files */
1934 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1935 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1936 ((group % flex_size) == 0))
1937 return 0;
1938
1939 return 1;
1940 case 1:
1941 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1942 return 1;
1943 break;
1944 case 2:
1945 if (free >= ac->ac_g_ex.fe_len)
1946 return 1;
1947 break;
1948 case 3:
1949 return 1;
1950 default:
1951 BUG();
1952 }
1953
1954 return 0;
1955}
1956
1957static noinline_for_stack int
1958ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1959 {
1960 ext4_group_t ngroups, group, i;
1961 int cr;
1962 int err = 0;
1963 struct ext4_sb_info *sbi;
1964 struct super_block *sb;
1965 struct ext4_buddy e4b;
1966
1967 sb = ac->ac_sb;
1968 sbi = EXT4_SB(sb);
1969 ngroups = ext4_get_groups_count(sb);
1970 /* non-extent files are limited to low blocks/groups */
1971 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1972 ngroups = sbi->s_blockfile_groups;
1973
1974 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1975
1976 /* first, try the goal */
1977 err = ext4_mb_find_by_goal(ac, &e4b);
1978 if (err || ac->ac_status == AC_STATUS_FOUND)
1979 goto out;
1980
1981 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1982 goto out;
1983
1984 /*
1985 * ac->ac_2order is set only if the fe_len is a power of 2;
1986 * if ac_2order is set we also set criteria to 0 so that we
1987 * try exact allocation using the buddy.
1988 */
1989 i = fls(ac->ac_g_ex.fe_len);
1990 ac->ac_2order = 0;
1991 /*
1992 * We search using buddy data only if the order of the request
1993 * is greater than or equal to sbi->s_mb_order2_reqs.
1994 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1995 */
1996 if (i >= sbi->s_mb_order2_reqs) {
1997 /*
1998 * This should tell if fe_len is exactly power of 2
1999 */
2000 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2001 ac->ac_2order = i - 1;
2002 }
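/*
 * Worked example: for a goal of 16 blocks fls() returns 5, the
 * power-of-two test (16 & ~(1 << 4)) == 0 passes, and ac_2order
 * becomes 4, i.e. we will first try an exact order-4 buddy chunk.
 */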
2003
2004 /* if stream allocation is enabled, use the global goal */
2005 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2006 /* TBD: may be a hot spot */
2007 spin_lock(&sbi->s_md_lock);
2008 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2009 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2010 spin_unlock(&sbi->s_md_lock);
2011 }
2012
2013 /* Let's just scan groups to find more or less suitable blocks */
2014 cr = ac->ac_2order ? 0 : 1;
2015 /*
2016 * cr == 0 try to get exact allocation,
2017 * cr == 3 try to get anything
2018 */
2019repeat:
2020 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2021 ac->ac_criteria = cr;
2022 /*
2023 * searching for the right group start
2024 * from the goal value specified
2025 */
2026 group = ac->ac_g_ex.fe_group;
2027
2028 for (i = 0; i < ngroups; group++, i++) {
2029 if (group == ngroups)
2030 group = 0;
2031
2032 /* This now checks without needing the buddy page */
2033 if (!ext4_mb_good_group(ac, group, cr))
2034 continue;
2035
2036 err = ext4_mb_load_buddy(sb, group, &e4b);
2037 if (err)
2038 goto out;
2039
2040 ext4_lock_group(sb, group);
2041
2042 /*
2043 * We need to check again after locking the
2044 * block group
2045 */
2046 if (!ext4_mb_good_group(ac, group, cr)) {
2047 ext4_unlock_group(sb, group);
2048 ext4_mb_unload_buddy(&e4b);
2049 continue;
2050 }
2051
2052 ac->ac_groups_scanned++;
2053 if (cr == 0)
2054 ext4_mb_simple_scan_group(ac, &e4b);
2055 else if (cr == 1 && sbi->s_stripe &&
2056 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2057 ext4_mb_scan_aligned(ac, &e4b);
2058 else
2059 ext4_mb_complex_scan_group(ac, &e4b);
2060
2061 ext4_unlock_group(sb, group);
2062 ext4_mb_unload_buddy(&e4b);
2063
2064 if (ac->ac_status != AC_STATUS_CONTINUE)
2065 break;
2066 }
2067 }
2068
2069 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2070 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2071 /*
2072 * We've been searching too long. Let's try to allocate
2073 * the best chunk we've found so far
2074 */
2075
2076 ext4_mb_try_best_found(ac, &e4b);
2077 if (ac->ac_status != AC_STATUS_FOUND) {
2078 /*
2079 * Someone luckier has already allocated it.
2080 * The only thing we can do is just take the first
2081 * found block(s)
2082 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2083 */
2084 ac->ac_b_ex.fe_group = 0;
2085 ac->ac_b_ex.fe_start = 0;
2086 ac->ac_b_ex.fe_len = 0;
2087 ac->ac_status = AC_STATUS_CONTINUE;
2088 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2089 cr = 3;
2090 atomic_inc(&sbi->s_mb_lost_chunks);
2091 goto repeat;
2092 }
2093 }
2094out:
2095 return err;
2096}
2097
2098static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2099{
2100 struct super_block *sb = seq->private;
2101 ext4_group_t group;
2102
2103 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2104 return NULL;
2105 group = *pos + 1;
2106 return (void *) ((unsigned long) group);
2107}
2108
2109static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2110{
2111 struct super_block *sb = seq->private;
2112 ext4_group_t group;
2113
2114 ++*pos;
2115 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2116 return NULL;
2117 group = *pos + 1;
2118 return (void *) ((unsigned long) group);
2119}
2120
2121static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2122{
2123 struct super_block *sb = seq->private;
2124 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2125 int i;
2126 int err;
2127 struct ext4_buddy e4b;
2128 struct sg {
2129 struct ext4_group_info info;
2130 ext4_grpblk_t counters[16];
2131 } sg;
2132
2133 group--;
2134 if (group == 0)
2135 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2136 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2137 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2138 "group", "free", "frags", "first",
2139 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2140 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2141
2142 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2143 sizeof(struct ext4_group_info);
2144 err = ext4_mb_load_buddy(sb, group, &e4b);
2145 if (err) {
2146 seq_printf(seq, "#%-5u: I/O error\n", group);
2147 return 0;
2148 }
2149 ext4_lock_group(sb, group);
2150 memcpy(&sg, ext4_get_group_info(sb, group), i);
2151 ext4_unlock_group(sb, group);
2152 ext4_mb_unload_buddy(&e4b);
2153
2154 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2155 sg.info.bb_fragments, sg.info.bb_first_free);
2156 for (i = 0; i <= 13; i++)
2157 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2158 sg.info.bb_counters[i] : 0);
2159 seq_printf(seq, " ]\n");
2160
2161 return 0;
2162}
2163
2164static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2165{
2166}
2167
2168 static const struct seq_operations ext4_mb_seq_groups_ops = {
2169 .start = ext4_mb_seq_groups_start,
2170 .next = ext4_mb_seq_groups_next,
2171 .stop = ext4_mb_seq_groups_stop,
2172 .show = ext4_mb_seq_groups_show,
2173};
2174
2175static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2176{
2177 struct super_block *sb = PDE(inode)->data;
2178 int rc;
2179
2180 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2181 if (rc == 0) {
2182 struct seq_file *m = file->private_data;
2183 m->private = sb;
2184 }
2185 return rc;
2186
2187}
2188
2189 static const struct file_operations ext4_mb_seq_groups_fops = {
2190 .owner = THIS_MODULE,
2191 .open = ext4_mb_seq_groups_open,
2192 .read = seq_read,
2193 .llseek = seq_lseek,
2194 .release = seq_release,
2195};
2196
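/*
 * Each block size gets its own groupinfo slab because the size of
 * struct ext4_group_info depends on the number of buddy orders and
 * thus on the block size; e.g. a 1KB block size maps to cache_index 0
 * and 4KB to cache_index 2 (EXT4_MIN_BLOCK_LOG_SIZE being 10).
 */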
2197static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2198{
2199 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2200 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2201
2202 BUG_ON(!cachep);
2203 return cachep;
2204}
2205
2206/* Create and initialize ext4_group_info data for the given group. */
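/*
 * s_group_info is a two-level table: the top level holds one pointer
 * per descriptor block, and each of those points to a table of
 * EXT4_DESC_PER_BLOCK(sb) ext4_group_info pointers (e.g. 128 groups
 * per table with 4KB blocks and 32-byte descriptors).
 */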
2207 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2208 struct ext4_group_desc *desc)
2209{
2210 int i;
2211 int metalen = 0;
2212 struct ext4_sb_info *sbi = EXT4_SB(sb);
2213 struct ext4_group_info **meta_group_info;
2214 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2215
2216 /*
2217 * First check if this group is the first of a group-descriptor block.
2218 * If it is, we have to allocate a new table of pointers
2219 * to ext4_group_info structures
2220 */
2221 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2222 metalen = sizeof(*meta_group_info) <<
2223 EXT4_DESC_PER_BLOCK_BITS(sb);
2224 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2225 if (meta_group_info == NULL) {
2226 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2227 "buddy group\n");
2228 goto exit_meta_group_info;
2229 }
2230 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2231 meta_group_info;
2232 }
2233
2234 meta_group_info =
2235 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2236 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2237
2238 meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
2239 if (meta_group_info[i] == NULL) {
2240 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2241 goto exit_group_info;
2242 }
2243 memset(meta_group_info[i], 0, kmem_cache_size(cachep));
2244 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2245 &(meta_group_info[i]->bb_state));
2246
2247 /*
2248 * initialize bb_free to be able to skip
2249 * empty groups without initialization
2250 */
2251 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2252 meta_group_info[i]->bb_free =
2253 ext4_free_blocks_after_init(sb, group, desc);
2254 } else {
2255 meta_group_info[i]->bb_free =
2256 ext4_free_blks_count(sb, desc);
2257 }
2258
2259 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2260 init_rwsem(&meta_group_info[i]->alloc_sem);
2261 meta_group_info[i]->bb_free_root = RB_ROOT;
2262 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
2263
2264#ifdef DOUBLE_CHECK
2265 {
2266 struct buffer_head *bh;
2267 meta_group_info[i]->bb_bitmap =
2268 kmalloc(sb->s_blocksize, GFP_KERNEL);
2269 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2270 bh = ext4_read_block_bitmap(sb, group);
2271 BUG_ON(bh == NULL);
2272 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2273 sb->s_blocksize);
2274 put_bh(bh);
2275 }
2276#endif
2277
2278 return 0;
2279
2280exit_group_info:
2281 /* If a meta_group_info table has been allocated, release it now */
2282 if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2283 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2284exit_meta_group_info:
2285 return -ENOMEM;
2286} /* ext4_mb_add_groupinfo */
2287
2288static int ext4_mb_init_backend(struct super_block *sb)
2289{
2290 ext4_group_t ngroups = ext4_get_groups_count(sb);
2291 ext4_group_t i;
2292 struct ext4_sb_info *sbi = EXT4_SB(sb);
2293 struct ext4_super_block *es = sbi->s_es;
2294 int num_meta_group_infos;
2295 int num_meta_group_infos_max;
2296 int array_size;
2297 struct ext4_group_desc *desc;
2298 struct kmem_cache *cachep;
2299
2300 /* This is the number of blocks used by GDT */
2301 num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2302 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2303
2304 /*
2305 * This is the total number of blocks used by GDT including
2306 * the number of reserved blocks for GDT.
2307 * The s_group_info array is allocated with this value
2308 * to allow a clean online resize without a complex
2309 * manipulation of pointers.
2310 * The drawback is some unused memory when no resize
2311 * occurs, but it's very low in terms of pages
2312 * (see comments below).
2313 * Need to handle this properly when META_BG resizing is allowed
2314 */
2315 num_meta_group_infos_max = num_meta_group_infos +
2316 le16_to_cpu(es->s_reserved_gdt_blocks);
2317
2318 /*
2319 * array_size is the size of s_group_info array. We round it
2320 * to the next power of two because this approximation is done
2321 * internally by kmalloc so we can have some more memory
2322 * for free here (e.g. may be used for META_BG resize).
2323 */
2324 array_size = 1;
2325 while (array_size < sizeof(*sbi->s_group_info) *
2326 num_meta_group_infos_max)
2327 array_size = array_size << 1;
c9de560d
AT
2328 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2329 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2330 * So a two level scheme suffices for now. */
2331 sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
2332 if (sbi->s_group_info == NULL) {
2333 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2334 return -ENOMEM;
2335 }
2336 sbi->s_buddy_cache = new_inode(sb);
2337 if (sbi->s_buddy_cache == NULL) {
2338 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2339 goto err_freesgi;
2340 }
2341 sbi->s_buddy_cache->i_ino = get_next_ino();
2342 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2343 for (i = 0; i < ngroups; i++) {
2344 desc = ext4_get_group_desc(sb, i, NULL);
2345 if (desc == NULL) {
2346 printk(KERN_ERR
2347 "EXT4-fs: can't read descriptor %u\n", i);
2348 goto err_freebuddy;
2349 }
2350 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2351 goto err_freebuddy;
2352 }
2353
2354 return 0;
2355
2356err_freebuddy:
2357 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2358 while (i-- > 0)
2359 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2360 i = num_meta_group_infos;
2361 while (i-- > 0)
2362 kfree(sbi->s_group_info[i]);
2363 iput(sbi->s_buddy_cache);
2364err_freesgi:
2365 kfree(sbi->s_group_info);
2366 return -ENOMEM;
2367}
2368
2369static void ext4_groupinfo_destroy_slabs(void)
2370{
2371 int i;
2372
2373 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2374 if (ext4_groupinfo_caches[i])
2375 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2376 ext4_groupinfo_caches[i] = NULL;
2377 }
2378}
2379
2380static int ext4_groupinfo_create_slab(size_t size)
2381{
2382 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2383 int slab_size;
2384 int blocksize_bits = order_base_2(size);
2385 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2386 struct kmem_cache *cachep;
2387
2388 if (cache_index >= NR_GRPINFO_CACHES)
2389 return -EINVAL;
2390
2391 if (unlikely(cache_index < 0))
2392 cache_index = 0;
2393
2394 mutex_lock(&ext4_grpinfo_slab_create_mutex);
2395 if (ext4_groupinfo_caches[cache_index]) {
2396 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2397 return 0; /* Already created */
2398 }
2399
2400 slab_size = offsetof(struct ext4_group_info,
2401 bb_counters[blocksize_bits + 2]);
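/* bb_counters[] needs one slot per buddy order, i.e. orders 0
 * through blocksize_bits + 1, hence the blocksize_bits + 2 entries
 * sized into the slab object here.
 */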
2402
2403 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2404 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2405 NULL);
2406
2407 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2408 if (!cachep) {
2409 printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
2410 return -ENOMEM;
2411 }
2412
2413 ext4_groupinfo_caches[cache_index] = cachep;
2414
2415 return 0;
2416}
2417
2418int ext4_mb_init(struct super_block *sb, int needs_recovery)
2419{
2420 struct ext4_sb_info *sbi = EXT4_SB(sb);
6be2ded1 2421 unsigned i, j;
2422 unsigned offset;
2423 unsigned max;
2424 int ret;
2425
2426 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2427
2428 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2429 if (sbi->s_mb_offsets == NULL) {
2430 ret = -ENOMEM;
2431 goto out;
2432 }
2433
2434 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
c9de560d
AT
2435 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2436 if (sbi->s_mb_maxs == NULL) {
2437 ret = -ENOMEM;
2438 goto out;
2439 }
2440
2441 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2442 if (ret < 0)
2443 goto out;
2444
2445 /* order 0 is regular bitmap */
2446 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2447 sbi->s_mb_offsets[0] = 0;
2448
2449 i = 1;
2450 offset = 0;
2451 max = sb->s_blocksize << 2;
2452 do {
2453 sbi->s_mb_offsets[i] = offset;
2454 sbi->s_mb_maxs[i] = max;
2455 offset += 1 << (sb->s_blocksize_bits - i);
2456 max = max >> 1;
2457 i++;
2458 } while (i <= sb->s_blocksize_bits + 1);
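/*
 * E.g. with 4KB blocks (s_blocksize_bits == 12): maxs[0] = 32768 bits
 * for the plain bitmap, then maxs[1] = 16384 bits at byte offset 0 of
 * the buddy area, maxs[2] = 8192 at offset 2048, maxs[3] = 4096 at
 * offset 3072, and so on, halving the bit count per order.
 */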
2459
2460 /* init file for buddy data */
2461 ret = ext4_mb_init_backend(sb);
2462 if (ret != 0) {
2463 goto out;
2464 }
2465
2466 spin_lock_init(&sbi->s_md_lock);
2467 spin_lock_init(&sbi->s_bal_lock);
2468
2469 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2470 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2471 sbi->s_mb_stats = MB_DEFAULT_STATS;
2472 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2473 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2474 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2475
2476 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2477 if (sbi->s_locality_groups == NULL) {
2478 ret = -ENOMEM;
2479 goto out;
2480 }
2481 for_each_possible_cpu(i) {
2482 struct ext4_locality_group *lg;
2483 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2484 mutex_init(&lg->lg_mutex);
2485 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2486 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2487 spin_lock_init(&lg->lg_prealloc_lock);
2488 }
2489
2490 if (sbi->s_proc)
2491 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2492 &ext4_mb_seq_groups_fops, sb);
2493
2494 if (sbi->s_journal)
2495 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2496out:
2497 if (ret) {
2498 kfree(sbi->s_mb_offsets);
2499 kfree(sbi->s_mb_maxs);
2500 }
2501 return ret;
2502}
2503
2504 /* needs to be called with the ext4 group lock held */
2505static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2506{
2507 struct ext4_prealloc_space *pa;
2508 struct list_head *cur, *tmp;
2509 int count = 0;
2510
2511 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2512 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2513 list_del(&pa->pa_group_list);
2514 count++;
2515 kmem_cache_free(ext4_pspace_cachep, pa);
2516 }
2517 if (count)
2518 mb_debug(1, "mballoc: %u PAs left\n", count);
2519
2520}
2521
2522int ext4_mb_release(struct super_block *sb)
2523{
2524 ext4_group_t ngroups = ext4_get_groups_count(sb);
2525 ext4_group_t i;
2526 int num_meta_group_infos;
2527 struct ext4_group_info *grinfo;
2528 struct ext4_sb_info *sbi = EXT4_SB(sb);
2529 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2530
2531 if (sbi->s_group_info) {
2532 for (i = 0; i < ngroups; i++) {
2533 grinfo = ext4_get_group_info(sb, i);
2534#ifdef DOUBLE_CHECK
2535 kfree(grinfo->bb_bitmap);
2536#endif
2537 ext4_lock_group(sb, i);
2538 ext4_mb_cleanup_pa(grinfo);
2539 ext4_unlock_group(sb, i);
2540 kmem_cache_free(cachep, grinfo);
2541 }
2542 num_meta_group_infos = (ngroups +
2543 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2544 EXT4_DESC_PER_BLOCK_BITS(sb);
2545 for (i = 0; i < num_meta_group_infos; i++)
2546 kfree(sbi->s_group_info[i]);
2547 kfree(sbi->s_group_info);
2548 }
2549 kfree(sbi->s_mb_offsets);
2550 kfree(sbi->s_mb_maxs);
2551 if (sbi->s_buddy_cache)
2552 iput(sbi->s_buddy_cache);
2553 if (sbi->s_mb_stats) {
2554 printk(KERN_INFO
2555 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2556 atomic_read(&sbi->s_bal_allocated),
2557 atomic_read(&sbi->s_bal_reqs),
2558 atomic_read(&sbi->s_bal_success));
2559 printk(KERN_INFO
2560 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2561 "%u 2^N hits, %u breaks, %u lost\n",
2562 atomic_read(&sbi->s_bal_ex_scanned),
2563 atomic_read(&sbi->s_bal_goals),
2564 atomic_read(&sbi->s_bal_2orders),
2565 atomic_read(&sbi->s_bal_breaks),
2566 atomic_read(&sbi->s_mb_lost_chunks));
2567 printk(KERN_INFO
2568 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2569 sbi->s_mb_buddies_generated++,
2570 sbi->s_mb_generation_time);
2571 printk(KERN_INFO
2572 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2573 atomic_read(&sbi->s_mb_preallocated),
2574 atomic_read(&sbi->s_mb_discarded));
2575 }
2576
2577 free_percpu(sbi->s_locality_groups);
2578 if (sbi->s_proc)
2579 remove_proc_entry("mb_groups", sbi->s_proc);
2580
2581 return 0;
2582}
2583
2584 static inline int ext4_issue_discard(struct super_block *sb,
2585 ext4_group_t block_group, ext4_grpblk_t block, int count)
2586{
2587 ext4_fsblk_t discard_block;
2588
2589 discard_block = block + ext4_group_first_block_no(sb, block_group);
2590 trace_ext4_discard_blocks(sb,
2591 (unsigned long long) discard_block, count);
2592 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2593}
2594
2595/*
2596 * This function is called by the jbd2 layer once the commit has finished,
2597 * so we know we can free the blocks that were released with that commit.
2598 */
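/*
 * The buddy pages are "pinned" here because ext4_mb_free_metadata()
 * took extra page references when the free_data entries were queued;
 * those references are dropped below once a group's rb tree of
 * pending frees becomes empty.
 */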
2599static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2600 {
2601 struct super_block *sb = journal->j_private;
2602 struct ext4_buddy e4b;
2603 struct ext4_group_info *db;
2604 int err, count = 0, count2 = 0;
2605 struct ext4_free_data *entry;
2606 struct list_head *l, *ltmp;
2607
2608 list_for_each_safe(l, ltmp, &txn->t_private_list) {
2609 entry = list_entry(l, struct ext4_free_data, list);
2610
2611 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2612 entry->count, entry->group, entry);
2613
2614 if (test_opt(sb, DISCARD))
2615 ext4_issue_discard(sb, entry->group,
2616 entry->start_blk, entry->count);
2617
2618 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2619 /* we expect to find existing buddy because it's pinned */
2620 BUG_ON(err != 0);
2621
2622 db = e4b.bd_info;
2623 /* there are blocks to put in buddy to make them really free */
2624 count += entry->count;
2625 count2++;
2626 ext4_lock_group(sb, entry->group);
2627 /* Take it out of per group rb tree */
2628 rb_erase(&entry->node, &(db->bb_free_root));
2629 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2630
2631 /*
2632 * Clear the trimmed flag for the group so that the next
2633 * ext4_trim_fs can trim it.
2634 * If the volume is mounted with -o discard, online discard
2635 * is supported and the free blocks will be trimmed online.
2636 */
2637 if (!test_opt(sb, DISCARD))
2638 EXT4_MB_GRP_CLEAR_TRIMMED(db);
2639
2640 if (!db->bb_free_root.rb_node) {
2641 /* No more items in the per group rb tree
2642 * balance refcounts from ext4_mb_free_metadata()
2643 */
2644 page_cache_release(e4b.bd_buddy_page);
2645 page_cache_release(e4b.bd_bitmap_page);
2646 }
2647 ext4_unlock_group(sb, entry->group);
2648 kmem_cache_free(ext4_free_ext_cachep, entry);
2649 ext4_mb_unload_buddy(&e4b);
2650 }
2651
2652 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2653}
2654
2655#ifdef CONFIG_EXT4_DEBUG
2656u8 mb_enable_debug __read_mostly;
2657
2658static struct dentry *debugfs_dir;
2659static struct dentry *debugfs_debug;
2660
2661static void __init ext4_create_debugfs_entry(void)
2662{
2663 debugfs_dir = debugfs_create_dir("ext4", NULL);
2664 if (debugfs_dir)
2665 debugfs_debug = debugfs_create_u8("mballoc-debug",
2666 S_IRUGO | S_IWUSR,
2667 debugfs_dir,
2668 &mb_enable_debug);
2669}
2670
2671static void ext4_remove_debugfs_entry(void)
2672{
2673 debugfs_remove(debugfs_debug);
2674 debugfs_remove(debugfs_dir);
2675}
2676
2677#else
2678
2679static void __init ext4_create_debugfs_entry(void)
2680{
2681}
2682
2683static void ext4_remove_debugfs_entry(void)
2684{
2685}
2686
2687#endif
2688
2689 int __init ext4_init_mballoc(void)
2690 {
2691 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2692 SLAB_RECLAIM_ACCOUNT);
2693 if (ext4_pspace_cachep == NULL)
2694 return -ENOMEM;
2695
2696 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2697 SLAB_RECLAIM_ACCOUNT);
2698 if (ext4_ac_cachep == NULL) {
2699 kmem_cache_destroy(ext4_pspace_cachep);
2700 return -ENOMEM;
2701 }
2702
2703 ext4_free_ext_cachep = KMEM_CACHE(ext4_free_data,
2704 SLAB_RECLAIM_ACCOUNT);
2705 if (ext4_free_ext_cachep == NULL) {
2706 kmem_cache_destroy(ext4_pspace_cachep);
2707 kmem_cache_destroy(ext4_ac_cachep);
2708 return -ENOMEM;
2709 }
2710 ext4_create_debugfs_entry();
2711 return 0;
2712}
2713
2714 void ext4_exit_mballoc(void)
2715 {
2716 /*
2717 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2718 * before destroying the slab cache.
2719 */
2720 rcu_barrier();
2721 kmem_cache_destroy(ext4_pspace_cachep);
2722 kmem_cache_destroy(ext4_ac_cachep);
2723 kmem_cache_destroy(ext4_free_ext_cachep);
2724 ext4_groupinfo_destroy_slabs();
2725 ext4_remove_debugfs_entry();
2726}
2727
2728
2729/*
2730 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2731 * Returns 0 on success or an error code
2732 */
2733static noinline_for_stack int
2734ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2735 handle_t *handle, unsigned int reserv_blks)
2736{
2737 struct buffer_head *bitmap_bh = NULL;
2738 struct ext4_group_desc *gdp;
2739 struct buffer_head *gdp_bh;
2740 struct ext4_sb_info *sbi;
2741 struct super_block *sb;
2742 ext4_fsblk_t block;
2743 int err, len;
2744
2745 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2746 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2747
2748 sb = ac->ac_sb;
2749 sbi = EXT4_SB(sb);
2750
2751 err = -EIO;
2752 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2753 if (!bitmap_bh)
2754 goto out_err;
2755
2756 err = ext4_journal_get_write_access(handle, bitmap_bh);
2757 if (err)
2758 goto out_err;
2759
2760 err = -EIO;
2761 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2762 if (!gdp)
2763 goto out_err;
2764
2765 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2766 ext4_free_blks_count(sb, gdp));
2767
2768 err = ext4_journal_get_write_access(handle, gdp_bh);
2769 if (err)
2770 goto out_err;
2771
2772 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2773
2774 len = ac->ac_b_ex.fe_len;
2775 if (!ext4_data_block_valid(sbi, block, len)) {
2776 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2777 "fs metadata\n", block, block+len);
2778 /* File system was mounted not to panic on error.
2779 * Fix the bitmap and repeat the block allocation.
2780 * We leak some of the blocks here.
2781 */
2782 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2783 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2784 ac->ac_b_ex.fe_len);
2785 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2786 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2787 if (!err)
2788 err = -EAGAIN;
2789 goto out_err;
2790 }
2791
2792 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
c9de560d
AT
2793#ifdef AGGRESSIVE_CHECK
2794 {
2795 int i;
2796 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2797 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2798 bitmap_bh->b_data));
2799 }
2800 }
2801#endif
955ce5f5 2802 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
c9de560d
AT
2803 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2804 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2805 ext4_free_blks_set(sb, gdp,
2806 ext4_free_blocks_after_init(sb,
2807 ac->ac_b_ex.fe_group, gdp));
2808 }
2809 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
2810 ext4_free_blks_set(sb, gdp, len);
2811 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2812
2813 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2814 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2815 /*
2816 * Now reduce the dirty block count also. Should not go negative
2817 */
2818 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2819 /* release all the reserved blocks if non delalloc */
2820 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2821
2822 if (sbi->s_log_groups_per_flex) {
2823 ext4_group_t flex_group = ext4_flex_group(sbi,
2824 ac->ac_b_ex.fe_group);
2825 atomic_sub(ac->ac_b_ex.fe_len,
2826 &sbi->s_flex_groups[flex_group].free_blocks);
2827 }
2828
2829 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2830 if (err)
2831 goto out_err;
2832 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2833
2834out_err:
2835 ext4_mark_super_dirty(sb);
2836 brelse(bitmap_bh);
2837 return err;
2838}
2839
2840/*
2841 * here we normalize the request for the locality group
2842 * Group requests are normalized to s_stripe size if we set it via the mount
2843 * option. If not, we set it to s_mb_group_prealloc, which can be configured
2844 * via /sys/fs/ext4/<partition>/mb_group_prealloc
2845 *
2846 * XXX: should we try to preallocate more than the group has now?
2847 */
2848static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2849{
2850 struct super_block *sb = ac->ac_sb;
2851 struct ext4_locality_group *lg = ac->ac_lg;
2852
2853 BUG_ON(lg == NULL);
2854 if (EXT4_SB(sb)->s_stripe)
2855 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2856 else
2857 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2858 mb_debug(1, "#%u: goal %u blocks for locality group\n",
2859 current->pid, ac->ac_g_ex.fe_len);
2860}
2861
2862/*
2863 * Normalization means making request better in terms of
2864 * size and alignment
2865 */
2866static noinline_for_stack void
2867ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2868 struct ext4_allocation_request *ar)
2869{
2870 int bsbits, max;
2871 ext4_lblk_t end;
2872 loff_t size, orig_size, start_off;
2873 ext4_lblk_t start;
2874 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2875 struct ext4_prealloc_space *pa;
2876
2877 /* only normalize data requests; metadata requests
2878 do not need preallocation */
2879 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2880 return;
2881
2882 /* sometimes the caller may want exact blocks */
2883 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2884 return;
2885
2886 /* caller may indicate that preallocation isn't
2887 * required (it's a tail, for example) */
2888 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2889 return;
2890
2891 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2892 ext4_mb_normalize_group_request(ac);
2893 return ;
2894 }
2895
2896 bsbits = ac->ac_sb->s_blocksize_bits;
2897
2898 /* first, let's learn the actual file size
2899 * we'd have once the current request is allocated */
2900 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2901 size = size << bsbits;
2902 if (size < i_size_read(ac->ac_inode))
2903 size = i_size_read(ac->ac_inode);
2904 orig_size = size;
2905
2906 /* max size of free chunks */
2907 max = 2 << bsbits;
2908
2909#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
2910 (req <= (size) || max <= (chunk_size))
2911
AT
2911
2912 /* first, try to predict filesize */
2913 /* XXX: should this table be tunable? */
2914 start_off = 0;
2915 if (size <= 16 * 1024) {
2916 size = 16 * 1024;
2917 } else if (size <= 32 * 1024) {
2918 size = 32 * 1024;
2919 } else if (size <= 64 * 1024) {
2920 size = 64 * 1024;
2921 } else if (size <= 128 * 1024) {
2922 size = 128 * 1024;
2923 } else if (size <= 256 * 1024) {
2924 size = 256 * 1024;
2925 } else if (size <= 512 * 1024) {
2926 size = 512 * 1024;
2927 } else if (size <= 1024 * 1024) {
2928 size = 1024 * 1024;
2929 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2930 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2931 (21 - bsbits)) << 21;
2932 size = 2 * 1024 * 1024;
2933 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2934 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2935 (22 - bsbits)) << 22;
2936 size = 4 * 1024 * 1024;
2937 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2938 (8<<20)>>bsbits, max, 8 * 1024)) {
2939 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2940 (23 - bsbits)) << 23;
2941 size = 8 * 1024 * 1024;
2942 } else {
2943 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2944 size = ac->ac_o_ex.fe_len << bsbits;
2945 }
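/*
 * E.g. a predicted size of 300KB lands in the 512KB bucket above, so
 * the goal request is padded out to 512KB; past 1MB the
 * NRL_CHECK_SIZE() buckets also align start_off, e.g. to a 2MB
 * boundary via (fe_logical >> (21 - bsbits)) << 21.
 */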
2946 size = size >> bsbits;
2947 start = start_off >> bsbits;
2948
2949 /* don't cover already allocated blocks in selected range */
2950 if (ar->pleft && start <= ar->lleft) {
2951 size -= ar->lleft + 1 - start;
2952 start = ar->lleft + 1;
2953 }
2954 if (ar->pright && start + size - 1 >= ar->lright)
2955 size -= start + size - ar->lright;
2956
2957 end = start + size;
2958
2959 /* check we don't cross already preallocated blocks */
2960 rcu_read_lock();
2961 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2962 ext4_lblk_t pa_end;
2963
2964 if (pa->pa_deleted)
2965 continue;
2966 spin_lock(&pa->pa_lock);
2967 if (pa->pa_deleted) {
2968 spin_unlock(&pa->pa_lock);
2969 continue;
2970 }
2971
2972 pa_end = pa->pa_lstart + pa->pa_len;
2973
2974 /* PA must not overlap original request */
2975 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2976 ac->ac_o_ex.fe_logical < pa->pa_lstart));
2977
2978 /* skip PAs this normalized request doesn't overlap with */
2979 if (pa->pa_lstart >= end || pa_end <= start) {
2980 spin_unlock(&pa->pa_lock);
2981 continue;
2982 }
2983 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2984
2985 /* adjust start or end to be adjacent to this pa */
2986 if (pa_end <= ac->ac_o_ex.fe_logical) {
2987 BUG_ON(pa_end < start);
2988 start = pa_end;
2989 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2990 BUG_ON(pa->pa_lstart > end);
2991 end = pa->pa_lstart;
2992 }
2993 spin_unlock(&pa->pa_lock);
2994 }
2995 rcu_read_unlock();
2996 size = end - start;
2997
2998 /* XXX: extra loop to check we really don't overlap preallocations */
2999 rcu_read_lock();
3000 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3001 ext4_lblk_t pa_end;
3002 spin_lock(&pa->pa_lock);
3003 if (pa->pa_deleted == 0) {
3004 pa_end = pa->pa_lstart + pa->pa_len;
3005 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3006 }
3007 spin_unlock(&pa->pa_lock);
3008 }
3009 rcu_read_unlock();
3010
3011 if (start + size <= ac->ac_o_ex.fe_logical &&
3012 start > ac->ac_o_ex.fe_logical) {
3013 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3014 (unsigned long) start, (unsigned long) size,
3015 (unsigned long) ac->ac_o_ex.fe_logical);
3016 }
3017 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3018 start > ac->ac_o_ex.fe_logical);
3019 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3020
3021 /* now prepare goal request */
3022
3023 /* XXX: is it better to align blocks WRT to logical
3024 * placement or satisfy big request as is */
3025 ac->ac_g_ex.fe_logical = start;
3026 ac->ac_g_ex.fe_len = size;
3027
3028 /* define goal start in order to merge */
3029 if (ar->pright && (ar->lright == (start + size))) {
3030 /* merge to the right */
3031 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3032 &ac->ac_f_ex.fe_group,
3033 &ac->ac_f_ex.fe_start);
3034 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3035 }
3036 if (ar->pleft && (ar->lleft + 1 == start)) {
3037 /* merge to the left */
3038 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3039 &ac->ac_f_ex.fe_group,
3040 &ac->ac_f_ex.fe_start);
3041 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3042 }
3043
3044 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3045 (unsigned) orig_size, (unsigned) start);
3046}
3047
3048static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3049{
3050 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3051
3052 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3053 atomic_inc(&sbi->s_bal_reqs);
3054 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3055 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3056 atomic_inc(&sbi->s_bal_success);
3057 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3058 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3059 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3060 atomic_inc(&sbi->s_bal_goals);
3061 if (ac->ac_found > sbi->s_mb_max_to_scan)
3062 atomic_inc(&sbi->s_bal_breaks);
3063 }
3064
3065 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3066 trace_ext4_mballoc_alloc(ac);
3067 else
3068 trace_ext4_mballoc_prealloc(ac);
3069}
3070
3071/*
3072 * Called on failure; free up any blocks from the inode PA for this
3073 * context. We don't need this for MB_GROUP_PA because we only change
3074 * pa_free in ext4_mb_release_context(), but on failure, we've already
3075 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3076 */
3077static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3078{
3079 struct ext4_prealloc_space *pa = ac->ac_pa;
3080 int len;
3081
3082 if (pa && pa->pa_type == MB_INODE_PA) {
3083 len = ac->ac_b_ex.fe_len;
3084 pa->pa_free += len;
3085 }
3086
3087}
3088
3089/*
3090 * use blocks preallocated to inode
3091 */
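/*
 * E.g. a PA with pa_lstart = 100, pa_pstart = 5000, pa_len = 16: a
 * request at logical block 104 maps to physical start 5004, and len
 * is clipped below so it never runs past the end of the PA.
 */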
3092static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3093 struct ext4_prealloc_space *pa)
3094{
3095 ext4_fsblk_t start;
3096 ext4_fsblk_t end;
3097 int len;
3098
3099 /* found preallocated blocks, use them */
3100 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3101 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3102 len = end - start;
3103 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3104 &ac->ac_b_ex.fe_start);
3105 ac->ac_b_ex.fe_len = len;
3106 ac->ac_status = AC_STATUS_FOUND;
3107 ac->ac_pa = pa;
3108
3109 BUG_ON(start < pa->pa_pstart);
3110 BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3111 BUG_ON(pa->pa_free < len);
3112 pa->pa_free -= len;
3113
3114 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3115}
3116
3117/*
3118 * use blocks preallocated to locality group
3119 */
3120static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3121 struct ext4_prealloc_space *pa)
3122{
3123 unsigned int len = ac->ac_o_ex.fe_len;
3124
3125 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3126 &ac->ac_b_ex.fe_group,
3127 &ac->ac_b_ex.fe_start);
3128 ac->ac_b_ex.fe_len = len;
3129 ac->ac_status = AC_STATUS_FOUND;
3130 ac->ac_pa = pa;
3131
3132 /* we don't correct pa_pstart or pa_len here to avoid a
3133 * possible race when the group is being loaded concurrently;
3134 * instead we correct the pa later, after blocks are marked
3135 * in the on-disk bitmap -- see ext4_mb_release_context().
3136 * Other CPUs are prevented from allocating from this pa by lg_mutex
3137 */
3138 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3139}
3140
3141/*
3142 * Return the prealloc space that has the minimal distance
3143 * from the goal block. @cpa is the prealloc
3144 * space with the currently known minimal distance
3145 * from the goal block.
3146 */
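/*
 * E.g. with a goal block of 1000, a PA starting at physical block 900
 * (distance 100) is kept in preference to one starting at 1200
 * (distance 200); ties keep the already-chosen @cpa.
 */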
3147static struct ext4_prealloc_space *
3148ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3149 struct ext4_prealloc_space *pa,
3150 struct ext4_prealloc_space *cpa)
3151{
3152 ext4_fsblk_t cur_distance, new_distance;
3153
3154 if (cpa == NULL) {
3155 atomic_inc(&pa->pa_count);
3156 return pa;
3157 }
3158 cur_distance = abs(goal_block - cpa->pa_pstart);
3159 new_distance = abs(goal_block - pa->pa_pstart);
3160
3161 if (cur_distance <= new_distance)
3162 return cpa;
3163
3164 /* drop the previous reference */
3165 atomic_dec(&cpa->pa_count);
3166 atomic_inc(&pa->pa_count);
3167 return pa;
3168}
3169
c9de560d
AT
3170/*
3171 * search goal blocks in preallocated space
3172 */
3173static noinline_for_stack int
3174ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3175 {
3176 int order, i;
3177 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3178 struct ext4_locality_group *lg;
3179 struct ext4_prealloc_space *pa, *cpa = NULL;
3180 ext4_fsblk_t goal_block;
3181
3182 /* only data can be preallocated */
3183 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3184 return 0;
3185
3186 /* first, try per-file preallocation */
3187 rcu_read_lock();
3188 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3189
3190 /* all fields in this condition don't change,
3191 * so we can skip locking for them */
3192 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3193 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3194 continue;
3195
3196 /* non-extent files can't have physical blocks past 2^32 */
3197 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3198 pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
3199 continue;
3200
3201 /* found preallocated blocks, use them */
3202 spin_lock(&pa->pa_lock);
3203 if (pa->pa_deleted == 0 && pa->pa_free) {
3204 atomic_inc(&pa->pa_count);
3205 ext4_mb_use_inode_pa(ac, pa);
3206 spin_unlock(&pa->pa_lock);
3207 ac->ac_criteria = 10;
3208 rcu_read_unlock();
3209 return 1;
3210 }
3211 spin_unlock(&pa->pa_lock);
3212 }
3213 rcu_read_unlock();
3214
3215 /* can we use group allocation? */
3216 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3217 return 0;
3218
3219 /* inode may have no locality group for some reason */
3220 lg = ac->ac_lg;
3221 if (lg == NULL)
3222 return 0;
3223 order = fls(ac->ac_o_ex.fe_len) - 1;
3224 if (order > PREALLOC_TB_SIZE - 1)
3225 /* The max size of hash table is PREALLOC_TB_SIZE */
3226 order = PREALLOC_TB_SIZE - 1;
3227
3228 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3229 /*
3230 * search for the prealloc space with the
3231 * minimal distance from the goal block.
3232 */
3233 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3234 rcu_read_lock();
3235 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3236 pa_inode_list) {
3237 spin_lock(&pa->pa_lock);
3238 if (pa->pa_deleted == 0 &&
3239 pa->pa_free >= ac->ac_o_ex.fe_len) {
3240
3241 cpa = ext4_mb_check_group_pa(goal_block,
3242 pa, cpa);
3243 }
3244 spin_unlock(&pa->pa_lock);
3245 }
3246 rcu_read_unlock();
3247 }
3248 if (cpa) {
3249 ext4_mb_use_group_pa(ac, cpa);
3250 ac->ac_criteria = 20;
3251 return 1;
3252 }
3253 return 0;
3254}
3255
3256/*
3257 * the function goes through all blocks freed in the group
3258 * but not yet committed and marks them used in the in-core bitmap.
3259 * buddy must be generated from this bitmap
3260 * Needs to be called with the ext4 group lock held
3261 */
3262static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3263 ext4_group_t group)
3264{
3265 struct rb_node *n;
3266 struct ext4_group_info *grp;
3267 struct ext4_free_data *entry;
3268
3269 grp = ext4_get_group_info(sb, group);
3270 n = rb_first(&(grp->bb_free_root));
3271
3272 while (n) {
3273 entry = rb_entry(n, struct ext4_free_data, node);
3274 mb_set_bits(bitmap, entry->start_blk, entry->count);
3275 n = rb_next(n);
3276 }
3277 return;
3278}
3279
3280/*
3281 * the function goes through all preallocations in this group and marks them
3282 * used in the in-core bitmap. buddy must be generated from this bitmap
3283 * Needs to be called with the ext4 group lock held
3284 */
3285static noinline_for_stack
3286void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3287 ext4_group_t group)
3288{
3289 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3290 struct ext4_prealloc_space *pa;
3291 struct list_head *cur;
3292 ext4_group_t groupnr;
3293 ext4_grpblk_t start;
3294 int preallocated = 0;
3295 int count = 0;
3296 int len;
3297
3298 /* every form of preallocation discard first loads the group,
3299 * so the only competing code is preallocation use.
3300 * we don't need any locking here.
3301 * notice we do NOT ignore preallocations with pa_deleted
3302 * otherwise we could leave used blocks available for
3303 * allocation in buddy when concurrent ext4_mb_put_pa()
3304 * is dropping preallocation
3305 */
3306 list_for_each(cur, &grp->bb_prealloc_list) {
3307 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3308 spin_lock(&pa->pa_lock);
3309 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3310 &groupnr, &start);
3311 len = pa->pa_len;
3312 spin_unlock(&pa->pa_lock);
3313 if (unlikely(len == 0))
3314 continue;
3315 BUG_ON(groupnr != group);
3316 mb_set_bits(bitmap, start, len);
3317 preallocated += len;
3318 count++;
3319 }
3320 mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3321}
3322
3323static void ext4_mb_pa_callback(struct rcu_head *head)
3324{
3325 struct ext4_prealloc_space *pa;
3326 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3327 kmem_cache_free(ext4_pspace_cachep, pa);
3328}
3329
3330/*
3331 * drops a reference to preallocated space descriptor
3332 * if this was the last reference and the space is consumed
3333 */
3334static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3335 struct super_block *sb, struct ext4_prealloc_space *pa)
3336{
3337 ext4_group_t grp;
3338 ext4_fsblk_t grp_blk;
3339
3340 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3341 return;
3342
3343 /* in this short window concurrent discard can set pa_deleted */
3344 spin_lock(&pa->pa_lock);
3345 if (pa->pa_deleted == 1) {
3346 spin_unlock(&pa->pa_lock);
3347 return;
3348 }
3349
3350 pa->pa_deleted = 1;
3351 spin_unlock(&pa->pa_lock);
3352
3353 grp_blk = pa->pa_pstart;
3354 /*
3355 * If doing group-based preallocation, pa_pstart may be in the
3356 * next group when pa is used up
3357 */
3358 if (pa->pa_type == MB_GROUP_PA)
3359 grp_blk--;
3360
3361 ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3362
3363 /*
3364 * possible race:
3365 *
3366 * P1 (buddy init) P2 (regular allocation)
3367 * find block B in PA
3368 * copy on-disk bitmap to buddy
3369 * mark B in on-disk bitmap
3370 * drop PA from group
3371 * mark all PAs in buddy
3372 *
3373 * thus, P1 initializes buddy with B available. to prevent this
3374 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3375 * against that pair
3376 */
3377 ext4_lock_group(sb, grp);
3378 list_del(&pa->pa_group_list);
3379 ext4_unlock_group(sb, grp);
3380
3381 spin_lock(pa->pa_obj_lock);
3382 list_del_rcu(&pa->pa_inode_list);
3383 spin_unlock(pa->pa_obj_lock);
3384
3385 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3386}
3387
3388/*
3389 * creates new preallocated space for given inode
3390 */
3391static noinline_for_stack int
3392ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3393{
3394 struct super_block *sb = ac->ac_sb;
3395 struct ext4_prealloc_space *pa;
3396 struct ext4_group_info *grp;
3397 struct ext4_inode_info *ei;
3398
3399 /* preallocate only when found space is larger than requested */
3400 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3401 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3402 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3403
3404 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3405 if (pa == NULL)
3406 return -ENOMEM;
3407
3408 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3409 int winl;
3410 int wins;
3411 int win;
3412 int offs;
3413
3414 /* we can't allocate as much as the normalizer wants,
3415 * so the found space must get a proper lstart
3416 * to cover the original request */
3417 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3418 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3419
3420 /* we're limited by the original request in that
3421 * the logical block must be covered in any case;
3422 * winl is the window we can move our chunk within */
3423 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3424
3425 /* also, we should cover whole original request */
3426 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3427
3428 /* the smallest one defines real window */
3429 win = min(winl, wins);
3430
3431 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3432 if (offs && offs < win)
3433 win = offs;
3434
3435 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3436 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3437 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3438 }
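/*
 * Illustrative numbers for the window logic above: with
 * fe_logical = 10, fe_len = 2, goal start 0 and a best extent of 8
 * blocks, winl = 10, wins = 6, and offs = 10 % 8 = 2 caps the shift,
 * so fe_logical becomes 8 and the PA stays aligned to its own length.
 */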
3439
3440 /* preallocation can change ac_b_ex, thus we store actually
3441 * allocated blocks for history */
3442 ac->ac_f_ex = ac->ac_b_ex;
3443
3444 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3445 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3446 pa->pa_len = ac->ac_b_ex.fe_len;
3447 pa->pa_free = pa->pa_len;
3448 atomic_set(&pa->pa_count, 1);
3449 spin_lock_init(&pa->pa_lock);
3450 INIT_LIST_HEAD(&pa->pa_inode_list);
3451 INIT_LIST_HEAD(&pa->pa_group_list);
3452 pa->pa_deleted = 0;
3453 pa->pa_type = MB_INODE_PA;
3454
6ba495e9 3455 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
c9de560d 3456 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
9bffad1e 3457 trace_ext4_mb_new_inode_pa(ac, pa);
c9de560d
AT
3458
3459 ext4_mb_use_inode_pa(ac, pa);
3460 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3461
3462 ei = EXT4_I(ac->ac_inode);
3463 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3464
3465 pa->pa_obj_lock = &ei->i_prealloc_lock;
3466 pa->pa_inode = ac->ac_inode;
3467
3468 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3469 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3470 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3471
3472 spin_lock(pa->pa_obj_lock);
3473 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3474 spin_unlock(pa->pa_obj_lock);
3475
3476 return 0;
3477}
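The winl/wins/offs dance above is easier to see with numbers. A standalone worked example with made-up values (not kernel code): the original request is 8 blocks at logical 100, the normalized goal started at logical 96, and the best extent found is only 16 blocks long.

#include <stdio.h>

static unsigned place_pa(unsigned o_logical, unsigned o_len,
                         unsigned g_logical, unsigned b_len)
{
        unsigned winl = o_logical - g_logical;  /* room to the left */
        unsigned wins = b_len - o_len;          /* slack inside best */
        unsigned win = winl < wins ? winl : wins;
        unsigned offs = o_logical % b_len;      /* alignment to fe_len */

        if (offs && offs < win)
                win = offs;
        return o_logical - win;                 /* pa_lstart */
}

int main(void)
{
        /* winl = 4, wins = 8, win = 4; offs = 100 % 16 = 4 is not
         * strictly smaller than win, so pa_lstart = 100 - 4 = 96 and
         * the pa [96, 112) covers the request [100, 108). */
        printf("pa_lstart = %u\n", place_pa(100, 8, 96, 16));
        return 0;
}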
3478
3479/*
 3480 * creates new preallocated space for the locality group the inode belongs to
3481 */
4ddfef7b
ES
3482static noinline_for_stack int
3483ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3484{
3485 struct super_block *sb = ac->ac_sb;
3486 struct ext4_locality_group *lg;
3487 struct ext4_prealloc_space *pa;
3488 struct ext4_group_info *grp;
3489
 3490 /* preallocate only when found space is larger than requested */
3491 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3492 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3493 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3494
3495 BUG_ON(ext4_pspace_cachep == NULL);
3496 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3497 if (pa == NULL)
3498 return -ENOMEM;
3499
3500 /* preallocation can change ac_b_ex, thus we store actually
3501 * allocated blocks for history */
3502 ac->ac_f_ex = ac->ac_b_ex;
3503
3504 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3505 pa->pa_lstart = pa->pa_pstart;
3506 pa->pa_len = ac->ac_b_ex.fe_len;
3507 pa->pa_free = pa->pa_len;
3508 atomic_set(&pa->pa_count, 1);
3509 spin_lock_init(&pa->pa_lock);
6be2ded1 3510 INIT_LIST_HEAD(&pa->pa_inode_list);
d794bf8e 3511 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 3512 pa->pa_deleted = 0;
cc0fb9ad 3513 pa->pa_type = MB_GROUP_PA;
c9de560d 3514
6ba495e9 3515 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
9bffad1e
TT
3516 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3517 trace_ext4_mb_new_group_pa(ac, pa);
c9de560d
AT
3518
3519 ext4_mb_use_group_pa(ac, pa);
3520 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3521
3522 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3523 lg = ac->ac_lg;
3524 BUG_ON(lg == NULL);
3525
3526 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3527 pa->pa_inode = NULL;
3528
3529 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3530 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3531 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3532
6be2ded1
AK
3533 /*
3534 * We will later add the new pa to the right bucket
3535 * after updating the pa_free in ext4_mb_release_context
3536 */
c9de560d
AT
3537 return 0;
3538}
3539
3540static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3541{
3542 int err;
3543
3544 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3545 err = ext4_mb_new_group_pa(ac);
3546 else
3547 err = ext4_mb_new_inode_pa(ac);
3548 return err;
3549}
3550
3551/*
3552 * finds all unused blocks in on-disk bitmap, frees them in
3553 * in-core bitmap and buddy.
3554 * @pa must be unlinked from inode and group lists, so that
3555 * nobody else can find/use it.
3556 * the caller MUST hold group/inode locks.
3557 * TODO: optimize the case when there are no in-core structures yet
3558 */
4ddfef7b
ES
3559static noinline_for_stack int
3560ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3e1e5f50 3561 struct ext4_prealloc_space *pa)
c9de560d 3562{
c9de560d
AT
3563 struct super_block *sb = e4b->bd_sb;
3564 struct ext4_sb_info *sbi = EXT4_SB(sb);
498e5f24
TT
3565 unsigned int end;
3566 unsigned int next;
c9de560d
AT
3567 ext4_group_t group;
3568 ext4_grpblk_t bit;
ba80b101 3569 unsigned long long grp_blk_start;
c9de560d
AT
3570 int err = 0;
3571 int free = 0;
3572
3573 BUG_ON(pa->pa_deleted == 0);
3574 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
ba80b101 3575 grp_blk_start = pa->pa_pstart - bit;
c9de560d
AT
3576 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3577 end = bit + pa->pa_len;
3578
c9de560d 3579 while (bit < end) {
ffad0a44 3580 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
3581 if (bit >= end)
3582 break;
ffad0a44 3583 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
6ba495e9 3584 mb_debug(1, " free preallocated %u/%u in group %u\n",
5a0790c2
AK
3585 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3586 (unsigned) next - bit, (unsigned) group);
c9de560d
AT
3587 free += next - bit;
3588
3e1e5f50 3589 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
a9c667f8
LC
3590 trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
3591 next - bit);
c9de560d
AT
3592 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3593 bit = next + 1;
3594 }
3595 if (free != pa->pa_free) {
26346ff6 3596 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
c9de560d
AT
3597 pa, (unsigned long) pa->pa_lstart,
3598 (unsigned long) pa->pa_pstart,
3599 (unsigned long) pa->pa_len);
e29136f8 3600 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5d1b1b3f 3601 free, pa->pa_free);
e56eb659
AK
3602 /*
3603 * pa is already deleted so we use the value obtained
3604 * from the bitmap and continue.
3605 */
c9de560d 3606 }
c9de560d
AT
3607 atomic_add(free, &sbi->s_mb_discarded);
3608
3609 return err;
3610}
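The loop above alternates mb_find_next_zero_bit and mb_find_next_bit to visit each maximal run of free bits exactly once. A toy rendition, assuming a byte-per-bit bitmap instead of the kernel's packed one:

#include <stdio.h>

static void walk_free_runs(const unsigned char *bm, unsigned bit, unsigned end)
{
        while (bit < end) {
                while (bit < end && bm[bit])    /* next zero bit */
                        bit++;
                if (bit >= end)
                        break;
                unsigned next = bit;
                while (next < end && !bm[next]) /* end of the zero run */
                        next++;
                printf("free run %u..%u (%u blocks)\n",
                       bit, next - 1, next - bit);
                bit = next + 1;  /* bm[next] is set, skip past it */
        }
}

int main(void)
{
        unsigned char bm[] = { 1, 0, 0, 0, 1, 1, 0, 0, 1, 0 };

        walk_free_runs(bm, 0, sizeof(bm));  /* runs: 1..3, 6..7, 9..9 */
        return 0;
}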
3611
4ddfef7b
ES
3612static noinline_for_stack int
3613ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3e1e5f50 3614 struct ext4_prealloc_space *pa)
c9de560d 3615{
c9de560d
AT
3616 struct super_block *sb = e4b->bd_sb;
3617 ext4_group_t group;
3618 ext4_grpblk_t bit;
3619
a9c667f8 3620 trace_ext4_mb_release_group_pa(pa);
c9de560d
AT
3621 BUG_ON(pa->pa_deleted == 0);
3622 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3623 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3624 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3625 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3e1e5f50 3626 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
c9de560d
AT
3627
3628 return 0;
3629}
3630
3631/*
3632 * releases all preallocations in given group
3633 *
3634 * first, we need to decide discard policy:
3635 * - when do we discard
3636 * 1) ENOSPC
3637 * - how many do we discard
3638 * 1) how many requested
3639 */
4ddfef7b
ES
3640static noinline_for_stack int
3641ext4_mb_discard_group_preallocations(struct super_block *sb,
c9de560d
AT
3642 ext4_group_t group, int needed)
3643{
3644 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3645 struct buffer_head *bitmap_bh = NULL;
3646 struct ext4_prealloc_space *pa, *tmp;
3647 struct list_head list;
3648 struct ext4_buddy e4b;
3649 int err;
3650 int busy = 0;
3651 int free = 0;
3652
6ba495e9 3653 mb_debug(1, "discard preallocation for group %u\n", group);
c9de560d
AT
3654
3655 if (list_empty(&grp->bb_prealloc_list))
3656 return 0;
3657
574ca174 3658 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 3659 if (bitmap_bh == NULL) {
12062ddd 3660 ext4_error(sb, "Error reading block bitmap for %u", group);
ce89f46c 3661 return 0;
c9de560d
AT
3662 }
3663
3664 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c 3665 if (err) {
12062ddd 3666 ext4_error(sb, "Error loading buddy information for %u", group);
ce89f46c
AK
3667 put_bh(bitmap_bh);
3668 return 0;
3669 }
c9de560d
AT
3670
3671 if (needed == 0)
3672 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3673
c9de560d 3674 INIT_LIST_HEAD(&list);
c9de560d
AT
3675repeat:
3676 ext4_lock_group(sb, group);
3677 list_for_each_entry_safe(pa, tmp,
3678 &grp->bb_prealloc_list, pa_group_list) {
3679 spin_lock(&pa->pa_lock);
3680 if (atomic_read(&pa->pa_count)) {
3681 spin_unlock(&pa->pa_lock);
3682 busy = 1;
3683 continue;
3684 }
3685 if (pa->pa_deleted) {
3686 spin_unlock(&pa->pa_lock);
3687 continue;
3688 }
3689
3690 /* seems this one can be freed ... */
3691 pa->pa_deleted = 1;
3692
3693 /* we can trust pa_free ... */
3694 free += pa->pa_free;
3695
3696 spin_unlock(&pa->pa_lock);
3697
3698 list_del(&pa->pa_group_list);
3699 list_add(&pa->u.pa_tmp_list, &list);
3700 }
3701
3702 /* if we still need more blocks and some PAs were used, try again */
3703 if (free < needed && busy) {
3704 busy = 0;
3705 ext4_unlock_group(sb, group);
3706 /*
 3707 * Yield the CPU here so that we don't get a soft lockup
 3708 * in the non-preempt case.
3709 */
3710 yield();
3711 goto repeat;
3712 }
3713
3714 /* found anything to free? */
3715 if (list_empty(&list)) {
3716 BUG_ON(free != 0);
3717 goto out;
3718 }
3719
3720 /* now free all selected PAs */
3721 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3722
3723 /* remove from object (inode or locality group) */
3724 spin_lock(pa->pa_obj_lock);
3725 list_del_rcu(&pa->pa_inode_list);
3726 spin_unlock(pa->pa_obj_lock);
3727
cc0fb9ad 3728 if (pa->pa_type == MB_GROUP_PA)
3e1e5f50 3729 ext4_mb_release_group_pa(&e4b, pa);
c9de560d 3730 else
3e1e5f50 3731 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
c9de560d
AT
3732
3733 list_del(&pa->u.pa_tmp_list);
3734 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3735 }
3736
3737out:
3738 ext4_unlock_group(sb, group);
e39e07fd 3739 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
3740 put_bh(bitmap_bh);
3741 return free;
3742}
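Note the overall shape of the function above: deletable PAs are only moved onto a private list while the group lock is held, and the expensive release work runs after the lock is dropped. A minimal userspace sketch of that collect-then-free shape (hypothetical types, a pthread mutex in place of the group lock):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int busy; };

static void discard_all(struct node **head, pthread_mutex_t *lock,
                        void (*release)(struct node *))
{
        struct node *keep = NULL, *drop = NULL, *n, *next;

        pthread_mutex_lock(lock);
        for (n = *head; n; n = next) {
                next = n->next;
                if (n->busy) {            /* still referenced: keep it */
                        n->next = keep;
                        keep = n;
                } else {                  /* move to the private list */
                        n->next = drop;
                        drop = n;
                }
        }
        *head = keep;
        pthread_mutex_unlock(lock);

        for (n = drop; n; n = next) {     /* heavy work, lock not held */
                next = n->next;
                release(n);
        }
}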
3743
3744/*
3745 * releases all non-used preallocated blocks for given inode
3746 *
3747 * It's important to discard preallocations under i_data_sem
3748 * We don't want another block to be served from the prealloc
3749 * space when we are discarding the inode prealloc space.
3750 *
3751 * FIXME!! Make sure it is valid at all the call sites
3752 */
c2ea3fde 3753void ext4_discard_preallocations(struct inode *inode)
c9de560d
AT
3754{
3755 struct ext4_inode_info *ei = EXT4_I(inode);
3756 struct super_block *sb = inode->i_sb;
3757 struct buffer_head *bitmap_bh = NULL;
3758 struct ext4_prealloc_space *pa, *tmp;
3759 ext4_group_t group = 0;
3760 struct list_head list;
3761 struct ext4_buddy e4b;
3762 int err;
3763
c2ea3fde 3764 if (!S_ISREG(inode->i_mode)) {
c9de560d
AT
3765 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3766 return;
3767 }
3768
6ba495e9 3769 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
9bffad1e 3770 trace_ext4_discard_preallocations(inode);
c9de560d
AT
3771
3772 INIT_LIST_HEAD(&list);
3773
3774repeat:
3775 /* first, collect all pa's in the inode */
3776 spin_lock(&ei->i_prealloc_lock);
3777 while (!list_empty(&ei->i_prealloc_list)) {
3778 pa = list_entry(ei->i_prealloc_list.next,
3779 struct ext4_prealloc_space, pa_inode_list);
3780 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3781 spin_lock(&pa->pa_lock);
3782 if (atomic_read(&pa->pa_count)) {
3783 /* this shouldn't happen often - nobody should
3784 * use preallocation while we're discarding it */
3785 spin_unlock(&pa->pa_lock);
3786 spin_unlock(&ei->i_prealloc_lock);
3787 printk(KERN_ERR "uh-oh! used pa while discarding\n");
3788 WARN_ON(1);
3789 schedule_timeout_uninterruptible(HZ);
3790 goto repeat;
3791
3792 }
3793 if (pa->pa_deleted == 0) {
3794 pa->pa_deleted = 1;
3795 spin_unlock(&pa->pa_lock);
3796 list_del_rcu(&pa->pa_inode_list);
3797 list_add(&pa->u.pa_tmp_list, &list);
3798 continue;
3799 }
3800
3801 /* someone is deleting pa right now */
3802 spin_unlock(&pa->pa_lock);
3803 spin_unlock(&ei->i_prealloc_lock);
3804
 3805 /* we have to wait here because pa_deleted
 3806 * doesn't mean the pa is already unlinked from
 3807 * the list. since we might be called from
 3808 * ->clear_inode(), the inode will get freed
 3809 * and a concurrent thread unlinking the pa
 3810 * from the inode's list may access already
 3811 * freed memory, bad-bad-bad */
3812
3813 /* XXX: if this happens too often, we can
3814 * add a flag to force wait only in case
3815 * of ->clear_inode(), but not in case of
3816 * regular truncate */
3817 schedule_timeout_uninterruptible(HZ);
3818 goto repeat;
3819 }
3820 spin_unlock(&ei->i_prealloc_lock);
3821
3822 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
cc0fb9ad 3823 BUG_ON(pa->pa_type != MB_INODE_PA);
c9de560d
AT
3824 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3825
3826 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c 3827 if (err) {
12062ddd
ES
3828 ext4_error(sb, "Error loading buddy information for %u",
3829 group);
ce89f46c
AK
3830 continue;
3831 }
c9de560d 3832
574ca174 3833 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 3834 if (bitmap_bh == NULL) {
12062ddd
ES
3835 ext4_error(sb, "Error reading block bitmap for %u",
3836 group);
e39e07fd 3837 ext4_mb_unload_buddy(&e4b);
ce89f46c 3838 continue;
c9de560d
AT
3839 }
3840
3841 ext4_lock_group(sb, group);
3842 list_del(&pa->pa_group_list);
3e1e5f50 3843 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
c9de560d
AT
3844 ext4_unlock_group(sb, group);
3845
e39e07fd 3846 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
3847 put_bh(bitmap_bh);
3848
3849 list_del(&pa->u.pa_tmp_list);
3850 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3851 }
3852}
3853
6ba495e9 3854#ifdef CONFIG_EXT4_DEBUG
c9de560d
AT
3855static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3856{
3857 struct super_block *sb = ac->ac_sb;
8df9675f 3858 ext4_group_t ngroups, i;
c9de560d 3859
4dd89fc6
TT
3860 if (!mb_enable_debug ||
3861 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
e3570639
ES
3862 return;
3863
c9de560d
AT
3864 printk(KERN_ERR "EXT4-fs: Can't allocate:"
3865 " Allocation context details:\n");
3866 printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3867 ac->ac_status, ac->ac_flags);
3868 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3869 "best %lu/%lu/%lu@%lu cr %d\n",
3870 (unsigned long)ac->ac_o_ex.fe_group,
3871 (unsigned long)ac->ac_o_ex.fe_start,
3872 (unsigned long)ac->ac_o_ex.fe_len,
3873 (unsigned long)ac->ac_o_ex.fe_logical,
3874 (unsigned long)ac->ac_g_ex.fe_group,
3875 (unsigned long)ac->ac_g_ex.fe_start,
3876 (unsigned long)ac->ac_g_ex.fe_len,
3877 (unsigned long)ac->ac_g_ex.fe_logical,
3878 (unsigned long)ac->ac_b_ex.fe_group,
3879 (unsigned long)ac->ac_b_ex.fe_start,
3880 (unsigned long)ac->ac_b_ex.fe_len,
3881 (unsigned long)ac->ac_b_ex.fe_logical,
3882 (int)ac->ac_criteria);
3883 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3884 ac->ac_found);
3885 printk(KERN_ERR "EXT4-fs: groups: \n");
8df9675f
TT
3886 ngroups = ext4_get_groups_count(sb);
3887 for (i = 0; i < ngroups; i++) {
c9de560d
AT
3888 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3889 struct ext4_prealloc_space *pa;
3890 ext4_grpblk_t start;
3891 struct list_head *cur;
3892 ext4_lock_group(sb, i);
3893 list_for_each(cur, &grp->bb_prealloc_list) {
3894 pa = list_entry(cur, struct ext4_prealloc_space,
3895 pa_group_list);
3896 spin_lock(&pa->pa_lock);
3897 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3898 NULL, &start);
3899 spin_unlock(&pa->pa_lock);
1c718505
AF
3900 printk(KERN_ERR "PA:%u:%d:%u \n", i,
3901 start, pa->pa_len);
c9de560d 3902 }
60bd63d1 3903 ext4_unlock_group(sb, i);
c9de560d
AT
3904
3905 if (grp->bb_free == 0)
3906 continue;
1c718505 3907 printk(KERN_ERR "%u: %d/%d \n",
c9de560d
AT
3908 i, grp->bb_free, grp->bb_fragments);
3909 }
3910 printk(KERN_ERR "\n");
3911}
3912#else
3913static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3914{
3915 return;
3916}
3917#endif
3918
3919/*
 3920 * We use locality group preallocation for small files. The size of the
 3921 * file is determined by the current size or the resulting size after
 3922 * allocation, whichever is larger
3923 *
b713a5ec 3924 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
c9de560d
AT
3925 */
3926static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3927{
3928 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3929 int bsbits = ac->ac_sb->s_blocksize_bits;
3930 loff_t size, isize;
3931
3932 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3933 return;
3934
4ba74d00
TT
3935 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3936 return;
3937
c9de560d 3938 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
50797481
TT
3939 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3940 >> bsbits;
c9de560d 3941
50797481
TT
3942 if ((size == isize) &&
3943 !ext4_fs_is_busy(sbi) &&
3944 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3945 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3946 return;
3947 }
3948
c9de560d 3949 /* don't use group allocation for large files */
71780577 3950 size = max(size, isize);
cc483f10 3951 if (size > sbi->s_mb_stream_request) {
4ba74d00 3952 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
c9de560d 3953 return;
4ba74d00 3954 }
c9de560d
AT
3955
3956 BUG_ON(ac->ac_lg != NULL);
3957 /*
3958 * locality group prealloc space are per cpu. The reason for having
3959 * per cpu locality group is to reduce the contention between block
3960 * request from multiple CPUs.
3961 */
ca0c9584 3962 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
c9de560d
AT
3963
3964 /* we're going to use group allocation */
3965 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3966
3967 /* serialize all allocations in the group */
3968 mutex_lock(&ac->ac_lg->lg_mutex);
3969}
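Stripped of the early exits, the policy above is a single comparison: take the larger of the logical end of this write and the on-disk size, and use the per-CPU locality group only when that stays at or below the stream threshold. A condensed sketch (the threshold parameter corresponds to the tunable s_mb_stream_request; the closed-file NOPREALLOC special case is omitted):

#include <stdbool.h>

static bool use_group_pa(unsigned long long logical_end_blk,
                         unsigned long long isize_blk,
                         unsigned long stream_req)
{
        unsigned long long size =
                logical_end_blk > isize_blk ? logical_end_blk : isize_blk;

        return size <= stream_req;  /* small file: locality group pa */
}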
3970
4ddfef7b
ES
3971static noinline_for_stack int
3972ext4_mb_initialize_context(struct ext4_allocation_context *ac,
c9de560d
AT
3973 struct ext4_allocation_request *ar)
3974{
3975 struct super_block *sb = ar->inode->i_sb;
3976 struct ext4_sb_info *sbi = EXT4_SB(sb);
3977 struct ext4_super_block *es = sbi->s_es;
3978 ext4_group_t group;
498e5f24
TT
3979 unsigned int len;
3980 ext4_fsblk_t goal;
c9de560d
AT
3981 ext4_grpblk_t block;
3982
3983 /* we can't allocate > group size */
3984 len = ar->len;
3985
3986 /* just a dirty hack to filter too big requests */
3987 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
3988 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
3989
3990 /* start searching from the goal */
3991 goal = ar->goal;
3992 if (goal < le32_to_cpu(es->s_first_data_block) ||
3993 goal >= ext4_blocks_count(es))
3994 goal = le32_to_cpu(es->s_first_data_block);
3995 ext4_get_group_no_and_offset(sb, goal, &group, &block);
3996
3997 /* set up allocation goals */
833576b3 3998 memset(ac, 0, sizeof(struct ext4_allocation_context));
c9de560d 3999 ac->ac_b_ex.fe_logical = ar->logical;
c9de560d 4000 ac->ac_status = AC_STATUS_CONTINUE;
c9de560d
AT
4001 ac->ac_sb = sb;
4002 ac->ac_inode = ar->inode;
4003 ac->ac_o_ex.fe_logical = ar->logical;
4004 ac->ac_o_ex.fe_group = group;
4005 ac->ac_o_ex.fe_start = block;
4006 ac->ac_o_ex.fe_len = len;
4007 ac->ac_g_ex.fe_logical = ar->logical;
4008 ac->ac_g_ex.fe_group = group;
4009 ac->ac_g_ex.fe_start = block;
4010 ac->ac_g_ex.fe_len = len;
c9de560d 4011 ac->ac_flags = ar->flags;
c9de560d
AT
4012
 4013 /* we have to define the context: will we work with a file or
 4014 * a locality group? this is a policy, actually */
4015 ext4_mb_group_or_file(ac);
4016
6ba495e9 4017 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
c9de560d
AT
4018 "left: %u/%u, right %u/%u to %swritable\n",
4019 (unsigned) ar->len, (unsigned) ar->logical,
4020 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4021 (unsigned) ar->lleft, (unsigned) ar->pleft,
4022 (unsigned) ar->lright, (unsigned) ar->pright,
4023 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4024 return 0;
4025
4026}
4027
6be2ded1
AK
4028static noinline_for_stack void
4029ext4_mb_discard_lg_preallocations(struct super_block *sb,
4030 struct ext4_locality_group *lg,
4031 int order, int total_entries)
4032{
4033 ext4_group_t group = 0;
4034 struct ext4_buddy e4b;
4035 struct list_head discard_list;
4036 struct ext4_prealloc_space *pa, *tmp;
6be2ded1 4037
6ba495e9 4038 mb_debug(1, "discard locality group preallocation\n");
6be2ded1
AK
4039
4040 INIT_LIST_HEAD(&discard_list);
6be2ded1
AK
4041
4042 spin_lock(&lg->lg_prealloc_lock);
4043 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4044 pa_inode_list) {
4045 spin_lock(&pa->pa_lock);
4046 if (atomic_read(&pa->pa_count)) {
4047 /*
4048 * This is the pa that we just used
4049 * for block allocation. So don't
4050 * free that
4051 */
4052 spin_unlock(&pa->pa_lock);
4053 continue;
4054 }
4055 if (pa->pa_deleted) {
4056 spin_unlock(&pa->pa_lock);
4057 continue;
4058 }
4059 /* only lg prealloc space */
cc0fb9ad 4060 BUG_ON(pa->pa_type != MB_GROUP_PA);
6be2ded1
AK
4061
4062 /* seems this one can be freed ... */
4063 pa->pa_deleted = 1;
4064 spin_unlock(&pa->pa_lock);
4065
4066 list_del_rcu(&pa->pa_inode_list);
4067 list_add(&pa->u.pa_tmp_list, &discard_list);
4068
4069 total_entries--;
4070 if (total_entries <= 5) {
4071 /*
 4072 * we want to keep only 5 entries,
 4073 * allowing the list to grow to 8. This
 4074 * makes sure we don't call discard
 4075 * again soon for this list.
4076 */
4077 break;
4078 }
4079 }
4080 spin_unlock(&lg->lg_prealloc_lock);
4081
4082 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4083
4084 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4085 if (ext4_mb_load_buddy(sb, group, &e4b)) {
12062ddd
ES
4086 ext4_error(sb, "Error loading buddy information for %u",
4087 group);
6be2ded1
AK
4088 continue;
4089 }
4090 ext4_lock_group(sb, group);
4091 list_del(&pa->pa_group_list);
3e1e5f50 4092 ext4_mb_release_group_pa(&e4b, pa);
6be2ded1
AK
4093 ext4_unlock_group(sb, group);
4094
e39e07fd 4095 ext4_mb_unload_buddy(&e4b);
6be2ded1
AK
4096 list_del(&pa->u.pa_tmp_list);
4097 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4098 }
6be2ded1
AK
4099}
4100
4101/*
4102 * We have incremented pa_count. So it cannot be freed at this
4103 * point. Also we hold lg_mutex. So no parallel allocation is
4104 * possible from this lg. That means pa_free cannot be updated.
4105 *
 4106 * A parallel ext4_mb_discard_group_preallocations is possible,
 4107 * however, which can cause the lg_prealloc_list to be updated.
4108 */
4109
4110static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4111{
4112 int order, added = 0, lg_prealloc_count = 1;
4113 struct super_block *sb = ac->ac_sb;
4114 struct ext4_locality_group *lg = ac->ac_lg;
4115 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4116
4117 order = fls(pa->pa_free) - 1;
4118 if (order > PREALLOC_TB_SIZE - 1)
4119 /* The max size of hash table is PREALLOC_TB_SIZE */
4120 order = PREALLOC_TB_SIZE - 1;
4121 /* Add the prealloc space to lg */
4122 rcu_read_lock();
4123 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4124 pa_inode_list) {
4125 spin_lock(&tmp_pa->pa_lock);
4126 if (tmp_pa->pa_deleted) {
e7c9e3e9 4127 spin_unlock(&tmp_pa->pa_lock);
6be2ded1
AK
4128 continue;
4129 }
4130 if (!added && pa->pa_free < tmp_pa->pa_free) {
4131 /* Add to the tail of the previous entry */
4132 list_add_tail_rcu(&pa->pa_inode_list,
4133 &tmp_pa->pa_inode_list);
4134 added = 1;
4135 /*
4136 * we want to count the total
4137 * number of entries in the list
4138 */
4139 }
4140 spin_unlock(&tmp_pa->pa_lock);
4141 lg_prealloc_count++;
4142 }
4143 if (!added)
4144 list_add_tail_rcu(&pa->pa_inode_list,
4145 &lg->lg_prealloc_list[order]);
4146 rcu_read_unlock();
4147
4148 /* Now trim the list to be not more than 8 elements */
4149 if (lg_prealloc_count > 8) {
4150 ext4_mb_discard_lg_preallocations(sb, lg,
4151 order, lg_prealloc_count);
4152 return;
4153 }
4154 return ;
4155}
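The bucket index computed above is just a capped floor-log2 of pa_free: fls() returns the 1-based index of the highest set bit, so a pa with pa_free in [2^n, 2^(n+1)) lands in bucket n. A portable sketch of the same computation:

static int pa_bucket(unsigned int pa_free, int table_size)
{
        int order = 0;

        while (pa_free >>= 1)       /* portable fls(pa_free) - 1 */
                order++;
        if (order > table_size - 1) /* cap at the last bucket */
                order = table_size - 1;
        return order;
}
/* e.g. pa_bucket(24, 10) == 4, since 16 <= 24 < 32 */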
4156
c9de560d
AT
4157/*
4158 * release all resource we used in allocation
4159 */
4160static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4161{
6be2ded1
AK
4162 struct ext4_prealloc_space *pa = ac->ac_pa;
4163 if (pa) {
cc0fb9ad 4164 if (pa->pa_type == MB_GROUP_PA) {
c9de560d 4165 /* see comment in ext4_mb_use_group_pa() */
6be2ded1
AK
4166 spin_lock(&pa->pa_lock);
4167 pa->pa_pstart += ac->ac_b_ex.fe_len;
4168 pa->pa_lstart += ac->ac_b_ex.fe_len;
4169 pa->pa_free -= ac->ac_b_ex.fe_len;
4170 pa->pa_len -= ac->ac_b_ex.fe_len;
4171 spin_unlock(&pa->pa_lock);
c9de560d 4172 }
c9de560d 4173 }
ba443916
AK
4174 if (pa) {
4175 /*
4176 * We want to add the pa to the right bucket.
 4177 * Remove it from the list, and while adding
 4178 * make sure the list we are adding to
44183d42 4179 * doesn't grow big.
ba443916 4180 */
cc0fb9ad 4181 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
ba443916
AK
4182 spin_lock(pa->pa_obj_lock);
4183 list_del_rcu(&pa->pa_inode_list);
4184 spin_unlock(pa->pa_obj_lock);
4185 ext4_mb_add_n_trim(ac);
4186 }
4187 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4188 }
c9de560d
AT
4189 if (ac->ac_bitmap_page)
4190 page_cache_release(ac->ac_bitmap_page);
4191 if (ac->ac_buddy_page)
4192 page_cache_release(ac->ac_buddy_page);
4193 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4194 mutex_unlock(&ac->ac_lg->lg_mutex);
4195 ext4_mb_collect_stats(ac);
4196 return 0;
4197}
4198
4199static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4200{
8df9675f 4201 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
c9de560d
AT
4202 int ret;
4203 int freed = 0;
4204
9bffad1e 4205 trace_ext4_mb_discard_preallocations(sb, needed);
8df9675f 4206 for (i = 0; i < ngroups && needed > 0; i++) {
c9de560d
AT
4207 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4208 freed += ret;
4209 needed -= ret;
4210 }
4211
4212 return freed;
4213}
4214
4215/*
4216 * Main entry point into mballoc to allocate blocks
4217 * it tries to use preallocation first, then falls back
4218 * to usual allocation
4219 */
4220ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6c7a120a 4221 struct ext4_allocation_request *ar, int *errp)
c9de560d 4222{
6bc6e63f 4223 int freed;
256bdb49 4224 struct ext4_allocation_context *ac = NULL;
c9de560d
AT
4225 struct ext4_sb_info *sbi;
4226 struct super_block *sb;
4227 ext4_fsblk_t block = 0;
60e58e0f 4228 unsigned int inquota = 0;
498e5f24 4229 unsigned int reserv_blks = 0;
c9de560d
AT
4230
4231 sb = ar->inode->i_sb;
4232 sbi = EXT4_SB(sb);
4233
9bffad1e 4234 trace_ext4_request_blocks(ar);
ba80b101 4235
60e58e0f
MC
4236 /*
4237 * For delayed allocation, we could skip the ENOSPC and
4238 * EDQUOT check, as blocks and quotas have been already
 4239 * reserved when the data was copied into the pagecache.
4240 */
f2321097 4241 if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
60e58e0f
MC
4242 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4243 else {
4244 /* Without delayed allocation we need to verify
 4245 * there are enough free blocks to do the block allocation
4246 * and verify allocation doesn't exceed the quota limits.
d2a17637 4247 */
55f020db
AH
4248 while (ar->len &&
4249 ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
4250
030ba6bc
AK
 4251 /* let others free the space */
4252 yield();
4253 ar->len = ar->len >> 1;
4254 }
4255 if (!ar->len) {
a30d542a
AK
4256 *errp = -ENOSPC;
4257 return 0;
4258 }
6bc6e63f 4259 reserv_blks = ar->len;
55f020db
AH
4260 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4261 dquot_alloc_block_nofail(ar->inode, ar->len);
4262 } else {
4263 while (ar->len &&
4264 dquot_alloc_block(ar->inode, ar->len)) {
4265
4266 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4267 ar->len--;
4268 }
60e58e0f
MC
4269 }
4270 inquota = ar->len;
4271 if (ar->len == 0) {
4272 *errp = -EDQUOT;
6c7a120a 4273 goto out;
60e58e0f 4274 }
07031431 4275 }
d2a17637 4276
256bdb49 4277 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
833576b3 4278 if (!ac) {
363d4251 4279 ar->len = 0;
256bdb49 4280 *errp = -ENOMEM;
6c7a120a 4281 goto out;
256bdb49
ES
4282 }
4283
256bdb49 4284 *errp = ext4_mb_initialize_context(ac, ar);
c9de560d
AT
4285 if (*errp) {
4286 ar->len = 0;
6c7a120a 4287 goto out;
c9de560d
AT
4288 }
4289
256bdb49
ES
4290 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4291 if (!ext4_mb_use_preallocated(ac)) {
256bdb49
ES
4292 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4293 ext4_mb_normalize_request(ac, ar);
c9de560d
AT
4294repeat:
4295 /* allocate space in core */
6c7a120a
AK
4296 *errp = ext4_mb_regular_allocator(ac);
4297 if (*errp)
4298 goto errout;
c9de560d
AT
4299
4300 /* as we've just preallocated more space than
 4301 * user requested originally, we store the allocated
4302 * space in a special descriptor */
256bdb49
ES
4303 if (ac->ac_status == AC_STATUS_FOUND &&
4304 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4305 ext4_mb_new_preallocation(ac);
c9de560d 4306 }
256bdb49 4307 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6bc6e63f 4308 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
6c7a120a 4309 if (*errp == -EAGAIN) {
8556e8f3
AK
4310 /*
4311 * drop the reference that we took
4312 * in ext4_mb_use_best_found
4313 */
4314 ext4_mb_release_context(ac);
519deca0
AK
4315 ac->ac_b_ex.fe_group = 0;
4316 ac->ac_b_ex.fe_start = 0;
4317 ac->ac_b_ex.fe_len = 0;
4318 ac->ac_status = AC_STATUS_CONTINUE;
4319 goto repeat;
6c7a120a
AK
4320 } else if (*errp)
4321 errout:
b844167e 4322 ext4_discard_allocated_blocks(ac);
6c7a120a 4323 else {
519deca0
AK
4324 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4325 ar->len = ac->ac_b_ex.fe_len;
4326 }
c9de560d 4327 } else {
256bdb49 4328 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
c9de560d
AT
4329 if (freed)
4330 goto repeat;
4331 *errp = -ENOSPC;
6c7a120a
AK
4332 }
4333
4334 if (*errp) {
256bdb49 4335 ac->ac_b_ex.fe_len = 0;
c9de560d 4336 ar->len = 0;
256bdb49 4337 ext4_mb_show_ac(ac);
c9de560d 4338 }
256bdb49 4339 ext4_mb_release_context(ac);
6c7a120a
AK
4340out:
4341 if (ac)
4342 kmem_cache_free(ext4_ac_cachep, ac);
60e58e0f 4343 if (inquota && ar->len < inquota)
5dd4056d 4344 dquot_free_block(ar->inode, inquota - ar->len);
0087d9fb 4345 if (!ar->len) {
f2321097
TT
4346 if (!ext4_test_inode_state(ar->inode,
4347 EXT4_STATE_DELALLOC_RESERVED))
0087d9fb
AK
4348 /* release all the reserved blocks if non delalloc */
4349 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4350 reserv_blks);
4351 }
c9de560d 4352
9bffad1e 4353 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
ba80b101 4354
c9de560d
AT
4355 return block;
4356}
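One detail worth isolating from ext4_mb_new_blocks is the non-delalloc claim loop: when the filesystem cannot commit ar->len blocks, the request is halved and retried, so a large allocation shrinks gracefully under ENOSPC pressure instead of failing outright. A toy version of that back-off, with the claim as a caller-supplied predicate (returning 0 on success, like ext4_claim_free_blocks):

static unsigned long claim_with_backoff(unsigned long want,
                                        int (*claim)(unsigned long))
{
        while (want && claim(want))  /* non-zero: not enough space */
                want >>= 1;          /* halve and retry */
        return want;                 /* 0 means ENOSPC */
}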
c9de560d 4357
c894058d
AK
4358/*
4359 * We can merge two free data extents only if the physical blocks
4360 * are contiguous, AND the extents were freed by the same transaction,
4361 * AND the blocks are associated with the same group.
4362 */
4363static int can_merge(struct ext4_free_data *entry1,
4364 struct ext4_free_data *entry2)
4365{
4366 if ((entry1->t_tid == entry2->t_tid) &&
4367 (entry1->group == entry2->group) &&
4368 ((entry1->start_blk + entry1->count) == entry2->start_blk))
4369 return 1;
4370 return 0;
4371}
4372
4ddfef7b
ES
4373static noinline_for_stack int
4374ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
7a2fcbf7 4375 struct ext4_free_data *new_entry)
c9de560d 4376{
e29136f8 4377 ext4_group_t group = e4b->bd_group;
7a2fcbf7
AK
4378 ext4_grpblk_t block;
4379 struct ext4_free_data *entry;
c9de560d
AT
4380 struct ext4_group_info *db = e4b->bd_info;
4381 struct super_block *sb = e4b->bd_sb;
4382 struct ext4_sb_info *sbi = EXT4_SB(sb);
c894058d
AK
4383 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4384 struct rb_node *parent = NULL, *new_node;
4385
0390131b 4386 BUG_ON(!ext4_handle_valid(handle));
c9de560d
AT
4387 BUG_ON(e4b->bd_bitmap_page == NULL);
4388 BUG_ON(e4b->bd_buddy_page == NULL);
4389
c894058d 4390 new_node = &new_entry->node;
7a2fcbf7 4391 block = new_entry->start_blk;
c894058d 4392
c894058d
AK
4393 if (!*n) {
 4394 /* first free block extent. We need to
 4395 * protect the buddy cache from being freed,
4396 * otherwise we'll refresh it from
4397 * on-disk bitmap and lose not-yet-available
4398 * blocks */
4399 page_cache_get(e4b->bd_buddy_page);
4400 page_cache_get(e4b->bd_bitmap_page);
4401 }
4402 while (*n) {
4403 parent = *n;
4404 entry = rb_entry(parent, struct ext4_free_data, node);
4405 if (block < entry->start_blk)
4406 n = &(*n)->rb_left;
4407 else if (block >= (entry->start_blk + entry->count))
4408 n = &(*n)->rb_right;
4409 else {
e29136f8
TT
4410 ext4_grp_locked_error(sb, group, 0,
4411 ext4_group_first_block_no(sb, group) + block,
4412 "Block already on to-be-freed list");
c894058d 4413 return 0;
c9de560d 4414 }
c894058d 4415 }
c9de560d 4416
c894058d
AK
4417 rb_link_node(new_node, parent, n);
4418 rb_insert_color(new_node, &db->bb_free_root);
4419
4420 /* Now try to see the extent can be merged to left and right */
4421 node = rb_prev(new_node);
4422 if (node) {
4423 entry = rb_entry(node, struct ext4_free_data, node);
4424 if (can_merge(entry, new_entry)) {
4425 new_entry->start_blk = entry->start_blk;
4426 new_entry->count += entry->count;
4427 rb_erase(node, &(db->bb_free_root));
4428 spin_lock(&sbi->s_md_lock);
4429 list_del(&entry->list);
4430 spin_unlock(&sbi->s_md_lock);
4431 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d 4432 }
c894058d 4433 }
c9de560d 4434
c894058d
AK
4435 node = rb_next(new_node);
4436 if (node) {
4437 entry = rb_entry(node, struct ext4_free_data, node);
4438 if (can_merge(new_entry, entry)) {
4439 new_entry->count += entry->count;
4440 rb_erase(node, &(db->bb_free_root));
4441 spin_lock(&sbi->s_md_lock);
4442 list_del(&entry->list);
4443 spin_unlock(&sbi->s_md_lock);
4444 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d
AT
4445 }
4446 }
3e624fc7 4447 /* Add the extent to transaction's private list */
c894058d 4448 spin_lock(&sbi->s_md_lock);
3e624fc7 4449 list_add(&new_entry->list, &handle->h_transaction->t_private_list);
c894058d 4450 spin_unlock(&sbi->s_md_lock);
c9de560d
AT
4451 return 0;
4452}
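After linking the new node, the code above tries to coalesce with the in-order predecessor and successor, subject to can_merge. The same merge logic in a small array-based sketch (the tid/group checks are omitted and a sorted array stands in for the rbtree; illustrative only, not the kernel structure):

#include <string.h>

struct run { unsigned start, count; };

/* Insert [start, start+count) into a sorted, non-overlapping array,
 * coalescing with adjacent runs. Returns the new run count; the
 * caller must guarantee capacity for one more element. */
static int insert_run(struct run *r, int n, unsigned start, unsigned count)
{
        int i = 0;

        while (i < n && r[i].start < start)
                i++;
        if (i > 0 && r[i - 1].start + r[i - 1].count == start) {
                r[i - 1].count += count;          /* merge left */
                if (i < n && r[i - 1].start + r[i - 1].count == r[i].start) {
                        r[i - 1].count += r[i].count; /* and right */
                        memmove(&r[i], &r[i + 1], (n - i - 1) * sizeof(*r));
                        n--;
                }
                return n;
        }
        if (i < n && start + count == r[i].start) {
                r[i].start = start;               /* merge right */
                r[i].count += count;
                return n;
        }
        memmove(&r[i + 1], &r[i], (n - i) * sizeof(*r));
        r[i] = (struct run){ start, count };      /* plain insert */
        return n + 1;
}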
4453
44338711
TT
4454/**
4455 * ext4_free_blocks() -- Free given blocks and update quota
4456 * @handle: handle for this transaction
4457 * @inode: inode
4458 * @block: start physical block to free
 4459 * @count: number of blocks to free
5def1360 4460 * @flags: flags used by ext4_free_blocks
c9de560d 4461 */
44338711 4462void ext4_free_blocks(handle_t *handle, struct inode *inode,
e6362609
TT
4463 struct buffer_head *bh, ext4_fsblk_t block,
4464 unsigned long count, int flags)
c9de560d 4465{
26346ff6 4466 struct buffer_head *bitmap_bh = NULL;
c9de560d 4467 struct super_block *sb = inode->i_sb;
c9de560d 4468 struct ext4_group_desc *gdp;
44338711 4469 unsigned long freed = 0;
498e5f24 4470 unsigned int overflow;
c9de560d
AT
4471 ext4_grpblk_t bit;
4472 struct buffer_head *gd_bh;
4473 ext4_group_t block_group;
4474 struct ext4_sb_info *sbi;
4475 struct ext4_buddy e4b;
4476 int err = 0;
4477 int ret;
4478
e6362609
TT
4479 if (bh) {
4480 if (block)
4481 BUG_ON(block != bh->b_blocknr);
4482 else
4483 block = bh->b_blocknr;
4484 }
c9de560d 4485
c9de560d 4486 sbi = EXT4_SB(sb);
1f2acb60
TT
4487 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4488 !ext4_data_block_valid(sbi, block, count)) {
12062ddd 4489 ext4_error(sb, "Freeing blocks not in datazone - "
1f2acb60 4490 "block = %llu, count = %lu", block, count);
c9de560d
AT
4491 goto error_return;
4492 }
4493
0610b6e9 4494 ext4_debug("freeing block %llu\n", block);
e6362609
TT
4495 trace_ext4_free_blocks(inode, block, count, flags);
4496
4497 if (flags & EXT4_FREE_BLOCKS_FORGET) {
4498 struct buffer_head *tbh = bh;
4499 int i;
4500
4501 BUG_ON(bh && (count > 1));
4502
4503 for (i = 0; i < count; i++) {
4504 if (!bh)
4505 tbh = sb_find_get_block(inode->i_sb,
4506 block + i);
87783690
NK
4507 if (unlikely(!tbh))
4508 continue;
60e6679e 4509 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
e6362609
TT
4510 inode, tbh, block + i);
4511 }
4512 }
4513
60e6679e 4514 /*
e6362609
TT
4515 * We need to make sure we don't reuse the freed block until
4516 * after the transaction is committed, which we can do by
4517 * treating the block as metadata, below. We make an
4518 * exception if the inode is to be written in writeback mode
4519 * since writeback mode has weak data consistency guarantees.
4520 */
4521 if (!ext4_should_writeback_data(inode))
4522 flags |= EXT4_FREE_BLOCKS_METADATA;
c9de560d 4523
c9de560d
AT
4524do_more:
4525 overflow = 0;
4526 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4527
4528 /*
4529 * Check to see if we are freeing blocks across a group
4530 * boundary.
4531 */
4532 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4533 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4534 count -= overflow;
4535 }
574ca174 4536 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
ce89f46c
AK
4537 if (!bitmap_bh) {
4538 err = -EIO;
c9de560d 4539 goto error_return;
ce89f46c 4540 }
c9de560d 4541 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
ce89f46c
AK
4542 if (!gdp) {
4543 err = -EIO;
c9de560d 4544 goto error_return;
ce89f46c 4545 }
c9de560d
AT
4546
4547 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4548 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4549 in_range(block, ext4_inode_table(sb, gdp),
4550 EXT4_SB(sb)->s_itb_per_group) ||
4551 in_range(block + count - 1, ext4_inode_table(sb, gdp),
4552 EXT4_SB(sb)->s_itb_per_group)) {
4553
12062ddd 4554 ext4_error(sb, "Freeing blocks in system zone - "
0610b6e9 4555 "Block = %llu, count = %lu", block, count);
519deca0
AK
4556 /* err = 0. ext4_std_error should be a no op */
4557 goto error_return;
c9de560d
AT
4558 }
4559
4560 BUFFER_TRACE(bitmap_bh, "getting write access");
4561 err = ext4_journal_get_write_access(handle, bitmap_bh);
4562 if (err)
4563 goto error_return;
4564
4565 /*
4566 * We are about to modify some metadata. Call the journal APIs
4567 * to unshare ->b_data if a currently-committing transaction is
4568 * using it
4569 */
4570 BUFFER_TRACE(gd_bh, "get_write_access");
4571 err = ext4_journal_get_write_access(handle, gd_bh);
4572 if (err)
4573 goto error_return;
c9de560d
AT
4574#ifdef AGGRESSIVE_CHECK
4575 {
4576 int i;
4577 for (i = 0; i < count; i++)
4578 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4579 }
4580#endif
3e1e5f50 4581 trace_ext4_mballoc_free(sb, inode, block_group, bit, count);
c9de560d 4582
920313a7
AK
4583 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4584 if (err)
4585 goto error_return;
e6362609
TT
4586
4587 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
7a2fcbf7
AK
4588 struct ext4_free_data *new_entry;
4589 /*
4590 * blocks being freed are metadata. these blocks shouldn't
4591 * be used until this transaction is committed
4592 */
b72143ab
TT
4593 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4594 if (!new_entry) {
4595 err = -ENOMEM;
4596 goto error_return;
4597 }
7a2fcbf7
AK
4598 new_entry->start_blk = bit;
4599 new_entry->group = block_group;
4600 new_entry->count = count;
4601 new_entry->t_tid = handle->h_transaction->t_tid;
955ce5f5 4602
7a2fcbf7 4603 ext4_lock_group(sb, block_group);
955ce5f5 4604 mb_clear_bits(bitmap_bh->b_data, bit, count);
7a2fcbf7 4605 ext4_mb_free_metadata(handle, &e4b, new_entry);
c9de560d 4606 } else {
7a2fcbf7
AK
4607 /* need to update group_info->bb_free and bitmap
 4608 * with the group lock held. generate_buddy looks at
 4609 * them with the group lock held
4610 */
955ce5f5
AK
4611 ext4_lock_group(sb, block_group);
4612 mb_clear_bits(bitmap_bh->b_data, bit, count);
7e5a8cdd 4613 mb_free_blocks(inode, &e4b, bit, count);
c9de560d
AT
4614 }
4615
560671a0
AK
4616 ret = ext4_free_blks_count(sb, gdp) + count;
4617 ext4_free_blks_set(sb, gdp, ret);
c9de560d 4618 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
955ce5f5 4619 ext4_unlock_group(sb, block_group);
c9de560d
AT
4620 percpu_counter_add(&sbi->s_freeblocks_counter, count);
4621
772cb7c8
JS
4622 if (sbi->s_log_groups_per_flex) {
4623 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
9f24e420 4624 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
772cb7c8
JS
4625 }
4626
e39e07fd 4627 ext4_mb_unload_buddy(&e4b);
c9de560d 4628
44338711 4629 freed += count;
c9de560d 4630
7a2fcbf7
AK
4631 /* We dirtied the bitmap block */
4632 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4633 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4634
c9de560d
AT
4635 /* And the group descriptor block */
4636 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
0390131b 4637 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
c9de560d
AT
4638 if (!err)
4639 err = ret;
4640
4641 if (overflow && !err) {
4642 block += count;
4643 count = overflow;
4644 put_bh(bitmap_bh);
4645 goto do_more;
4646 }
a0375156 4647 ext4_mark_super_dirty(sb);
c9de560d 4648error_return:
7132de74 4649 if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
5dd4056d 4650 dquot_free_block(inode, freed);
c9de560d
AT
4651 brelse(bitmap_bh);
4652 ext4_std_error(sb, err);
4653 return;
4654}
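The do_more loop above clips a free that crosses a block-group boundary and restarts with the remainder. The clipping arithmetic in isolation (a sketch that ignores s_first_data_block, which the real ext4_get_group_no_and_offset accounts for):

static void split_at_group(unsigned long long block, unsigned long count,
                           unsigned long blocks_per_group,
                           unsigned long *this_count, unsigned long *overflow)
{
        unsigned long bit = block % blocks_per_group; /* offset in group */

        *overflow = 0;
        if (bit + count > blocks_per_group)
                *overflow = bit + count - blocks_per_group;
        *this_count = count - *overflow; /* freed now; rest on retry */
}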
7360d173 4655
2846e820
AG
4656/**
4657 * ext4_add_groupblocks() -- Add given blocks to an existing group
4658 * @handle: handle to this transaction
4659 * @sb: super block
 4660 * @block: start physical block to add to the block group
4661 * @count: number of blocks to free
4662 *
e73a347b 4663 * This marks the blocks as free in the bitmap and buddy.
2846e820
AG
4664 */
4665void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
4666 ext4_fsblk_t block, unsigned long count)
4667{
4668 struct buffer_head *bitmap_bh = NULL;
4669 struct buffer_head *gd_bh;
4670 ext4_group_t block_group;
4671 ext4_grpblk_t bit;
4672 unsigned int i;
4673 struct ext4_group_desc *desc;
4674 struct ext4_sb_info *sbi = EXT4_SB(sb);
e73a347b 4675 struct ext4_buddy e4b;
2846e820
AG
4676 int err = 0, ret, blk_free_count;
4677 ext4_grpblk_t blocks_freed;
2846e820
AG
4678
4679 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4680
4681 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
2846e820
AG
4682 /*
4683 * Check to see if we are freeing blocks across a group
4684 * boundary.
4685 */
2cd05cc3 4686 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
2846e820 4687 goto error_return;
2cd05cc3 4688
2846e820
AG
4689 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4690 if (!bitmap_bh)
4691 goto error_return;
4692 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4693 if (!desc)
4694 goto error_return;
4695
4696 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4697 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4698 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4699 in_range(block + count - 1, ext4_inode_table(sb, desc),
4700 sbi->s_itb_per_group)) {
4701 ext4_error(sb, "Adding blocks in system zones - "
4702 "Block = %llu, count = %lu",
4703 block, count);
4704 goto error_return;
4705 }
4706
2cd05cc3
TT
4707 BUFFER_TRACE(bitmap_bh, "getting write access");
4708 err = ext4_journal_get_write_access(handle, bitmap_bh);
2846e820
AG
4709 if (err)
4710 goto error_return;
4711
4712 /*
4713 * We are about to modify some metadata. Call the journal APIs
4714 * to unshare ->b_data if a currently-committing transaction is
4715 * using it
4716 */
4717 BUFFER_TRACE(gd_bh, "get_write_access");
4718 err = ext4_journal_get_write_access(handle, gd_bh);
4719 if (err)
4720 goto error_return;
e73a347b 4721
2846e820
AG
4722 for (i = 0, blocks_freed = 0; i < count; i++) {
4723 BUFFER_TRACE(bitmap_bh, "clear bit");
e73a347b 4724 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
2846e820
AG
4725 ext4_error(sb, "bit already cleared for block %llu",
4726 (ext4_fsblk_t)(block + i));
4727 BUFFER_TRACE(bitmap_bh, "bit already cleared");
4728 } else {
4729 blocks_freed++;
4730 }
4731 }
e73a347b
AG
4732
4733 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4734 if (err)
4735 goto error_return;
4736
4737 /*
4738 * need to update group_info->bb_free and bitmap
 4739 * with the group lock held. generate_buddy looks at
 4740 * them with the group lock held
4741 */
2846e820 4742 ext4_lock_group(sb, block_group);
e73a347b
AG
4743 mb_clear_bits(bitmap_bh->b_data, bit, count);
4744 mb_free_blocks(NULL, &e4b, bit, count);
2846e820
AG
4745 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
4746 ext4_free_blks_set(sb, desc, blk_free_count);
4747 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
4748 ext4_unlock_group(sb, block_group);
4749 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
4750
4751 if (sbi->s_log_groups_per_flex) {
4752 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4753 atomic_add(blocks_freed,
4754 &sbi->s_flex_groups[flex_group].free_blocks);
4755 }
e73a347b
AG
4756
4757 ext4_mb_unload_buddy(&e4b);
2846e820
AG
4758
4759 /* We dirtied the bitmap block */
4760 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4761 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4762
4763 /* And the group descriptor block */
4764 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4765 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4766 if (!err)
4767 err = ret;
4768
4769error_return:
4770 brelse(bitmap_bh);
4771 ext4_std_error(sb, err);
4772 return;
4773}
4774
7360d173
LC
4775/**
4776 * ext4_trim_extent -- function to TRIM one single free extent in the group
4777 * @sb: super block for the file system
4778 * @start: starting block of the free extent in the alloc. group
4779 * @count: number of blocks to TRIM
4780 * @group: alloc. group we are working with
4781 * @e4b: ext4 buddy for the group
4782 *
 4783 * Trim "count" blocks starting at "start" in the "group". To ensure that no
 4784 * one will allocate those blocks, mark them as used in the buddy bitmap. This
 4785 * must be called under the group lock.
4786 */
d9f34504
TT
4787static void ext4_trim_extent(struct super_block *sb, int start, int count,
4788 ext4_group_t group, struct ext4_buddy *e4b)
7360d173
LC
4789{
4790 struct ext4_free_extent ex;
7360d173 4791
b3d4c2b1
TM
4792 trace_ext4_trim_extent(sb, group, start, count);
4793
7360d173
LC
4794 assert_spin_locked(ext4_group_lock_ptr(sb, group));
4795
4796 ex.fe_start = start;
4797 ex.fe_group = group;
4798 ex.fe_len = count;
4799
4800 /*
4801 * Mark blocks used, so no one can reuse them while
4802 * being trimmed.
4803 */
4804 mb_mark_used(e4b, &ex);
4805 ext4_unlock_group(sb, group);
d9f34504 4806 ext4_issue_discard(sb, group, start, count);
7360d173
LC
4807 ext4_lock_group(sb, group);
4808 mb_free_blocks(NULL, e4b, start, ex.fe_len);
7360d173
LC
4809}
4810
4811/**
4812 * ext4_trim_all_free -- function to trim all free space in alloc. group
4813 * @sb: super block for file system
4814 * @e4b: ext4 buddy
4815 * @start: first group block to examine
4816 * @max: last group block to examine
4817 * @minblocks: minimum extent block count
4818 *
 4819 * ext4_trim_all_free walks through the group's bitmap searching for free
 4820 * extents. When a free extent of at least minblocks blocks is found, it
 4821 * is marked as used in the group buddy bitmap, so that nobody can
 4822 * allocate the blocks while they are being trimmed.
 4823 *
 4824 * A TRIM command is then issued on the extent, and afterwards the
 4825 * extent is freed again in the group buddy bitmap. This is repeated
 4826 * until either the whole group has been scanned or fewer than minblocks
 4827 * free blocks remain in it.
4828 */
0b75a840 4829static ext4_grpblk_t
78944086
LC
4830ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4831 ext4_grpblk_t start, ext4_grpblk_t max,
4832 ext4_grpblk_t minblocks)
7360d173
LC
4833{
4834 void *bitmap;
169ddc3e 4835 ext4_grpblk_t next, count = 0, free_count = 0;
78944086
LC
4836 struct ext4_buddy e4b;
4837 int ret;
7360d173 4838
b3d4c2b1
TM
4839 trace_ext4_trim_all_free(sb, group, start, max);
4840
78944086
LC
4841 ret = ext4_mb_load_buddy(sb, group, &e4b);
4842 if (ret) {
4843 ext4_error(sb, "Error in loading buddy "
4844 "information for %u", group);
4845 return ret;
4846 }
78944086 4847 bitmap = e4b.bd_bitmap;
28739eea
LC
4848
4849 ext4_lock_group(sb, group);
3d56b8d2
TM
4850 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4851 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4852 goto out;
4853
78944086
LC
4854 start = (e4b.bd_info->bb_first_free > start) ?
4855 e4b.bd_info->bb_first_free : start;
7360d173
LC
4856
4857 while (start < max) {
4858 start = mb_find_next_zero_bit(bitmap, max, start);
4859 if (start >= max)
4860 break;
4861 next = mb_find_next_bit(bitmap, max, start);
4862
4863 if ((next - start) >= minblocks) {
d9f34504 4864 ext4_trim_extent(sb, start,
78944086 4865 next - start, group, &e4b);
7360d173
LC
4866 count += next - start;
4867 }
169ddc3e 4868 free_count += next - start;
7360d173
LC
4869 start = next + 1;
4870
4871 if (fatal_signal_pending(current)) {
4872 count = -ERESTARTSYS;
4873 break;
4874 }
4875
4876 if (need_resched()) {
4877 ext4_unlock_group(sb, group);
4878 cond_resched();
4879 ext4_lock_group(sb, group);
4880 }
4881
169ddc3e 4882 if ((e4b.bd_info->bb_free - free_count) < minblocks)
7360d173
LC
4883 break;
4884 }
3d56b8d2
TM
4885
4886 if (!ret)
4887 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4888out:
7360d173 4889 ext4_unlock_group(sb, group);
78944086 4890 ext4_mb_unload_buddy(&e4b);
7360d173
LC
4891
4892 ext4_debug("trimmed %d blocks in the group %d\n",
4893 count, group);
4894
7360d173
LC
4895 return count;
4896}
4897
4898/**
4899 * ext4_trim_fs() -- trim ioctl handle function
4900 * @sb: superblock for filesystem
4901 * @range: fstrim_range structure
4902 *
4903 * start: First Byte to trim
4904 * len: number of Bytes to trim from start
4905 * minlen: minimum extent length in Bytes
4906 * ext4_trim_fs goes through all allocation groups containing Bytes from
 4907 * start to start+len. For each such group the ext4_trim_all_free function
4908 * is invoked to trim all free space.
4909 */
4910int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4911{
78944086 4912 struct ext4_group_info *grp;
7360d173
LC
4913 ext4_group_t first_group, last_group;
4914 ext4_group_t group, ngroups = ext4_get_groups_count(sb);
4915 ext4_grpblk_t cnt = 0, first_block, last_block;
78944086 4916 uint64_t start, len, minlen, trimmed = 0;
0f0a25bf
JK
4917 ext4_fsblk_t first_data_blk =
4918 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
7360d173
LC
4919 int ret = 0;
4920
4921 start = range->start >> sb->s_blocksize_bits;
4922 len = range->len >> sb->s_blocksize_bits;
4923 minlen = range->minlen >> sb->s_blocksize_bits;
7360d173
LC
4924
4925 if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
4926 return -EINVAL;
22f10457
TM
4927 if (start + len <= first_data_blk)
4928 goto out;
0f0a25bf
JK
4929 if (start < first_data_blk) {
4930 len -= first_data_blk - start;
4931 start = first_data_blk;
4932 }
7360d173
LC
4933
4934 /* Determine first and last group to examine based on start and len */
4935 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
4936 &first_group, &first_block);
4937 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) (start + len),
4938 &last_group, &last_block);
4939 last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
4940 last_block = EXT4_BLOCKS_PER_GROUP(sb);
4941
4942 if (first_group > last_group)
4943 return -EINVAL;
4944
4945 for (group = first_group; group <= last_group; group++) {
78944086
LC
4946 grp = ext4_get_group_info(sb, group);
4947 /* We only do this if the grp has never been initialized */
4948 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
4949 ret = ext4_mb_init_group(sb, group);
4950 if (ret)
4951 break;
7360d173
LC
4952 }
4953
0ba08517
TM
4954 /*
4955 * For all the groups except the last one, last block will
4956 * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to
 4957 * change it for the last group, in which case first_block +
 4958 * len < EXT4_BLOCKS_PER_GROUP(sb).
4959 */
4960 if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb))
ca6e909f 4961 last_block = first_block + len;
0ba08517 4962 len -= last_block - first_block;
7360d173 4963
78944086
LC
4964 if (grp->bb_free >= minlen) {
4965 cnt = ext4_trim_all_free(sb, group, first_block,
7360d173
LC
4966 last_block, minlen);
4967 if (cnt < 0) {
4968 ret = cnt;
7360d173
LC
4969 break;
4970 }
4971 }
7360d173
LC
4972 trimmed += cnt;
4973 first_block = 0;
4974 }
4975 range->len = trimmed * sb->s_blocksize;
4976
3d56b8d2
TM
4977 if (!ret)
4978 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
4979
22f10457 4980out:
7360d173
LC
4981 return ret;
4982}
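For reference, the unit handling at the top of ext4_trim_fs: an fstrim_range arrives in bytes, is shifted down to blocks with the superblock's block-size shift, and is then mapped onto a clamped range of block groups. A simplified sketch of that conversion (again ignoring s_first_data_block):

static void range_to_groups(unsigned long long start_byte,
                            unsigned long long len_byte,
                            unsigned blkbits, unsigned long blocks_per_group,
                            unsigned long ngroups,
                            unsigned long *first_grp, unsigned long *last_grp)
{
        unsigned long long start_blk = start_byte >> blkbits;
        unsigned long long end_blk = (start_byte + len_byte) >> blkbits;

        *first_grp = start_blk / blocks_per_group;
        *last_grp = end_blk / blocks_per_group;
        if (*last_grp > ngroups - 1)     /* clamp to the last group */
                *last_grp = ngroups - 1;
}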