/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "mballoc.h"
/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access them through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks. So it can have information regarding
 * groups_per_page, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best
 * extents. The tunables min_to_scan and max_to_scan control the behaviour
 * here. min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both of the prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
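
/*
 * Illustrative sketch of the layout described above (not part of the
 * allocator; the helper name is hypothetical): with two blocks per group
 * in the buddy cache -- bitmap first, buddy second -- a group's bitmap
 * block maps to a page and an offset within it as shown below.
 * ext4_mb_load_buddy() later in this file performs the same arithmetic
 * inline.
 */
static inline void mb_sketch_group_to_page(ext4_group_t group,
					   int blocks_per_page,
					   int *pnum, int *poff)
{
	int block = group * 2;	/* bitmap block of this group */

	*pnum = block / blocks_per_page;	/* page index in buddy cache */
	*poff = block % blocks_per_page;	/* block offset within the page */
}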

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know actual used
 *        bits from PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init.
 */
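
/*
 * A concrete walk-through of the accounting above (hypothetical numbers):
 * say the on-disk bitmap has 100 blocks used and a new inode PA of
 * N = 8 blocks is created, of which 3 then get used:
 *
 *  init buddy:       buddy = 100 used
 *  new PA:           buddy = 108 used, PA = 8
 *  use inode PA(3):  on-disk = 103 used, PA = 5
 *  discard inode PA: the 5 preallocated-but-unused blocks are freed in
 *                    the buddy, so buddy = 103 used = on-disk, PA = 0
 */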

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_ext_cachep;
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);



static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on an architecture like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
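
/*
 * Worked example for the helpers above (hypothetical addresses): on a
 * 64-bit machine, mb_correct_addr_and_bit() turns bit 5 at address
 * 0x1003 into bit 5 + 3 * 8 = 29 at address 0x1000 -- the same physical
 * bit, now expressed against a long-aligned base that ext4_test_bit()
 * and friends can safely operate on.
 */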

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	*max = 1 << (e4b->bd_blkbits + 3);
	if (order == 0)
		return EXT4_MB_BITMAP(e4b);

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += first + i;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
			ext4_grp_locked_error(sb, e4b->bd_group,
				   __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %u)",
				   inode ? inode->i_ino : 0, blocknr,
				   first + i, e4b->bd_group);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %u "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 0 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	buddy = mb_find_buddy(e4b, 0, &max);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Mark a run of free blocks in the buddy bitmaps: split [first, first+len)
 * into the largest power-of-two chunks its alignment allows, bump the
 * per-order counters and clear the matching bit in each order's buddy
 * bitmap (single blocks only bump bb_counters[0]).
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, unsigned first, int len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned short min;
	unsigned short max;
	unsigned short chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
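
/*
 * Worked example for ext4_mb_mark_free_simple() (hypothetical numbers):
 * marking first = 12, len = 7. Pass 1: max = ffs(12 | border) - 1 = 2
 * and min = fls(7) - 1 = 2, so a chunk of 4 blocks is recorded at
 * order 2 (bit 12 >> 2 = 3 cleared in the order-2 buddy), leaving
 * first = 16, len = 3. Pass 2 records a chunk of 2 at order 1, and
 * pass 3 a single order-0 block, which only bumps bb_counters[0].
 */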

static void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned short i = 0;
	unsigned short first;
	unsigned short len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, __func__,
			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
			group, free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}
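
/*
 * Example for ext4_mb_generate_buddy() (hypothetical bitmap): if only
 * blocks 0-2 and 5 are free, the scan above finds two fragments. The
 * run of 3 free blocks goes through ext4_mb_mark_free_simple() (one
 * order-1 chunk plus one order-0 block), the lone block 5 just bumps
 * bb_counters[0], and the function ends with bb_free = 4 and
 * bb_fragments = 2.
 */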

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2.
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;

	mb_debug("init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= ngroups)
			break;

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (bitmap_uptodate(bh[i]))
			continue;

		lock_buffer(bh[i]);
		if (bitmap_uptodate(bh[i])) {
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_lock_group(sb, first_group + i);
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_bitmap_uptodate(bh[i]);
			set_buffer_uptodate(bh[i]);
			ext4_unlock_group(sb, first_group + i);
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_unlock_group(sb, first_group + i);
		if (buffer_uptodate(bh[i])) {
			/*
			 * if not uninit, and bh is uptodate,
			 * the bitmap is also uptodate
			 */
			set_bitmap_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			continue;
		}
		get_bh(bh[i]);
		/*
		 * submit the buffer_head for read. We can
		 * safely mark the bitmap as uptodate now.
		 * We do it here so the bitmap uptodate bit
		 * gets set with the buffer lock held.
		 */
		set_bitmap_uptodate(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug("read bitmap for group %u\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page && bh[i]; i++)
		wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page && bh[i]; i++)
		if (!buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	/* init the page */
	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
	for (i = 0; i < blocks_per_page; i++) {
		int group;
		struct ext4_group_info *grinfo;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug("put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug("put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page && bh[i]; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug("load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;
	e4b->alloc_semp = &grp->alloc_sem;

	/* Take the read lock on the group alloc
	 * sem. This would make sure a parallel
	 * ext4_mb_init_group happening on other
	 * groups mapped by the page is blocked
	 * till we are done with allocation
	 */
	down_read(e4b->alloc_semp);

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize the same. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;

	/* Done with the buddy cache */
	up_read(e4b->alloc_semp);
	return ret;
}

static void ext4_mb_release_desc(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	/* Done with the buddy cache */
	if (e4b->alloc_semp)
		up_read(e4b->alloc_semp);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

static void mb_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}
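
/*
 * Example for mb_set_bits()/mb_clear_bits() (hypothetical call):
 * mb_set_bits(bm, 0, 40) sets bits 0..31 with a single 32-bit store
 * (the fast path above) and then bits 32..39 one at a time, since
 * fewer than 32 bits remain.
 */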

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += block;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
			ext4_grp_locked_error(sb, e4b->bd_group,
				   __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %u)",
				   inode ? inode->i_ino : 0, blocknr, block,
				   e4b->bd_group);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_check_buddy(e4b);
}
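
/*
 * Coalescing walk-through for mb_free_blocks() (hypothetical state):
 * freeing block 6 while block 7 is already free. Bit 6 is cleared in
 * the bitmap and bb_counters[0]++; then, as bits 6 and 7 are both
 * clear, the pair is merged: bb_counters[0] -= 2, bit 3 (= 6 >> 1) is
 * cleared in the order-1 buddy and bb_counters[1]++. The do/while
 * loop then retries the same merge one order up.
 */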

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME drop order completely ? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}

	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache call for this
	 * group until we update the bitmap. That would mean we
	 * could double-allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* on allocation we use ac to track the held semaphore */
	ac->alloc_semp = e4b->alloc_semp;
	e4b->alloc_semp = NULL;
	/* store last allocated for subsequent stream allocation */
	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than the previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}

static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
			ex.fe_start + le32_to_cpu(es->s_first_data_block);
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find a big enough chunk to satisfy the req
 */
static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass the number of
 * free blocks in the group, so the routine can know the upper limit.
 */
static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_BLOCKS_PER_GROUP(sb), i);
		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group,
					__func__, "%d free blocks as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group,
					__func__, "%d free blocks as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size requests
 * XXX should do so at least for multiples of stripe size as well
 */
static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
		+ le32_to_cpu(sbi->s_es->s_first_data_block);
	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}
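
/*
 * Alignment arithmetic example for ext4_mb_scan_aligned()
 * (hypothetical numbers): with s_stripe = 16 and first_group_block =
 * 1001, a = (1001 + 15) / 16 = 63 after do_div, so the scan starts at
 * group-relative block i = 63 * 16 - 1001 = 7, i.e. absolute block
 * 1008, the first stripe-aligned block in the group.
 */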

static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	unsigned i, bits;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);
	BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));

	free = grp->bb_free;
	fragments = grp->bb_fragments;
	if (free == 0)
		return 0;
	if (fragments == 0)
		return 0;

	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		bits = ac->ac_sb->s_blocksize_bits + 1;
		for (i = ac->ac_2order; i <= bits; i++)
			if (grp->bb_counters[i] > 0)
				return 1;
		break;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}
1755
920313a7
AK
1756/*
1757 * lock the group_info alloc_sem of all the groups
1758 * belonging to the same buddy cache page. This
1759 * make sure other parallel operation on the buddy
1760 * cache doesn't happen whild holding the buddy cache
1761 * lock
1762 */
1763int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
1764{
1765 int i;
1766 int block, pnum;
1767 int blocks_per_page;
1768 int groups_per_page;
8df9675f 1769 ext4_group_t ngroups = ext4_get_groups_count(sb);
920313a7
AK
1770 ext4_group_t first_group;
1771 struct ext4_group_info *grp;
1772
1773 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1774 /*
1775 * the buddy cache inode stores the block bitmap
1776 * and buddy information in consecutive blocks.
1777 * So for each group we need two blocks.
1778 */
1779 block = group * 2;
1780 pnum = block / blocks_per_page;
1781 first_group = pnum * blocks_per_page / 2;
1782
1783 groups_per_page = blocks_per_page >> 1;
1784 if (groups_per_page == 0)
1785 groups_per_page = 1;
1786 /* read all groups the page covers into the cache */
1787 for (i = 0; i < groups_per_page; i++) {
1788
1789 if ((first_group + i) >= ngroups)
1790 break;
1791 grp = ext4_get_group_info(sb, first_group + i);
1792 /* take each group's write allocation
1793 * semaphore. This makes sure there is
1794 * no block allocation going on in any
1795 * of those groups
1796 */
1797 down_write_nested(&grp->alloc_sem, i);
1798 }
1799 return i;
1800}
1801
1802void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
1803 ext4_group_t group, int locked_group)
1804{
1805 int i;
1806 int block, pnum;
1807 int blocks_per_page;
1808 ext4_group_t first_group;
1809 struct ext4_group_info *grp;
1810
1811 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1812 /*
1813 * the buddy cache inode stores the block bitmap
1814 * and buddy information in consecutive blocks.
1815 * So for each group we need two blocks.
1816 */
1817 block = group * 2;
1818 pnum = block / blocks_per_page;
1819 first_group = pnum * blocks_per_page / 2;
1820 /* release locks on all the groups */
1821 for (i = 0; i < locked_group; i++) {
1822
1823 grp = ext4_get_group_info(sb, first_group + i);
1824 /* release each group's write allocation
1825 * semaphore. It was taken in
1826 * ext4_mb_get_buddy_cache_lock() to keep
1827 * block allocation out of these groups
1828 */
1829 up_write(&grp->alloc_sem);
1830 }
1831
1832}
1833
1834static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1835{
1836
1837 int ret;
1838 void *bitmap;
1839 int blocks_per_page;
1840 int block, pnum, poff;
1841 int num_grp_locked = 0;
1842 struct ext4_group_info *this_grp;
1843 struct ext4_sb_info *sbi = EXT4_SB(sb);
1844 struct inode *inode = sbi->s_buddy_cache;
1845 struct page *page = NULL, *bitmap_page = NULL;
1846
1847 mb_debug("init group %u\n", group);
1848 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1849 this_grp = ext4_get_group_info(sb, group);
1850 /*
1851 * This ensures we don't add group
1852 * to this buddy cache via resize
1853 */
1854 num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
1855 if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
1856 /*
1857 * somebody else initialized the group;
1858 * return without doing anything
1859 */
1860 ret = 0;
1861 goto err;
1862 }
1863 /*
1864 * the buddy cache inode stores the block bitmap
1865 * and buddy information in consecutive blocks.
1866 * So for each group we need two blocks.
1867 */
1868 block = group * 2;
1869 pnum = block / blocks_per_page;
1870 poff = block % blocks_per_page;
1871 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1872 if (page) {
1873 BUG_ON(page->mapping != inode->i_mapping);
1874 ret = ext4_mb_init_cache(page, NULL);
1875 if (ret) {
1876 unlock_page(page);
1877 goto err;
1878 }
1879 unlock_page(page);
1880 }
1881 if (page == NULL || !PageUptodate(page)) {
1882 ret = -EIO;
1883 goto err;
1884 }
1885 mark_page_accessed(page);
1886 bitmap_page = page;
1887 bitmap = page_address(page) + (poff * sb->s_blocksize);
1888
1889 /* init buddy cache */
1890 block++;
1891 pnum = block / blocks_per_page;
1892 poff = block % blocks_per_page;
1893 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1894 if (page == bitmap_page) {
1895 /*
1896 * If both the bitmap and buddy are in
1897 * the same page we don't need to force
1898 * init the buddy
1899 */
1900 unlock_page(page);
1901 } else if (page) {
1902 BUG_ON(page->mapping != inode->i_mapping);
1903 ret = ext4_mb_init_cache(page, bitmap);
1904 if (ret) {
1905 unlock_page(page);
1906 goto err;
1907 }
1908 unlock_page(page);
1909 }
1910 if (page == NULL || !PageUptodate(page)) {
1911 ret = -EIO;
1912 goto err;
1913 }
1914 mark_page_accessed(page);
1915err:
1916 ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
1917 if (bitmap_page)
1918 page_cache_release(bitmap_page);
1919 if (page)
1920 page_cache_release(page);
1921 return ret;
1922}
1923
1924static noinline_for_stack int
1925ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1926 {
1927 ext4_group_t ngroups, group, i;
1928 int cr;
1929 int err = 0;
1930 int bsbits;
1931 struct ext4_sb_info *sbi;
1932 struct super_block *sb;
1933 struct ext4_buddy e4b;
1934 loff_t size, isize;
1935
1936 sb = ac->ac_sb;
1937 sbi = EXT4_SB(sb);
1938 ngroups = ext4_get_groups_count(sb);
1939 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1940
1941 /* first, try the goal */
1942 err = ext4_mb_find_by_goal(ac, &e4b);
1943 if (err || ac->ac_status == AC_STATUS_FOUND)
1944 goto out;
1945
1946 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1947 goto out;
1948
1949 /*
1950 * ac->ac_2order is set only if the fe_len is a power of 2;
1951 * if ac_2order is set we also set the criteria to 0 so that we
1952 * try an exact allocation using the buddy.
1953 */
1954 i = fls(ac->ac_g_ex.fe_len);
1955 ac->ac_2order = 0;
1956 /*
1957 * We search using buddy data only if the order of the request
1958 * is greater than or equal to sbi->s_mb_order2_reqs.
1959 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1960 */
1961 if (i >= sbi->s_mb_order2_reqs) {
1962 /*
1963 * This should tell if fe_len is exactly power of 2
1964 */
1965 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1966 ac->ac_2order = i - 1;
1967 }
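	/*
	 * For example, fe_len = 16 gives i = fls(16) = 5 and
	 * 16 & ~(1 << 4) == 0, so ac_2order = 4 and the cr == 0 pass
	 * can look for a free 2^4-block buddy chunk directly.
	 */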
1968
1969 bsbits = ac->ac_sb->s_blocksize_bits;
1970 /* if stream allocation is enabled, use global goal */
1971 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1972 isize = i_size_read(ac->ac_inode) >> bsbits;
1973 if (size < isize)
1974 size = isize;
1975
1976 if (size < sbi->s_mb_stream_request &&
1977 (ac->ac_flags & EXT4_MB_HINT_DATA)) {
1978 /* TBD: this may be a hot spot */
1979 spin_lock(&sbi->s_md_lock);
1980 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1981 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1982 spin_unlock(&sbi->s_md_lock);
1983 }
1984 /* Let's just scan groups to find more or less suitable blocks */
1985 cr = ac->ac_2order ? 0 : 1;
1986 /*
1987 * cr == 0 try to get exact allocation,
1988 * cr == 3 try to get anything
1989 */
1990repeat:
1991 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1992 ac->ac_criteria = cr;
1993 /*
1994 * searching for the right group start
1995 * from the goal value specified
1996 */
1997 group = ac->ac_g_ex.fe_group;
1998
1999 for (i = 0; i < ngroups; group++, i++) {
2000 struct ext4_group_info *grp;
2001 struct ext4_group_desc *desc;
2002
2003 if (group == ngroups)
2004 group = 0;
2005
2006 /* quick check to skip empty groups */
2007 grp = ext4_get_group_info(sb, group);
2008 if (grp->bb_free == 0)
2009 continue;
2010
2011 /*
2012 * if the group is already initialized we check whether it is
2013 * a good group and if not we don't load the buddy
2014 */
2015 if (EXT4_MB_GRP_NEED_INIT(grp)) {
2016 /*
2017 * we need full data about the group
2018 * to make a good selection
2019 */
2020 err = ext4_mb_init_group(sb, group);
2021 if (err)
2022 goto out;
2023 }
2024
2025 /*
2026 * If the particular group doesn't satisfy our
2027 * criteria we continue with the next group
2028 */
2029 if (!ext4_mb_good_group(ac, group, cr))
2030 continue;
2031
2032 err = ext4_mb_load_buddy(sb, group, &e4b);
2033 if (err)
2034 goto out;
2035
2036 ext4_lock_group(sb, group);
2037 if (!ext4_mb_good_group(ac, group, cr)) {
2038 /* someone did allocation from this group */
2039 ext4_unlock_group(sb, group);
2040 ext4_mb_release_desc(&e4b);
2041 continue;
2042 }
2043
2044 ac->ac_groups_scanned++;
2045 desc = ext4_get_group_desc(sb, group, NULL);
2046 if (cr == 0)
2047 ext4_mb_simple_scan_group(ac, &e4b);
2048 else if (cr == 1 &&
2049 ac->ac_g_ex.fe_len == sbi->s_stripe)
2050 ext4_mb_scan_aligned(ac, &e4b);
2051 else
2052 ext4_mb_complex_scan_group(ac, &e4b);
2053
2054 ext4_unlock_group(sb, group);
2055 ext4_mb_release_desc(&e4b);
2056
2057 if (ac->ac_status != AC_STATUS_CONTINUE)
2058 break;
2059 }
2060 }
2061
2062 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2063 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2064 /*
2065 * We've been searching too long. Let's try to allocate
2066 * the best chunk we've found so far
2067 */
2068
2069 ext4_mb_try_best_found(ac, &e4b);
2070 if (ac->ac_status != AC_STATUS_FOUND) {
2071 /*
2072 * Someone more lucky has already allocated it.
2073 * The only thing we can do is just take first
2074 * found block(s)
2075 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2076 */
2077 ac->ac_b_ex.fe_group = 0;
2078 ac->ac_b_ex.fe_start = 0;
2079 ac->ac_b_ex.fe_len = 0;
2080 ac->ac_status = AC_STATUS_CONTINUE;
2081 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2082 cr = 3;
2083 atomic_inc(&sbi->s_mb_lost_chunks);
2084 goto repeat;
2085 }
2086 }
2087out:
2088 return err;
2089}
2090
2091#ifdef EXT4_MB_HISTORY
2092struct ext4_mb_proc_session {
2093 struct ext4_mb_history *history;
2094 struct super_block *sb;
2095 int start;
2096 int max;
2097};
2098
2099static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
2100 struct ext4_mb_history *hs,
2101 int first)
2102{
2103 if (hs == s->history + s->max)
2104 hs = s->history;
2105 if (!first && hs == s->history + s->start)
2106 return NULL;
2107 while (hs->orig.fe_len == 0) {
2108 hs++;
2109 if (hs == s->history + s->max)
2110 hs = s->history;
2111 if (hs == s->history + s->start)
2112 return NULL;
2113 }
2114 return hs;
2115}
2116
2117static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2118{
2119 struct ext4_mb_proc_session *s = seq->private;
2120 struct ext4_mb_history *hs;
2121 int l = *pos;
2122
2123 if (l == 0)
2124 return SEQ_START_TOKEN;
2125 hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2126 if (!hs)
2127 return NULL;
2128 while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2129 return hs;
2130}
2131
2132static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
2133 loff_t *pos)
2134{
2135 struct ext4_mb_proc_session *s = seq->private;
2136 struct ext4_mb_history *hs = v;
2137
2138 ++*pos;
2139 if (v == SEQ_START_TOKEN)
2140 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2141 else
2142 return ext4_mb_history_skip_empty(s, ++hs, 0);
2143}
2144
2145static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
2146{
2147 char buf[25], buf2[25], buf3[25], *fmt;
2148 struct ext4_mb_history *hs = v;
2149
2150 if (v == SEQ_START_TOKEN) {
2151 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2152 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
2153 "pid", "inode", "original", "goal", "result", "found",
2154 "grps", "cr", "flags", "merge", "tail", "broken");
2155 return 0;
2156 }
2157
2158 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
2159 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2160 "%-5u %-5s %-5u %-6u\n";
2161 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2162 hs->result.fe_start, hs->result.fe_len,
2163 hs->result.fe_logical);
2164 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2165 hs->orig.fe_start, hs->orig.fe_len,
2166 hs->orig.fe_logical);
2167 sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
2168 hs->goal.fe_start, hs->goal.fe_len,
2169 hs->goal.fe_logical);
2170 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2171 hs->found, hs->groups, hs->cr, hs->flags,
2172 hs->merged ? "M" : "", hs->tail,
2173 hs->buddy ? 1 << hs->buddy : 0);
2174 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
2175 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2176 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2177 hs->result.fe_start, hs->result.fe_len,
2178 hs->result.fe_logical);
2179 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2180 hs->orig.fe_start, hs->orig.fe_len,
2181 hs->orig.fe_logical);
2182 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2183 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
2184 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2185 hs->result.fe_start, hs->result.fe_len);
2186 seq_printf(seq, "%-5u %-8u %-23s discard\n",
2187 hs->pid, hs->ino, buf2);
2188 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
2189 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2190 hs->result.fe_start, hs->result.fe_len);
2191 seq_printf(seq, "%-5u %-8u %-23s free\n",
2192 hs->pid, hs->ino, buf2);
2193 }
2194 return 0;
2195}
2196
2197static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
2198{
2199}
2200
2201static struct seq_operations ext4_mb_seq_history_ops = {
2202 .start = ext4_mb_seq_history_start,
2203 .next = ext4_mb_seq_history_next,
2204 .stop = ext4_mb_seq_history_stop,
2205 .show = ext4_mb_seq_history_show,
2206};
2207
2208static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
2209{
2210 struct super_block *sb = PDE(inode)->data;
2211 struct ext4_sb_info *sbi = EXT4_SB(sb);
2212 struct ext4_mb_proc_session *s;
2213 int rc;
2214 int size;
2215
2216 if (unlikely(sbi->s_mb_history == NULL))
2217 return -ENOMEM;
2218 s = kmalloc(sizeof(*s), GFP_KERNEL);
2219 if (s == NULL)
2220 return -ENOMEM;
2221 s->sb = sb;
2222 size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
2223 s->history = kmalloc(size, GFP_KERNEL);
2224 if (s->history == NULL) {
2225 kfree(s);
2226 return -ENOMEM;
2227 }
2228
2229 spin_lock(&sbi->s_mb_history_lock);
2230 memcpy(s->history, sbi->s_mb_history, size);
2231 s->max = sbi->s_mb_history_max;
2232 s->start = sbi->s_mb_history_cur % s->max;
2233 spin_unlock(&sbi->s_mb_history_lock);
2234
2235 rc = seq_open(file, &ext4_mb_seq_history_ops);
2236 if (rc == 0) {
2237 struct seq_file *m = (struct seq_file *)file->private_data;
2238 m->private = s;
2239 } else {
2240 kfree(s->history);
2241 kfree(s);
2242 }
2243 return rc;
2244
2245}
2246
2247static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2248{
2249 struct seq_file *seq = (struct seq_file *)file->private_data;
2250 struct ext4_mb_proc_session *s = seq->private;
2251 kfree(s->history);
2252 kfree(s);
2253 return seq_release(inode, file);
2254}
2255
2256static ssize_t ext4_mb_seq_history_write(struct file *file,
2257 const char __user *buffer,
2258 size_t count, loff_t *ppos)
2259{
2260 struct seq_file *seq = (struct seq_file *)file->private_data;
2261 struct ext4_mb_proc_session *s = seq->private;
2262 struct super_block *sb = s->sb;
2263 char str[32];
2264 int value;
2265
2266 if (count >= sizeof(str)) {
2267 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2268 "mb_history", (int)sizeof(str));
2269 return -EOVERFLOW;
2270 }
2271
2272 if (copy_from_user(str, buffer, count))
2273 return -EFAULT;
2274
2275 value = simple_strtol(str, NULL, 0);
2276 if (value < 0)
2277 return -ERANGE;
2278 EXT4_SB(sb)->s_mb_history_filter = value;
2279
2280 return count;
2281}
2282
2283static struct file_operations ext4_mb_seq_history_fops = {
2284 .owner = THIS_MODULE,
2285 .open = ext4_mb_seq_history_open,
2286 .read = seq_read,
2287 .write = ext4_mb_seq_history_write,
2288 .llseek = seq_lseek,
2289 .release = ext4_mb_seq_history_release,
2290};
2291
2292static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2293{
2294 struct super_block *sb = seq->private;
2295 ext4_group_t group;
2296
2297 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2298 return NULL;
2299 group = *pos + 1;
2300 return (void *) ((unsigned long) group);
2301}
2302
2303static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2304{
2305 struct super_block *sb = seq->private;
2306 ext4_group_t group;
2307
2308 ++*pos;
2309 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2310 return NULL;
2311 group = *pos + 1;
2312 return (void *) ((unsigned long) group);
2313}
2314
2315static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2316{
2317 struct super_block *sb = seq->private;
2318 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2319 int i;
2320 int err;
2321 struct ext4_buddy e4b;
2322 struct sg {
2323 struct ext4_group_info info;
2324 unsigned short counters[16];
2325 } sg;
2326
2327 group--;
2328 if (group == 0)
2329 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2330 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2331 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2332 "group", "free", "frags", "first",
2333 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2334 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2335
2336 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2337 sizeof(struct ext4_group_info);
2338 err = ext4_mb_load_buddy(sb, group, &e4b);
2339 if (err) {
2340 seq_printf(seq, "#%-5u: I/O error\n", group);
2341 return 0;
2342 }
2343 ext4_lock_group(sb, group);
2344 memcpy(&sg, ext4_get_group_info(sb, group), i);
2345 ext4_unlock_group(sb, group);
2346 ext4_mb_release_desc(&e4b);
2347
2348 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2349 sg.info.bb_fragments, sg.info.bb_first_free);
2350 for (i = 0; i <= 13; i++)
2351 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2352 sg.info.bb_counters[i] : 0);
2353 seq_printf(seq, " ]\n");
2354
2355 return 0;
2356}
2357
2358static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2359{
2360}
2361
2362static struct seq_operations ext4_mb_seq_groups_ops = {
2363 .start = ext4_mb_seq_groups_start,
2364 .next = ext4_mb_seq_groups_next,
2365 .stop = ext4_mb_seq_groups_stop,
2366 .show = ext4_mb_seq_groups_show,
2367};
2368
2369static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2370{
2371 struct super_block *sb = PDE(inode)->data;
2372 int rc;
2373
2374 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2375 if (rc == 0) {
2376 struct seq_file *m = (struct seq_file *)file->private_data;
2377 m->private = sb;
2378 }
2379 return rc;
2380
2381}
2382
2383static struct file_operations ext4_mb_seq_groups_fops = {
2384 .owner = THIS_MODULE,
2385 .open = ext4_mb_seq_groups_open,
2386 .read = seq_read,
2387 .llseek = seq_lseek,
2388 .release = seq_release,
2389};
2390
2391static void ext4_mb_history_release(struct super_block *sb)
2392{
2393 struct ext4_sb_info *sbi = EXT4_SB(sb);
2394
2395 if (sbi->s_proc != NULL) {
2396 remove_proc_entry("mb_groups", sbi->s_proc);
2397 if (sbi->s_mb_history_max)
2398 remove_proc_entry("mb_history", sbi->s_proc);
2399 }
2400 kfree(sbi->s_mb_history);
2401}
2402
2403static void ext4_mb_history_init(struct super_block *sb)
2404{
2405 struct ext4_sb_info *sbi = EXT4_SB(sb);
2406 int i;
2407
2408 if (sbi->s_proc != NULL) {
2409 if (sbi->s_mb_history_max)
2410 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2411 &ext4_mb_seq_history_fops, sb);
2412 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2413 &ext4_mb_seq_groups_fops, sb);
2414 }
2415
2416 sbi->s_mb_history_cur = 0;
2417 spin_lock_init(&sbi->s_mb_history_lock);
2418 i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2419 sbi->s_mb_history = i ? kzalloc(i, GFP_KERNEL) : NULL;
2420 /* if we can't allocate history, then we simply won't use it */
2421}
2422
2423static noinline_for_stack void
2424ext4_mb_store_history(struct ext4_allocation_context *ac)
2425{
2426 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2427 struct ext4_mb_history h;
2428
2429 if (sbi->s_mb_history == NULL)
2430 return;
2431
2432 if (!(ac->ac_op & sbi->s_mb_history_filter))
2433 return;
2434
2435 h.op = ac->ac_op;
2436 h.pid = current->pid;
2437 h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2438 h.orig = ac->ac_o_ex;
2439 h.result = ac->ac_b_ex;
2440 h.flags = ac->ac_flags;
2441 h.found = ac->ac_found;
2442 h.groups = ac->ac_groups_scanned;
2443 h.cr = ac->ac_criteria;
2444 h.tail = ac->ac_tail;
2445 h.buddy = ac->ac_buddy;
2446 h.merged = 0;
2447 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2448 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2449 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2450 h.merged = 1;
2451 h.goal = ac->ac_g_ex;
2452 h.result = ac->ac_f_ex;
2453 }
2454
2455 spin_lock(&sbi->s_mb_history_lock);
2456 memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2457 if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2458 sbi->s_mb_history_cur = 0;
2459 spin_unlock(&sbi->s_mb_history_lock);
2460}
2461
2462#else
2463#define ext4_mb_history_release(sb)
2464#define ext4_mb_history_init(sb)
2465#endif
2466
2467
2468/* Create and initialize ext4_group_info data for the given group. */
2469 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2470 struct ext4_group_desc *desc)
2471{
2472 int i, len;
2473 int metalen = 0;
2474 struct ext4_sb_info *sbi = EXT4_SB(sb);
2475 struct ext4_group_info **meta_group_info;
2476
2477 /*
2478 * First check if this group is the first one of a descriptor block.
2479 * If so, we have to allocate a new table of pointers
2480 * to ext4_group_info structures
2481 */
2482 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2483 metalen = sizeof(*meta_group_info) <<
2484 EXT4_DESC_PER_BLOCK_BITS(sb);
2485 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2486 if (meta_group_info == NULL) {
2487 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2488 "buddy group\n");
2489 goto exit_meta_group_info;
2490 }
2491 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2492 meta_group_info;
2493 }
2494
2495 /*
2496 * calculate the needed size. if you change the bb_counters size,
2497 * don't forget about ext4_mb_generate_buddy()
2498 */
2499 len = offsetof(typeof(**meta_group_info),
2500 bb_counters[sb->s_blocksize_bits + 2]);
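	/*
	 * For example, with 4KB blocks (s_blocksize_bits = 12) this
	 * sizes the structure through bb_counters[0..13], one counter
	 * per buddy order up to the maximum order s_blocksize_bits + 1.
	 */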
2501
2502 meta_group_info =
2503 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2504 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2505
2506 meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2507 if (meta_group_info[i] == NULL) {
2508 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2509 goto exit_group_info;
2510 }
2511 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2512 &(meta_group_info[i]->bb_state));
2513
2514 /*
2515 * initialize bb_free to be able to skip
2516 * empty groups without initialization
2517 */
2518 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2519 meta_group_info[i]->bb_free =
2520 ext4_free_blocks_after_init(sb, group, desc);
2521 } else {
2522 meta_group_info[i]->bb_free =
2523 ext4_free_blks_count(sb, desc);
2524 }
2525
2526 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2527 init_rwsem(&meta_group_info[i]->alloc_sem);
2528 meta_group_info[i]->bb_free_root.rb_node = NULL;
2529
2530#ifdef DOUBLE_CHECK
2531 {
2532 struct buffer_head *bh;
2533 meta_group_info[i]->bb_bitmap =
2534 kmalloc(sb->s_blocksize, GFP_KERNEL);
2535 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2536 bh = ext4_read_block_bitmap(sb, group);
2537 BUG_ON(bh == NULL);
2538 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2539 sb->s_blocksize);
2540 put_bh(bh);
2541 }
2542#endif
2543
2544 return 0;
2545
2546exit_group_info:
2547 /* If a meta_group_info table has been allocated, release it now */
2548 if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2549 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2550exit_meta_group_info:
2551 return -ENOMEM;
2552} /* ext4_mb_add_groupinfo */
2553
2554/*
2555 * Update an existing group.
2556 * This function is used for online resize
2557 */
2558void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2559{
2560 grp->bb_free += add;
2561}
2562
2563static int ext4_mb_init_backend(struct super_block *sb)
2564{
2565 ext4_group_t ngroups = ext4_get_groups_count(sb);
2566 ext4_group_t i;
2567 int metalen;
2568 struct ext4_sb_info *sbi = EXT4_SB(sb);
2569 struct ext4_super_block *es = sbi->s_es;
2570 int num_meta_group_infos;
2571 int num_meta_group_infos_max;
2572 int array_size;
2573 struct ext4_group_info **meta_group_info;
2574 struct ext4_group_desc *desc;
2575
2576 /* This is the number of blocks used by GDT */
2577 num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2578 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2579
2580 /*
2581 * This is the total number of blocks used by GDT including
2582 * the number of reserved blocks for GDT.
2583 * The s_group_info array is allocated with this value
2584 * to allow a clean online resize without a complex
2585 * manipulation of pointers.
2586 * The drawback is the memory left unused when no resize
2587 * occurs, but it's very low in terms of pages
2588 * (see comments below)
2589 * Need to handle this properly when META_BG resizing is allowed
2590 */
2591 num_meta_group_infos_max = num_meta_group_infos +
2592 le16_to_cpu(es->s_reserved_gdt_blocks);
2593 
2594 /*
2595 * array_size is the size of s_group_info array. We round it
2596 * to the next power of two because this approximation is done
2597 * internally by kmalloc so we can have some more memory
2598 * for free here (e.g. may be used for META_BG resize).
2599 */
2600 array_size = 1;
2601 while (array_size < sizeof(*sbi->s_group_info) *
2602 num_meta_group_infos_max)
2603 array_size = array_size << 1;
2604 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2605 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2606 * So a two level scheme suffices for now. */
2607 sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2608 if (sbi->s_group_info == NULL) {
2609 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2610 return -ENOMEM;
2611 }
2612 sbi->s_buddy_cache = new_inode(sb);
2613 if (sbi->s_buddy_cache == NULL) {
2614 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2615 goto err_freesgi;
2616 }
2617 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2618
2619 metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2620 for (i = 0; i < num_meta_group_infos; i++) {
2621 if ((i + 1) == num_meta_group_infos)
2622 metalen = sizeof(*meta_group_info) *
2623 (ngroups -
2624 (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2625 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2626 if (meta_group_info == NULL) {
2627 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2628 "buddy group\n");
2629 goto err_freemeta;
2630 }
2631 sbi->s_group_info[i] = meta_group_info;
2632 }
2633
2634 for (i = 0; i < ngroups; i++) {
2635 desc = ext4_get_group_desc(sb, i, NULL);
2636 if (desc == NULL) {
2637 printk(KERN_ERR
2638 "EXT4-fs: can't read descriptor %u\n", i);
2639 goto err_freebuddy;
2640 }
2641 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2642 goto err_freebuddy;
2643 }
2644
2645 return 0;
2646
2647err_freebuddy:
2648 while (i-- > 0)
2649 kfree(ext4_get_group_info(sb, i));
2650 i = num_meta_group_infos;
2651err_freemeta:
2652 while (i-- > 0)
2653 kfree(sbi->s_group_info[i]);
2654 iput(sbi->s_buddy_cache);
2655err_freesgi:
2656 kfree(sbi->s_group_info);
2657 return -ENOMEM;
2658}
2659
2660int ext4_mb_init(struct super_block *sb, int needs_recovery)
2661{
2662 struct ext4_sb_info *sbi = EXT4_SB(sb);
2663 unsigned i, j;
2664 unsigned offset;
2665 unsigned max;
2666 int ret;
2667 
2668 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2669
2670 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2671 if (sbi->s_mb_offsets == NULL) {
2672 return -ENOMEM;
2673 }
2674
2675 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2676 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2677 if (sbi->s_mb_maxs == NULL) {
2678 kfree(sbi->s_mb_offsets);
2679 return -ENOMEM;
2680 }
2681
2682 /* order 0 is regular bitmap */
2683 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2684 sbi->s_mb_offsets[0] = 0;
2685
2686 i = 1;
2687 offset = 0;
2688 max = sb->s_blocksize << 2;
2689 do {
2690 sbi->s_mb_offsets[i] = offset;
2691 sbi->s_mb_maxs[i] = max;
2692 offset += 1 << (sb->s_blocksize_bits - i);
2693 max = max >> 1;
2694 i++;
2695 } while (i <= sb->s_blocksize_bits + 1);
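	/*
	 * Worked example (assuming 4KB blocks, s_blocksize_bits = 12):
	 * s_mb_maxs[0] = 32768 bits for the plain block bitmap, then
	 * the buddy block packs the higher orders back to back:
	 * order 1 at byte offset 0 (16384 bits), order 2 at 2048
	 * (8192 bits), order 3 at 3072 (4096 bits), and so on, each
	 * order holding half as many bits as the previous one.
	 */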
2696
2697 /* init file for buddy data */
2698 ret = ext4_mb_init_backend(sb);
2699 if (ret != 0) {
2700 kfree(sbi->s_mb_offsets);
2701 kfree(sbi->s_mb_maxs);
2702 return ret;
2703 }
2704
2705 spin_lock_init(&sbi->s_md_lock);
2706 spin_lock_init(&sbi->s_bal_lock);
2707
2708 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2709 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2710 sbi->s_mb_stats = MB_DEFAULT_STATS;
2711 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2712 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2713 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2714 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2715
2716 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2717 if (sbi->s_locality_groups == NULL) {
2718 kfree(sbi->s_mb_offsets);
2719 kfree(sbi->s_mb_maxs);
2720 return -ENOMEM;
2721 }
2722 for_each_possible_cpu(i) {
2723 struct ext4_locality_group *lg;
2724 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2725 mutex_init(&lg->lg_mutex);
2726 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2727 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2728 spin_lock_init(&lg->lg_prealloc_lock);
2729 }
2730
2731 ext4_mb_history_init(sb);
2732
2733 if (sbi->s_journal)
2734 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2735 
2736 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2737 return 0;
2738}
2739
2740 /* needs to be called with the ext4 group lock held */
2741static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2742{
2743 struct ext4_prealloc_space *pa;
2744 struct list_head *cur, *tmp;
2745 int count = 0;
2746
2747 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2748 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2749 list_del(&pa->pa_group_list);
2750 count++;
2751 kmem_cache_free(ext4_pspace_cachep, pa);
2752 }
2753 if (count)
2754 mb_debug("mballoc: %u PAs left\n", count);
2755
2756}
2757
2758int ext4_mb_release(struct super_block *sb)
2759{
2760 ext4_group_t ngroups = ext4_get_groups_count(sb);
2761 ext4_group_t i;
2762 int num_meta_group_infos;
2763 struct ext4_group_info *grinfo;
2764 struct ext4_sb_info *sbi = EXT4_SB(sb);
2765
2766 if (sbi->s_group_info) {
2767 for (i = 0; i < ngroups; i++) {
2768 grinfo = ext4_get_group_info(sb, i);
2769#ifdef DOUBLE_CHECK
2770 kfree(grinfo->bb_bitmap);
2771#endif
2772 ext4_lock_group(sb, i);
2773 ext4_mb_cleanup_pa(grinfo);
2774 ext4_unlock_group(sb, i);
2775 kfree(grinfo);
2776 }
2777 num_meta_group_infos = (ngroups +
2778 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2779 EXT4_DESC_PER_BLOCK_BITS(sb);
2780 for (i = 0; i < num_meta_group_infos; i++)
2781 kfree(sbi->s_group_info[i]);
2782 kfree(sbi->s_group_info);
2783 }
2784 kfree(sbi->s_mb_offsets);
2785 kfree(sbi->s_mb_maxs);
2786 if (sbi->s_buddy_cache)
2787 iput(sbi->s_buddy_cache);
2788 if (sbi->s_mb_stats) {
2789 printk(KERN_INFO
2790 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2791 atomic_read(&sbi->s_bal_allocated),
2792 atomic_read(&sbi->s_bal_reqs),
2793 atomic_read(&sbi->s_bal_success));
2794 printk(KERN_INFO
2795 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2796 "%u 2^N hits, %u breaks, %u lost\n",
2797 atomic_read(&sbi->s_bal_ex_scanned),
2798 atomic_read(&sbi->s_bal_goals),
2799 atomic_read(&sbi->s_bal_2orders),
2800 atomic_read(&sbi->s_bal_breaks),
2801 atomic_read(&sbi->s_mb_lost_chunks));
2802 printk(KERN_INFO
2803 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2804 sbi->s_mb_buddies_generated++,
2805 sbi->s_mb_generation_time);
2806 printk(KERN_INFO
2807 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2808 atomic_read(&sbi->s_mb_preallocated),
2809 atomic_read(&sbi->s_mb_discarded));
2810 }
2811
2812 free_percpu(sbi->s_locality_groups);
2813 ext4_mb_history_release(sb);
2814
2815 return 0;
2816}
2817
2818/*
2819 * This function is called by the jbd2 layer once the commit has finished,
2820 * so we know we can free the blocks that were released with that commit.
2821 */
2822static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2823 {
2824 struct super_block *sb = journal->j_private;
2825 struct ext4_buddy e4b;
2826 struct ext4_group_info *db;
2827 int err, count = 0, count2 = 0;
2828 struct ext4_free_data *entry;
2829 ext4_fsblk_t discard_block;
2830 struct list_head *l, *ltmp;
2831 
2832 list_for_each_safe(l, ltmp, &txn->t_private_list) {
2833 entry = list_entry(l, struct ext4_free_data, list);
2834 
2835 mb_debug("gonna free %u blocks in group %u (0x%p):",
2836 entry->count, entry->group, entry);
2837 
2838 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2839 /* we expect to find existing buddy because it's pinned */
2840 BUG_ON(err != 0);
2841
2842 db = e4b.bd_info;
2843 /* there are blocks to put in buddy to make them really free */
2844 count += entry->count;
2845 count2++;
2846 ext4_lock_group(sb, entry->group);
2847 /* Take it out of per group rb tree */
2848 rb_erase(&entry->node, &(db->bb_free_root));
2849 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2850
2851 if (!db->bb_free_root.rb_node) {
2852 /* No more items in the per-group rb tree;
2853 * balance the refcounts from ext4_mb_free_metadata()
2854 */
2855 page_cache_release(e4b.bd_buddy_page);
2856 page_cache_release(e4b.bd_bitmap_page);
2857 }
2858 ext4_unlock_group(sb, entry->group);
2859 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2860 + entry->start_blk
2861 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
2862 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u",
2863 sb->s_id, (unsigned long long) discard_block,
2864 entry->count);
2865 sb_issue_discard(sb, discard_block, entry->count);
2866 
2867 kmem_cache_free(ext4_free_ext_cachep, entry);
2868 ext4_mb_release_desc(&e4b);
2869 }
2870
2871 mb_debug("freed %u blocks in %u structures\n", count, count2);
2872}
2873
2874int __init init_ext4_mballoc(void)
2875{
2876 ext4_pspace_cachep =
2877 kmem_cache_create("ext4_prealloc_space",
2878 sizeof(struct ext4_prealloc_space),
2879 0, SLAB_RECLAIM_ACCOUNT, NULL);
2880 if (ext4_pspace_cachep == NULL)
2881 return -ENOMEM;
2882
2883 ext4_ac_cachep =
2884 kmem_cache_create("ext4_alloc_context",
2885 sizeof(struct ext4_allocation_context),
2886 0, SLAB_RECLAIM_ACCOUNT, NULL);
2887 if (ext4_ac_cachep == NULL) {
2888 kmem_cache_destroy(ext4_pspace_cachep);
2889 return -ENOMEM;
2890 }
2891
2892 ext4_free_ext_cachep =
2893 kmem_cache_create("ext4_free_block_extents",
2894 sizeof(struct ext4_free_data),
2895 0, SLAB_RECLAIM_ACCOUNT, NULL);
2896 if (ext4_free_ext_cachep == NULL) {
2897 kmem_cache_destroy(ext4_pspace_cachep);
2898 kmem_cache_destroy(ext4_ac_cachep);
2899 return -ENOMEM;
2900 }
2901 return 0;
2902}
2903
2904void exit_ext4_mballoc(void)
2905{
2906 /* XXX: synchronize_rcu(); */
2907 kmem_cache_destroy(ext4_pspace_cachep);
2908 kmem_cache_destroy(ext4_ac_cachep);
2909 kmem_cache_destroy(ext4_free_ext_cachep);
2910}
2911
2912
2913/*
2914 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2915 * Returns 0 if success or error code
2916 */
2917static noinline_for_stack int
2918ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2919 handle_t *handle, unsigned int reserv_blks)
2920{
2921 struct buffer_head *bitmap_bh = NULL;
2922 struct ext4_super_block *es;
2923 struct ext4_group_desc *gdp;
2924 struct buffer_head *gdp_bh;
2925 struct ext4_sb_info *sbi;
2926 struct super_block *sb;
2927 ext4_fsblk_t block;
2928 int err, len;
2929
2930 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2931 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2932
2933 sb = ac->ac_sb;
2934 sbi = EXT4_SB(sb);
2935 es = sbi->s_es;
2936
2937
2938 err = -EIO;
2939 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2940 if (!bitmap_bh)
2941 goto out_err;
2942
2943 err = ext4_journal_get_write_access(handle, bitmap_bh);
2944 if (err)
2945 goto out_err;
2946
2947 err = -EIO;
2948 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2949 if (!gdp)
2950 goto out_err;
2951
2952 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2953 ext4_free_blks_count(sb, gdp));
2954 
2955 err = ext4_journal_get_write_access(handle, gdp_bh);
2956 if (err)
2957 goto out_err;
2958
2959 block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2960 + ac->ac_b_ex.fe_start
2961 + le32_to_cpu(es->s_first_data_block);
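	/*
	 * For example, with 32768 blocks per group and
	 * s_first_data_block = 0, fe_group = 3 and fe_start = 100
	 * map to physical block 3 * 32768 + 100 = 98404.
	 */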
2962
2963 len = ac->ac_b_ex.fe_len;
2964 if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2965 in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2966 in_range(block, ext4_inode_table(sb, gdp),
2967 EXT4_SB(sb)->s_itb_per_group) ||
2968 in_range(block + len - 1, ext4_inode_table(sb, gdp),
2969 EXT4_SB(sb)->s_itb_per_group)) {
2970 ext4_error(sb, __func__,
2971 "Allocating block %llu in system zone of group %d\n",
2972 block, ac->ac_b_ex.fe_group);
2973 /* The file system was mounted not to panic on error,
2974 * so fix the bitmap and repeat the block allocation.
2975 * We leak some of the blocks here.
2976 */
2977 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2978 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2979 ac->ac_b_ex.fe_len);
2980 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2981 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2982 if (!err)
2983 err = -EAGAIN;
2984 goto out_err;
2985 }
2986
2987 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2988#ifdef AGGRESSIVE_CHECK
2989 {
2990 int i;
2991 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2992 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2993 bitmap_bh->b_data));
2994 }
2995 }
2996#endif
2997 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2998 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2999 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3000 ext4_free_blks_set(sb, gdp,
3001 ext4_free_blocks_after_init(sb,
3002 ac->ac_b_ex.fe_group, gdp));
3003 }
3004 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
3005 ext4_free_blks_set(sb, gdp, len);
3006 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
3007
3008 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3009 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
3010 /*
3011 * Now reduce the dirty block count as well. It should not go negative.
3012 */
3013 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3014 /* release all the reserved blocks if non delalloc */
3015 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
3016 else {
3017 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
3018 ac->ac_b_ex.fe_len);
3019 /* convert reserved quota blocks to real quota blocks */
3020 vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
3021 }
3022 
3023 if (sbi->s_log_groups_per_flex) {
3024 ext4_group_t flex_group = ext4_flex_group(sbi,
3025 ac->ac_b_ex.fe_group);
3026 atomic_sub(ac->ac_b_ex.fe_len,
3027 &sbi->s_flex_groups[flex_group].free_blocks);
3028 }
3029
3030 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3031 if (err)
3032 goto out_err;
3033 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3034
3035out_err:
3036 sb->s_dirt = 1;
3037 brelse(bitmap_bh);
3038 return err;
3039}
3040
3041/*
3042 * here we normalize the request for a locality group.
3043 * Group requests are normalized to the s_stripe size if it was set via the
3044 * mount option. If not, we set it to s_mb_group_prealloc, which can be
3045 * configured via /sys/fs/ext4/<partition>/mb_group_prealloc
3046 *
3047 * XXX: should we try to preallocate more than the group has now?
3048 */
3049static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3050{
3051 struct super_block *sb = ac->ac_sb;
3052 struct ext4_locality_group *lg = ac->ac_lg;
3053
3054 BUG_ON(lg == NULL);
3055 if (EXT4_SB(sb)->s_stripe)
3056 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
3057 else
3058 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3059 mb_debug("#%u: goal %u blocks for locality group\n",
3060 current->pid, ac->ac_g_ex.fe_len);
3061}
3062
3063/*
3064 * Normalization means making the request better in terms of
3065 * size and alignment
3066 */
3067static noinline_for_stack void
3068ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3069 struct ext4_allocation_request *ar)
3070{
3071 int bsbits, max;
3072 ext4_lblk_t end;
3073 loff_t size, orig_size, start_off;
3074 ext4_lblk_t start, orig_start;
3075 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3076 struct ext4_prealloc_space *pa;
3077
3078 /* normalize only data requests; metadata requests
3079 do not need preallocation */
3080 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3081 return;
3082
3083 /* sometimes the caller may want exact blocks */
3084 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3085 return;
3086
3087 /* caller may indicate that preallocation isn't
3088 * required (it's a tail, for example) */
3089 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3090 return;
3091
3092 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3093 ext4_mb_normalize_group_request(ac);
3094 return;
3095 }
3096
3097 bsbits = ac->ac_sb->s_blocksize_bits;
3098
3099 /* first, let's learn the actual file size
3100 * as it will be once the current request is allocated */
3101 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3102 size = size << bsbits;
3103 if (size < i_size_read(ac->ac_inode))
3104 size = i_size_read(ac->ac_inode);
3105
3106 /* max size of free chunks */
3107 max = 2 << bsbits;
3108 
3109#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
3110 (req <= (size) || max <= (chunk_size))
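/*
 * Roughly: a size bucket is accepted if the predicted size fits in it
 * (req <= size), or if the largest free chunk we could hope for anyway
 * (max, the 2 << bsbits ceiling computed above) is already no bigger
 * than chunk_size, so a larger window would not help.
 */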
3111
3112 /* first, try to predict filesize */
3113 /* XXX: should this table be tunable? */
3114 start_off = 0;
3115 if (size <= 16 * 1024) {
3116 size = 16 * 1024;
3117 } else if (size <= 32 * 1024) {
3118 size = 32 * 1024;
3119 } else if (size <= 64 * 1024) {
3120 size = 64 * 1024;
3121 } else if (size <= 128 * 1024) {
3122 size = 128 * 1024;
3123 } else if (size <= 256 * 1024) {
3124 size = 256 * 1024;
3125 } else if (size <= 512 * 1024) {
3126 size = 512 * 1024;
3127 } else if (size <= 1024 * 1024) {
3128 size = 1024 * 1024;
3129 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3130 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3131 (21 - bsbits)) << 21;
3132 size = 2 * 1024 * 1024;
3133 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
c9de560d
AT
3134 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3135 (22 - bsbits)) << 22;
3136 size = 4 * 1024 * 1024;
3137 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3138 (8<<20)>>bsbits, max, 8 * 1024)) {
3139 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3140 (23 - bsbits)) << 23;
3141 size = 8 * 1024 * 1024;
3142 } else {
3143 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3144 size = ac->ac_o_ex.fe_len << bsbits;
3145 }
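	/*
	 * Worked example: a file that would be 100KB after this
	 * allocation falls into the "size <= 128 * 1024" bucket, so the
	 * goal becomes a 128KB window at start_off = 0; with 4KB blocks
	 * the line below turns that into size = 32 blocks.
	 */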
3146 orig_size = size = size >> bsbits;
3147 orig_start = start = start_off >> bsbits;
3148
3149 /* don't cover already allocated blocks in selected range */
3150 if (ar->pleft && start <= ar->lleft) {
3151 size -= ar->lleft + 1 - start;
3152 start = ar->lleft + 1;
3153 }
3154 if (ar->pright && start + size - 1 >= ar->lright)
3155 size -= start + size - ar->lright;
3156
3157 end = start + size;
3158
3159 /* check we don't cross already preallocated blocks */
3160 rcu_read_lock();
3161 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3162 ext4_lblk_t pa_end;
3163 
3164 if (pa->pa_deleted)
3165 continue;
3166 spin_lock(&pa->pa_lock);
3167 if (pa->pa_deleted) {
3168 spin_unlock(&pa->pa_lock);
3169 continue;
3170 }
3171
3172 pa_end = pa->pa_lstart + pa->pa_len;
3173
3174 /* PA must not overlap original request */
3175 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3176 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3177
3178 /* skip PAs this normalized request doesn't overlap with */
3179 if (pa->pa_lstart >= end) {
3180 spin_unlock(&pa->pa_lock);
3181 continue;
3182 }
3183 if (pa_end <= start) {
3184 spin_unlock(&pa->pa_lock);
3185 continue;
3186 }
3187 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3188
3189 if (pa_end <= ac->ac_o_ex.fe_logical) {
3190 BUG_ON(pa_end < start);
3191 start = pa_end;
3192 }
3193
3194 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3195 BUG_ON(pa->pa_lstart > end);
3196 end = pa->pa_lstart;
3197 }
3198 spin_unlock(&pa->pa_lock);
3199 }
3200 rcu_read_unlock();
3201 size = end - start;
3202
3203 /* XXX: extra loop to check we really don't overlap preallocations */
3204 rcu_read_lock();
3205 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3206 ext4_lblk_t pa_end;
3207 spin_lock(&pa->pa_lock);
3208 if (pa->pa_deleted == 0) {
3209 pa_end = pa->pa_lstart + pa->pa_len;
3210 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3211 }
3212 spin_unlock(&pa->pa_lock);
3213 }
3214 rcu_read_unlock();
3215
3216 if (start + size <= ac->ac_o_ex.fe_logical &&
3217 start > ac->ac_o_ex.fe_logical) {
3218 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3219 (unsigned long) start, (unsigned long) size,
3220 (unsigned long) ac->ac_o_ex.fe_logical);
3221 }
3222 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3223 start > ac->ac_o_ex.fe_logical);
3224 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3225
3226 /* now prepare goal request */
3227
3228 /* XXX: is it better to align blocks with respect to logical
3229 * placement or to satisfy a big request as is */
3230 ac->ac_g_ex.fe_logical = start;
3231 ac->ac_g_ex.fe_len = size;
3232
3233 /* define goal start in order to merge */
3234 if (ar->pright && (ar->lright == (start + size))) {
3235 /* merge to the right */
3236 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3237 &ac->ac_f_ex.fe_group,
3238 &ac->ac_f_ex.fe_start);
3239 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3240 }
3241 if (ar->pleft && (ar->lleft + 1 == start)) {
3242 /* merge to the left */
3243 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3244 &ac->ac_f_ex.fe_group,
3245 &ac->ac_f_ex.fe_start);
3246 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3247 }
3248
3249 mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3250 (unsigned) orig_size, (unsigned) start);
3251}
3252
3253static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3254{
3255 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3256
3257 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3258 atomic_inc(&sbi->s_bal_reqs);
3259 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3260 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3261 atomic_inc(&sbi->s_bal_success);
3262 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3263 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3264 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3265 atomic_inc(&sbi->s_bal_goals);
3266 if (ac->ac_found > sbi->s_mb_max_to_scan)
3267 atomic_inc(&sbi->s_bal_breaks);
3268 }
3269
3270 ext4_mb_store_history(ac);
3271}
3272
3273/*
3274 * use blocks preallocated to inode
3275 */
3276static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3277 struct ext4_prealloc_space *pa)
3278{
3279 ext4_fsblk_t start;
3280 ext4_fsblk_t end;
3281 int len;
3282
3283 /* found preallocated blocks, use them */
3284 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3285 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3286 len = end - start;
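	/*
	 * Worked example: pa_lstart = 100, pa_pstart = 5000,
	 * pa_len = 50; a request for logical blocks 110..119 gives
	 * start = 5010 and end = min(5050, 5020) = 5020, i.e. 10
	 * physical blocks taken from the middle of the preallocation.
	 */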
3287 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3288 &ac->ac_b_ex.fe_start);
3289 ac->ac_b_ex.fe_len = len;
3290 ac->ac_status = AC_STATUS_FOUND;
3291 ac->ac_pa = pa;
3292
3293 BUG_ON(start < pa->pa_pstart);
3294 BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3295 BUG_ON(pa->pa_free < len);
3296 pa->pa_free -= len;
3297
3298 mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
3299}
3300
3301/*
3302 * use blocks preallocated to locality group
3303 */
3304static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3305 struct ext4_prealloc_space *pa)
3306{
3307 unsigned int len = ac->ac_o_ex.fe_len;
3308 
3309 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3310 &ac->ac_b_ex.fe_group,
3311 &ac->ac_b_ex.fe_start);
3312 ac->ac_b_ex.fe_len = len;
3313 ac->ac_status = AC_STATUS_FOUND;
3314 ac->ac_pa = pa;
3315
3316 /* we don't correct pa_pstart or pa_len here to avoid a
3317 * possible race when the group is being loaded concurrently;
3318 * instead we correct the pa later, after blocks are marked
3319 * in the on-disk bitmap -- see ext4_mb_release_context().
3320 * Other CPUs are prevented from allocating from this pa by lg_mutex
3321 */
3322 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3323}
3324
3325/*
3326 * Return the prealloc space that has the minimal distance
3327 * from the goal block. @cpa is the prealloc
3328 * space with the currently known minimal distance
3329 * from the goal block.
3330 */
3331static struct ext4_prealloc_space *
3332ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3333 struct ext4_prealloc_space *pa,
3334 struct ext4_prealloc_space *cpa)
3335{
3336 ext4_fsblk_t cur_distance, new_distance;
3337
3338 if (cpa == NULL) {
3339 atomic_inc(&pa->pa_count);
3340 return pa;
3341 }
3342 cur_distance = abs(goal_block - cpa->pa_pstart);
3343 new_distance = abs(goal_block - pa->pa_pstart);
3344
3345 if (cur_distance < new_distance)
3346 return cpa;
3347
3348 /* drop the previous reference */
3349 atomic_dec(&cpa->pa_count);
3350 atomic_inc(&pa->pa_count);
3351 return pa;
3352}
3353
3354/*
3355 * search goal blocks in preallocated space
3356 */
3357static noinline_for_stack int
3358ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3359 {
3360 int order, i;
3361 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3362 struct ext4_locality_group *lg;
3363 struct ext4_prealloc_space *pa, *cpa = NULL;
3364 ext4_fsblk_t goal_block;
3365
3366 /* only data can be preallocated */
3367 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3368 return 0;
3369
3370 /* first, try per-file preallocation */
3371 rcu_read_lock();
3372 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3373
3374 /* all fields in this condition don't change,
3375 * so we can skip locking for them */
3376 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3377 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3378 continue;
3379
3380 /* found preallocated blocks, use them */
3381 spin_lock(&pa->pa_lock);
3382 if (pa->pa_deleted == 0 && pa->pa_free) {
3383 atomic_inc(&pa->pa_count);
3384 ext4_mb_use_inode_pa(ac, pa);
3385 spin_unlock(&pa->pa_lock);
3386 ac->ac_criteria = 10;
3387 rcu_read_unlock();
3388 return 1;
3389 }
3390 spin_unlock(&pa->pa_lock);
3391 }
3392 rcu_read_unlock();
3393
3394 /* can we use group allocation? */
3395 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3396 return 0;
3397
3398 /* inode may have no locality group for some reason */
3399 lg = ac->ac_lg;
3400 if (lg == NULL)
3401 return 0;
3402 order = fls(ac->ac_o_ex.fe_len) - 1;
3403 if (order > PREALLOC_TB_SIZE - 1)
3404 /* The max size of hash table is PREALLOC_TB_SIZE */
3405 order = PREALLOC_TB_SIZE - 1;
3406
3407 goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3408 ac->ac_g_ex.fe_start +
3409 le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3410 /*
3411 * search for the prealloc space with the minimal
3412 * distance from the goal block.
3413 */
3414 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3415 rcu_read_lock();
3416 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3417 pa_inode_list) {
3418 spin_lock(&pa->pa_lock);
3419 if (pa->pa_deleted == 0 &&
3420 pa->pa_free >= ac->ac_o_ex.fe_len) {
3421
3422 cpa = ext4_mb_check_group_pa(goal_block,
3423 pa, cpa);
3424 }
3425 spin_unlock(&pa->pa_lock);
3426 }
3427 rcu_read_unlock();
3428 }
3429 if (cpa) {
3430 ext4_mb_use_group_pa(ac, cpa);
3431 ac->ac_criteria = 20;
3432 return 1;
3433 }
3434 return 0;
3435}
3436
3437/*
3438 * the function goes through all blocks freed in the group
3439 * but not yet committed and marks them used in the in-core bitmap.
3440 * the buddy must be generated from this bitmap
3441 * Needs to be called with the ext4 group lock held
3442 */
3443static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3444 ext4_group_t group)
3445{
3446 struct rb_node *n;
3447 struct ext4_group_info *grp;
3448 struct ext4_free_data *entry;
3449
3450 grp = ext4_get_group_info(sb, group);
3451 n = rb_first(&(grp->bb_free_root));
3452
3453 while (n) {
3454 entry = rb_entry(n, struct ext4_free_data, node);
3455 mb_set_bits(bitmap, entry->start_blk, entry->count);
3456 n = rb_next(n);
3457 }
3458 return;
3459}
3460
3461/*
3462 * the function goes through all preallocation in this group and marks them
3463 * used in the in-core bitmap. the buddy must be generated from this bitmap.
3464 * Needs to be called with the ext4 group lock held
3465 */
3466static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3467 ext4_group_t group)
3468{
3469 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3470 struct ext4_prealloc_space *pa;
3471 struct list_head *cur;
3472 ext4_group_t groupnr;
3473 ext4_grpblk_t start;
3474 int preallocated = 0;
3475 int count = 0;
3476 int len;
3477
3478 /* all forms of preallocation discard first load the group,
3479 * so the only competing code is preallocation use.
3480 * we don't need any locking here.
3481 * notice we do NOT ignore preallocations with pa_deleted set;
3482 * otherwise we could leave used blocks available for
3483 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3484 * is dropping the preallocation
3485 */
3486 list_for_each(cur, &grp->bb_prealloc_list) {
3487 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3488 spin_lock(&pa->pa_lock);
3489 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3490 &groupnr, &start);
3491 len = pa->pa_len;
3492 spin_unlock(&pa->pa_lock);
3493 if (unlikely(len == 0))
3494 continue;
3495 BUG_ON(groupnr != group);
3496 mb_set_bits(bitmap, start, len);
3497 preallocated += len;
3498 count++;
3499 }
3500 mb_debug("preallocated %u for group %u\n", preallocated, group);
3501}
3502
3503static void ext4_mb_pa_callback(struct rcu_head *head)
3504{
3505 struct ext4_prealloc_space *pa;
3506 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3507 kmem_cache_free(ext4_pspace_cachep, pa);
3508}
3509
3510/*
3511 * drops a reference to preallocated space descriptor
3512 * if this was the last reference and the space is consumed
3513 */
3514static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3515 struct super_block *sb, struct ext4_prealloc_space *pa)
3516{
3517 ext4_group_t grp;
3518 ext4_fsblk_t grp_blk;
3519
3520 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3521 return;
3522
3523 /* in this short window concurrent discard can set pa_deleted */
3524 spin_lock(&pa->pa_lock);
3525 if (pa->pa_deleted == 1) {
3526 spin_unlock(&pa->pa_lock);
3527 return;
3528 }
3529
3530 pa->pa_deleted = 1;
3531 spin_unlock(&pa->pa_lock);
3532
d33a1976 3533 grp_blk = pa->pa_pstart;
cc0fb9ad
AK
3534 /*
3535 * If doing group-based preallocation, pa_pstart may be in the
3536 * next group when pa is used up
3537 */
3538 if (pa->pa_type == MB_GROUP_PA)
d33a1976
ES
3539 grp_blk--;
3540
3541 ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
c9de560d
AT
3542
3543 /*
3544 * possible race:
3545 *
3546 * P1 (buddy init) P2 (regular allocation)
3547 * find block B in PA
3548 * copy on-disk bitmap to buddy
3549 * mark B in on-disk bitmap
3550 * drop PA from group
3551 * mark all PAs in buddy
3552 *
3553 * thus, P1 initializes buddy with B available. to prevent this
3554 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3555 * against that pair
3556 */
3557 ext4_lock_group(sb, grp);
3558 list_del(&pa->pa_group_list);
3559 ext4_unlock_group(sb, grp);
3560
3561 spin_lock(pa->pa_obj_lock);
3562 list_del_rcu(&pa->pa_inode_list);
3563 spin_unlock(pa->pa_obj_lock);
3564
3565 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3566}
3567
3568/*
3569 * creates new preallocated space for given inode
3570 */
4ddfef7b
ES
3571static noinline_for_stack int
3572ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3573{
3574 struct super_block *sb = ac->ac_sb;
3575 struct ext4_prealloc_space *pa;
3576 struct ext4_group_info *grp;
3577 struct ext4_inode_info *ei;
3578
3579 /* preallocate only when found space is larger than requested */
3580 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3581 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3582 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3583
3584 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3585 if (pa == NULL)
3586 return -ENOMEM;
3587
3588 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3589 int winl;
3590 int wins;
3591 int win;
3592 int offs;
3593
3594 /* we can't allocate as much as the normalizer wants,
3595 * so the found space must get a proper lstart
3596 * to cover the original request */
3597 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3598 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3599
3600 /* we're limited by the original request in that
3601 * the logical block must be covered anyway.
3602 * winl is the window we can move our chunk within */
3603 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3604
3605 /* also, we should cover the whole original request */
3606 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3607
3608 /* the smallest one defines real window */
3609 win = min(winl, wins);
3610
3611 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3612 if (offs && offs < win)
3613 win = offs;
3614
3615 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3616 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3617 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3618 }
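/*
 * Worked example with illustrative numbers: if
 * ac_o_ex.fe_logical = 100, ac_g_ex.fe_logical = 96,
 * ac_o_ex.fe_len = 8 and ac_b_ex.fe_len = 16, then
 * winl = 100 - 96 = 4, wins = 16 - 8 = 8, win = min(4, 8) = 4
 * and offs = 100 % 16 = 4; offs is not smaller than win, so
 * win stays 4 and fe_logical becomes 100 - 4 = 96, keeping the
 * original blocks [100, 108) inside the 16-block preallocation.
 */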
3619
3620 /* preallocation can change ac_b_ex, thus we store actually
3621 * allocated blocks for history */
3622 ac->ac_f_ex = ac->ac_b_ex;
3623
3624 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3625 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3626 pa->pa_len = ac->ac_b_ex.fe_len;
3627 pa->pa_free = pa->pa_len;
3628 atomic_set(&pa->pa_count, 1);
3629 spin_lock_init(&pa->pa_lock);
d794bf8e
AK
3630 INIT_LIST_HEAD(&pa->pa_inode_list);
3631 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 3632 pa->pa_deleted = 0;
cc0fb9ad 3633 pa->pa_type = MB_INODE_PA;
c9de560d
AT
3634
3635 mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3636 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
ba80b101
TT
3637 trace_mark(ext4_mb_new_inode_pa,
3638 "dev %s ino %lu pstart %llu len %u lstart %u",
3639 sb->s_id, ac->ac_inode->i_ino,
3640 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
c9de560d
AT
3641
3642 ext4_mb_use_inode_pa(ac, pa);
3643 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3644
3645 ei = EXT4_I(ac->ac_inode);
3646 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3647
3648 pa->pa_obj_lock = &ei->i_prealloc_lock;
3649 pa->pa_inode = ac->ac_inode;
3650
3651 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3652 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3653 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3654
3655 spin_lock(pa->pa_obj_lock);
3656 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3657 spin_unlock(pa->pa_obj_lock);
3658
3659 return 0;
3660}
3661
3662/*
3663 * creates new preallocated space for the locality group the inode belongs to
3664 */
4ddfef7b
ES
3665static noinline_for_stack int
3666ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3667{
3668 struct super_block *sb = ac->ac_sb;
3669 struct ext4_locality_group *lg;
3670 struct ext4_prealloc_space *pa;
3671 struct ext4_group_info *grp;
3672
3673 /* preallocate only when found space is larger than requested */
3674 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3675 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3676 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3677
3678 BUG_ON(ext4_pspace_cachep == NULL);
3679 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3680 if (pa == NULL)
3681 return -ENOMEM;
3682
3683 /* preallocation can change ac_b_ex, thus we store actually
3684 * allocated blocks for history */
3685 ac->ac_f_ex = ac->ac_b_ex;
3686
3687 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3688 pa->pa_lstart = pa->pa_pstart;
3689 pa->pa_len = ac->ac_b_ex.fe_len;
3690 pa->pa_free = pa->pa_len;
3691 atomic_set(&pa->pa_count, 1);
3692 spin_lock_init(&pa->pa_lock);
6be2ded1 3693 INIT_LIST_HEAD(&pa->pa_inode_list);
d794bf8e 3694 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 3695 pa->pa_deleted = 0;
cc0fb9ad 3696 pa->pa_type = MB_GROUP_PA;
c9de560d
AT
3697
3698 mb_debug("new group pa %p: %llu/%u for %u\n", pa,
ba80b101
TT
3699 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3700 trace_mark(ext4_mb_new_group_pa, "dev %s pstart %llu len %u lstart %u",
3701 sb->s_id, pa->pa_pstart, pa->pa_len, pa->pa_lstart);
c9de560d
AT
3702
3703 ext4_mb_use_group_pa(ac, pa);
3704 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3705
3706 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3707 lg = ac->ac_lg;
3708 BUG_ON(lg == NULL);
3709
3710 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3711 pa->pa_inode = NULL;
3712
3713 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3714 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3715 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3716
6be2ded1
AK
3717 /*
3718 * We will later add the new pa to the right bucket
3719 * after updating the pa_free in ext4_mb_release_context
3720 */
c9de560d
AT
3721 return 0;
3722}
3723
3724static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3725{
3726 int err;
3727
3728 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3729 err = ext4_mb_new_group_pa(ac);
3730 else
3731 err = ext4_mb_new_inode_pa(ac);
3732 return err;
3733}
3734
3735/*
3736 * finds all unused blocks in on-disk bitmap, frees them in
3737 * in-core bitmap and buddy.
3738 * @pa must be unlinked from inode and group lists, so that
3739 * nobody else can find/use it.
3740 * the caller MUST hold group/inode locks.
3741 * TODO: optimize the case when there are no in-core structures yet
3742 */
4ddfef7b
ES
3743static noinline_for_stack int
3744ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
c83617db
AK
3745 struct ext4_prealloc_space *pa,
3746 struct ext4_allocation_context *ac)
c9de560d 3747{
c9de560d
AT
3748 struct super_block *sb = e4b->bd_sb;
3749 struct ext4_sb_info *sbi = EXT4_SB(sb);
498e5f24
TT
3750 unsigned int end;
3751 unsigned int next;
c9de560d
AT
3752 ext4_group_t group;
3753 ext4_grpblk_t bit;
ba80b101 3754 unsigned long long grp_blk_start;
c9de560d
AT
3755 sector_t start;
3756 int err = 0;
3757 int free = 0;
3758
3759 BUG_ON(pa->pa_deleted == 0);
3760 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
ba80b101 3761 grp_blk_start = pa->pa_pstart - bit;
c9de560d
AT
3762 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3763 end = bit + pa->pa_len;
3764
256bdb49
ES
3765 if (ac) {
3766 ac->ac_sb = sb;
3767 ac->ac_inode = pa->pa_inode;
3768 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3769 }
c9de560d
AT
3770
3771 while (bit < end) {
ffad0a44 3772 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
3773 if (bit >= end)
3774 break;
ffad0a44 3775 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
3776 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3777 le32_to_cpu(sbi->s_es->s_first_data_block);
3778 mb_debug(" free preallocated %u/%u in group %u\n",
3779 (unsigned) start, (unsigned) next - bit,
3780 (unsigned) group);
3781 free += next - bit;
3782
256bdb49
ES
3783 if (ac) {
3784 ac->ac_b_ex.fe_group = group;
3785 ac->ac_b_ex.fe_start = bit;
3786 ac->ac_b_ex.fe_len = next - bit;
3787 ac->ac_b_ex.fe_logical = 0;
3788 ext4_mb_store_history(ac);
3789 }
c9de560d 3790
ba80b101
TT
3791 trace_mark(ext4_mb_release_inode_pa,
3792 "dev %s ino %lu block %llu count %u",
3793 sb->s_id, pa->pa_inode->i_ino, grp_blk_start + bit,
3794 next - bit);
c9de560d
AT
3795 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3796 bit = next + 1;
3797 }
3798 if (free != pa->pa_free) {
26346ff6 3799 printk(KERN_CRIT "pa %p: logical %lu, phys %lu, len %lu\n",
c9de560d
AT
3800 pa, (unsigned long) pa->pa_lstart,
3801 (unsigned long) pa->pa_pstart,
3802 (unsigned long) pa->pa_len);
5d1b1b3f
AK
3803 ext4_grp_locked_error(sb, group,
3804 __func__, "free %u, pa_free %u",
3805 free, pa->pa_free);
e56eb659
AK
3806 /*
3807 * pa is already deleted so we use the value obtained
3808 * from the bitmap and continue.
3809 */
c9de560d 3810 }
c9de560d
AT
3811 atomic_add(free, &sbi->s_mb_discarded);
3812
3813 return err;
3814}
3815
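/*
 * Illustrative trace of the bitmap scan in ext4_mb_release_inode_pa():
 * for bits [0..7) holding 1 1 0 0 1 0 1, the first pass finds zero bit 2
 * and set bit 4 and frees the run [2, 4); the next pass starts at bit 5,
 * finds zero bit 5 and set bit 6, and frees [5, 6).  free ends up 3,
 * matching the three zero bits, and is cross-checked against pa_free.
 */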
4ddfef7b
ES
3816static noinline_for_stack int
3817ext4_mb_release_group_pa(struct ext4_buddy *e4b,
c83617db
AK
3818 struct ext4_prealloc_space *pa,
3819 struct ext4_allocation_context *ac)
c9de560d 3820{
c9de560d
AT
3821 struct super_block *sb = e4b->bd_sb;
3822 ext4_group_t group;
3823 ext4_grpblk_t bit;
3824
256bdb49
ES
3825 if (ac)
3826 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
c9de560d 3827
ba80b101
TT
3828 trace_mark(ext4_mb_release_group_pa, "dev %s pstart %llu len %d",
3829 sb->s_id, pa->pa_pstart, pa->pa_len);
c9de560d
AT
3830 BUG_ON(pa->pa_deleted == 0);
3831 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3832 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3833 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3834 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3835
256bdb49
ES
3836 if (ac) {
3837 ac->ac_sb = sb;
3838 ac->ac_inode = NULL;
3839 ac->ac_b_ex.fe_group = group;
3840 ac->ac_b_ex.fe_start = bit;
3841 ac->ac_b_ex.fe_len = pa->pa_len;
3842 ac->ac_b_ex.fe_logical = 0;
3843 ext4_mb_store_history(ac);
256bdb49 3844 }
c9de560d
AT
3845
3846 return 0;
3847}
3848
3849/*
3850 * releases all preallocations in given group
3851 *
3852 * first, we need to decide discard policy:
3853 * - when do we discard
3854 * 1) ENOSPC
3855 * - how many do we discard
3856 * 1) how many requested
3857 */
4ddfef7b
ES
3858static noinline_for_stack int
3859ext4_mb_discard_group_preallocations(struct super_block *sb,
c9de560d
AT
3860 ext4_group_t group, int needed)
3861{
3862 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3863 struct buffer_head *bitmap_bh = NULL;
3864 struct ext4_prealloc_space *pa, *tmp;
c83617db 3865 struct ext4_allocation_context *ac;
c9de560d
AT
3866 struct list_head list;
3867 struct ext4_buddy e4b;
3868 int err;
3869 int busy = 0;
3870 int free = 0;
3871
a9df9a49 3872 mb_debug("discard preallocation for group %u\n", group);
c9de560d
AT
3873
3874 if (list_empty(&grp->bb_prealloc_list))
3875 return 0;
3876
574ca174 3877 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 3878 if (bitmap_bh == NULL) {
ce89f46c 3879 ext4_error(sb, __func__, "Error in reading block "
a9df9a49 3880 "bitmap for %u", group);
ce89f46c 3881 return 0;
c9de560d
AT
3882 }
3883
3884 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c
AK
3885 if (err) {
3886 ext4_error(sb, __func__, "Error in loading buddy "
a9df9a49 3887 "information for %u", group);
ce89f46c
AK
3888 put_bh(bitmap_bh);
3889 return 0;
3890 }
c9de560d
AT
3891
3892 if (needed == 0)
3893 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3894
c9de560d 3895 INIT_LIST_HEAD(&list);
c83617db 3896 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
c9de560d
AT
3897repeat:
3898 ext4_lock_group(sb, group);
3899 list_for_each_entry_safe(pa, tmp,
3900 &grp->bb_prealloc_list, pa_group_list) {
3901 spin_lock(&pa->pa_lock);
3902 if (atomic_read(&pa->pa_count)) {
3903 spin_unlock(&pa->pa_lock);
3904 busy = 1;
3905 continue;
3906 }
3907 if (pa->pa_deleted) {
3908 spin_unlock(&pa->pa_lock);
3909 continue;
3910 }
3911
3912 /* seems this one can be freed ... */
3913 pa->pa_deleted = 1;
3914
3915 /* we can trust pa_free ... */
3916 free += pa->pa_free;
3917
3918 spin_unlock(&pa->pa_lock);
3919
3920 list_del(&pa->pa_group_list);
3921 list_add(&pa->u.pa_tmp_list, &list);
3922 }
3923
3924 /* if we still need more blocks and some PAs were used, try again */
3925 if (free < needed && busy) {
3926 busy = 0;
3927 ext4_unlock_group(sb, group);
3928 /*
3929 * Yield the CPU here so that we don't get soft lockup
3930 * in non preempt case.
3931 */
3932 yield();
3933 goto repeat;
3934 }
3935
3936 /* found anything to free? */
3937 if (list_empty(&list)) {
3938 BUG_ON(free != 0);
3939 goto out;
3940 }
3941
3942 /* now free all selected PAs */
3943 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3944
3945 /* remove from object (inode or locality group) */
3946 spin_lock(pa->pa_obj_lock);
3947 list_del_rcu(&pa->pa_inode_list);
3948 spin_unlock(pa->pa_obj_lock);
3949
cc0fb9ad 3950 if (pa->pa_type == MB_GROUP_PA)
c83617db 3951 ext4_mb_release_group_pa(&e4b, pa, ac);
c9de560d 3952 else
c83617db 3953 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
c9de560d
AT
3954
3955 list_del(&pa->u.pa_tmp_list);
3956 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3957 }
3958
3959out:
3960 ext4_unlock_group(sb, group);
c83617db
AK
3961 if (ac)
3962 kmem_cache_free(ext4_ac_cachep, ac);
c9de560d
AT
3963 ext4_mb_release_desc(&e4b);
3964 put_bh(bitmap_bh);
3965 return free;
3966}
3967
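/*
 * Descriptive note on the pattern above: PAs are only marked pa_deleted
 * and collected onto a private list while the group is locked; the
 * expensive release work (bitmap/buddy updates, RCU free) happens
 * afterwards from that list.  If busy PAs (pa_count != 0) left the
 * request unsatisfied, the code yields and retries instead of spinning
 * with the group locked.
 */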
3968/*
3969 * releases all non-used preallocated blocks for given inode
3970 *
3971 * It's important to discard preallocations under i_data_sem
3972 * We don't want another block to be served from the prealloc
3973 * space when we are discarding the inode prealloc space.
3974 *
3975 * FIXME!! Make sure it is valid at all the call sites
3976 */
c2ea3fde 3977void ext4_discard_preallocations(struct inode *inode)
c9de560d
AT
3978{
3979 struct ext4_inode_info *ei = EXT4_I(inode);
3980 struct super_block *sb = inode->i_sb;
3981 struct buffer_head *bitmap_bh = NULL;
3982 struct ext4_prealloc_space *pa, *tmp;
c83617db 3983 struct ext4_allocation_context *ac;
c9de560d
AT
3984 ext4_group_t group = 0;
3985 struct list_head list;
3986 struct ext4_buddy e4b;
3987 int err;
3988
c2ea3fde 3989 if (!S_ISREG(inode->i_mode)) {
c9de560d
AT
3990 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3991 return;
3992 }
3993
3994 mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
ba80b101
TT
3995 trace_mark(ext4_discard_preallocations, "dev %s ino %lu", sb->s_id,
3996 inode->i_ino);
c9de560d
AT
3997
3998 INIT_LIST_HEAD(&list);
3999
c83617db 4000 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
c9de560d
AT
4001repeat:
4002 /* first, collect all pa's in the inode */
4003 spin_lock(&ei->i_prealloc_lock);
4004 while (!list_empty(&ei->i_prealloc_list)) {
4005 pa = list_entry(ei->i_prealloc_list.next,
4006 struct ext4_prealloc_space, pa_inode_list);
4007 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4008 spin_lock(&pa->pa_lock);
4009 if (atomic_read(&pa->pa_count)) {
4010 /* this shouldn't happen often - nobody should
4011 * use preallocation while we're discarding it */
4012 spin_unlock(&pa->pa_lock);
4013 spin_unlock(&ei->i_prealloc_lock);
4014 printk(KERN_ERR "uh-oh! used pa while discarding\n");
4015 WARN_ON(1);
4016 schedule_timeout_uninterruptible(HZ);
4017 goto repeat;
4018
4019 }
4020 if (pa->pa_deleted == 0) {
4021 pa->pa_deleted = 1;
4022 spin_unlock(&pa->pa_lock);
4023 list_del_rcu(&pa->pa_inode_list);
4024 list_add(&pa->u.pa_tmp_list, &list);
4025 continue;
4026 }
4027
4028 /* someone is deleting pa right now */
4029 spin_unlock(&pa->pa_lock);
4030 spin_unlock(&ei->i_prealloc_lock);
4031
4032 /* we have to wait here because pa_deleted
4033 * doesn't mean pa is already unlinked from
4034 * the list. as we might be called from
4035 * ->clear_inode() the inode will get freed
4036 * and a concurrent thread which is unlinking
4037 * pa from the inode's list may access already
4038 * freed memory; bad-bad-bad */
4039
4040 /* XXX: if this happens too often, we can
4041 * add a flag to force wait only in case
4042 * of ->clear_inode(), but not in case of
4043 * regular truncate */
4044 schedule_timeout_uninterruptible(HZ);
4045 goto repeat;
4046 }
4047 spin_unlock(&ei->i_prealloc_lock);
4048
4049 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
cc0fb9ad 4050 BUG_ON(pa->pa_type != MB_INODE_PA);
c9de560d
AT
4051 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4052
4053 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c
AK
4054 if (err) {
4055 ext4_error(sb, __func__, "Error in loading buddy "
a9df9a49 4056 "information for %u", group);
ce89f46c
AK
4057 continue;
4058 }
c9de560d 4059
574ca174 4060 bitmap_bh = ext4_read_block_bitmap(sb, group);
c9de560d 4061 if (bitmap_bh == NULL) {
ce89f46c 4062 ext4_error(sb, __func__, "Error in reading block "
a9df9a49 4063 "bitmap for %u", group);
c9de560d 4064 ext4_mb_release_desc(&e4b);
ce89f46c 4065 continue;
c9de560d
AT
4066 }
4067
4068 ext4_lock_group(sb, group);
4069 list_del(&pa->pa_group_list);
c83617db 4070 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
c9de560d
AT
4071 ext4_unlock_group(sb, group);
4072
4073 ext4_mb_release_desc(&e4b);
4074 put_bh(bitmap_bh);
4075
4076 list_del(&pa->u.pa_tmp_list);
4077 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4078 }
c83617db
AK
4079 if (ac)
4080 kmem_cache_free(ext4_ac_cachep, ac);
c9de560d
AT
4081}
4082
4083/*
4084 * finds all preallocated spaces and returns blocks being freed to them;
4085 * if a preallocated space becomes full (no block is used from the space)
4086 * then the function frees the space in the buddy
4087 * XXX: at the moment, truncate (which is the only way to free blocks)
4088 * discards all preallocations
4089 */
4090static void ext4_mb_return_to_preallocation(struct inode *inode,
4091 struct ext4_buddy *e4b,
4092 sector_t block, int count)
4093{
4094 BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
4095}
4096#ifdef MB_DEBUG
4097static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4098{
4099 struct super_block *sb = ac->ac_sb;
8df9675f 4100 ext4_group_t ngroups, i;
c9de560d
AT
4101
4102 printk(KERN_ERR "EXT4-fs: Can't allocate:"
4103 " Allocation context details:\n");
4104 printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
4105 ac->ac_status, ac->ac_flags);
4106 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4107 "best %lu/%lu/%lu@%lu cr %d\n",
4108 (unsigned long)ac->ac_o_ex.fe_group,
4109 (unsigned long)ac->ac_o_ex.fe_start,
4110 (unsigned long)ac->ac_o_ex.fe_len,
4111 (unsigned long)ac->ac_o_ex.fe_logical,
4112 (unsigned long)ac->ac_g_ex.fe_group,
4113 (unsigned long)ac->ac_g_ex.fe_start,
4114 (unsigned long)ac->ac_g_ex.fe_len,
4115 (unsigned long)ac->ac_g_ex.fe_logical,
4116 (unsigned long)ac->ac_b_ex.fe_group,
4117 (unsigned long)ac->ac_b_ex.fe_start,
4118 (unsigned long)ac->ac_b_ex.fe_len,
4119 (unsigned long)ac->ac_b_ex.fe_logical,
4120 (int)ac->ac_criteria);
4121 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4122 ac->ac_found);
4123 printk(KERN_ERR "EXT4-fs: groups: \n");
8df9675f
TT
4124 ngroups = ext4_get_groups_count(sb);
4125 for (i = 0; i < ngroups; i++) {
c9de560d
AT
4126 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4127 struct ext4_prealloc_space *pa;
4128 ext4_grpblk_t start;
4129 struct list_head *cur;
4130 ext4_lock_group(sb, i);
4131 list_for_each(cur, &grp->bb_prealloc_list) {
4132 pa = list_entry(cur, struct ext4_prealloc_space,
4133 pa_group_list);
4134 spin_lock(&pa->pa_lock);
4135 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4136 NULL, &start);
4137 spin_unlock(&pa->pa_lock);
4138 printk(KERN_ERR "PA:%lu:%d:%u \n", i,
4139 start, pa->pa_len);
4140 }
60bd63d1 4141 ext4_unlock_group(sb, i);
c9de560d
AT
4142
4143 if (grp->bb_free == 0)
4144 continue;
4145 printk(KERN_ERR "%lu: %d/%d \n",
4146 i, grp->bb_free, grp->bb_fragments);
4147 }
4148 printk(KERN_ERR "\n");
4149}
4150#else
4151static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4152{
4153 return;
4154}
4155#endif
4156
4157/*
4158 * We use locality group preallocation for small files. The size of the
4159 * file is determined by the current size or the resulting size after
4160 * allocation, whichever is larger
4161 *
b713a5ec 4162 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
c9de560d
AT
4163 */
4164static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4165{
4166 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4167 int bsbits = ac->ac_sb->s_blocksize_bits;
4168 loff_t size, isize;
4169
4170 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4171 return;
4172
4173 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4174 isize = i_size_read(ac->ac_inode) >> bsbits;
4175 size = max(size, isize);
4176
4177 /* don't use group allocation for large files */
4178 if (size >= sbi->s_mb_stream_request)
4179 return;
4180
4181 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4182 return;
4183
4184 BUG_ON(ac->ac_lg != NULL);
4185 /*
4186 * locality group prealloc space is per-CPU. The reason for having
4187 * a per-CPU locality group is to reduce the contention between block
4188 * requests from multiple CPUs.
4189 */
730c213c 4190 ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
c9de560d
AT
4191
4192 /* we're going to use group allocation */
4193 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4194
4195 /* serialize all allocations in the group */
4196 mutex_lock(&ac->ac_lg->lg_mutex);
4197}
4198
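/*
 * Illustrative example (default values assumed): with 4KiB blocks
 * (bsbits == 12) and mb_stream_req at its default of 16 blocks, a write
 * that leaves a file at 12 blocks takes the per-CPU locality-group path
 * set up above, while a 64-block file returns early and stays with
 * per-inode preallocation.
 */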
4ddfef7b
ES
4199static noinline_for_stack int
4200ext4_mb_initialize_context(struct ext4_allocation_context *ac,
c9de560d
AT
4201 struct ext4_allocation_request *ar)
4202{
4203 struct super_block *sb = ar->inode->i_sb;
4204 struct ext4_sb_info *sbi = EXT4_SB(sb);
4205 struct ext4_super_block *es = sbi->s_es;
4206 ext4_group_t group;
498e5f24
TT
4207 unsigned int len;
4208 ext4_fsblk_t goal;
c9de560d
AT
4209 ext4_grpblk_t block;
4210
4211 /* we can't allocate > group size */
4212 len = ar->len;
4213
4214 /* just a dirty hack to filter too big requests */
4215 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4216 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4217
4218 /* start searching from the goal */
4219 goal = ar->goal;
4220 if (goal < le32_to_cpu(es->s_first_data_block) ||
4221 goal >= ext4_blocks_count(es))
4222 goal = le32_to_cpu(es->s_first_data_block);
4223 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4224
4225 /* set up allocation goals */
4226 ac->ac_b_ex.fe_logical = ar->logical;
4227 ac->ac_b_ex.fe_group = 0;
4228 ac->ac_b_ex.fe_start = 0;
4229 ac->ac_b_ex.fe_len = 0;
4230 ac->ac_status = AC_STATUS_CONTINUE;
4231 ac->ac_groups_scanned = 0;
4232 ac->ac_ex_scanned = 0;
4233 ac->ac_found = 0;
4234 ac->ac_sb = sb;
4235 ac->ac_inode = ar->inode;
4236 ac->ac_o_ex.fe_logical = ar->logical;
4237 ac->ac_o_ex.fe_group = group;
4238 ac->ac_o_ex.fe_start = block;
4239 ac->ac_o_ex.fe_len = len;
4240 ac->ac_g_ex.fe_logical = ar->logical;
4241 ac->ac_g_ex.fe_group = group;
4242 ac->ac_g_ex.fe_start = block;
4243 ac->ac_g_ex.fe_len = len;
4244 ac->ac_f_ex.fe_len = 0;
4245 ac->ac_flags = ar->flags;
4246 ac->ac_2order = 0;
4247 ac->ac_criteria = 0;
4248 ac->ac_pa = NULL;
4249 ac->ac_bitmap_page = NULL;
4250 ac->ac_buddy_page = NULL;
8556e8f3 4251 ac->alloc_semp = NULL;
c9de560d
AT
4252 ac->ac_lg = NULL;
4253
4254 /* we have to define the context: will we work with a file or
4255 * a locality group. this is a policy, actually */
4256 ext4_mb_group_or_file(ac);
4257
4258 mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4259 "left: %u/%u, right %u/%u to %swritable\n",
4260 (unsigned) ar->len, (unsigned) ar->logical,
4261 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4262 (unsigned) ar->lleft, (unsigned) ar->pleft,
4263 (unsigned) ar->lright, (unsigned) ar->pright,
4264 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4265 return 0;
4266
4267}
4268
6be2ded1
AK
4269static noinline_for_stack void
4270ext4_mb_discard_lg_preallocations(struct super_block *sb,
4271 struct ext4_locality_group *lg,
4272 int order, int total_entries)
4273{
4274 ext4_group_t group = 0;
4275 struct ext4_buddy e4b;
4276 struct list_head discard_list;
4277 struct ext4_prealloc_space *pa, *tmp;
4278 struct ext4_allocation_context *ac;
4279
4280 mb_debug("discard locality group preallocation\n");
4281
4282 INIT_LIST_HEAD(&discard_list);
4283 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4284
4285 spin_lock(&lg->lg_prealloc_lock);
4286 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4287 pa_inode_list) {
4288 spin_lock(&pa->pa_lock);
4289 if (atomic_read(&pa->pa_count)) {
4290 /*
4291 * This is the pa that we just used
4292 * for block allocation. So don't
4293 * free that
4294 */
4295 spin_unlock(&pa->pa_lock);
4296 continue;
4297 }
4298 if (pa->pa_deleted) {
4299 spin_unlock(&pa->pa_lock);
4300 continue;
4301 }
4302 /* only lg prealloc space */
cc0fb9ad 4303 BUG_ON(pa->pa_type != MB_GROUP_PA);
6be2ded1
AK
4304
4305 /* seems this one can be freed ... */
4306 pa->pa_deleted = 1;
4307 spin_unlock(&pa->pa_lock);
4308
4309 list_del_rcu(&pa->pa_inode_list);
4310 list_add(&pa->u.pa_tmp_list, &discard_list);
4311
4312 total_entries--;
4313 if (total_entries <= 5) {
4314 /*
4315 * we want to keep only 5 entries
4316 * allowing it to grow to 8. This
4317 * makes sure we don't call discard
4318 * again soon for this list.
4319 */
4320 break;
4321 }
4322 }
4323 spin_unlock(&lg->lg_prealloc_lock);
4324
4325 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4326
4327 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4328 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4329 ext4_error(sb, __func__, "Error in loading buddy "
a9df9a49 4330 "information for %u", group);
6be2ded1
AK
4331 continue;
4332 }
4333 ext4_lock_group(sb, group);
4334 list_del(&pa->pa_group_list);
4335 ext4_mb_release_group_pa(&e4b, pa, ac);
4336 ext4_unlock_group(sb, group);
4337
4338 ext4_mb_release_desc(&e4b);
4339 list_del(&pa->u.pa_tmp_list);
4340 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4341 }
4342 if (ac)
4343 kmem_cache_free(ext4_ac_cachep, ac);
4344}
4345
4346/*
4347 * We have incremented pa_count. So it cannot be freed at this
4348 * point. Also we hold lg_mutex. So no parallel allocation is
4349 * possible from this lg. That means pa_free cannot be updated.
4350 *
4351 * A parallel ext4_mb_discard_group_preallocations is possible,
4352 * which can cause the lg_prealloc_list to be updated.
4353 */
4354
4355static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4356{
4357 int order, added = 0, lg_prealloc_count = 1;
4358 struct super_block *sb = ac->ac_sb;
4359 struct ext4_locality_group *lg = ac->ac_lg;
4360 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4361
4362 order = fls(pa->pa_free) - 1;
4363 if (order > PREALLOC_TB_SIZE - 1)
4364 /* The max size of hash table is PREALLOC_TB_SIZE */
4365 order = PREALLOC_TB_SIZE - 1;
4366 /* Add the prealloc space to lg */
4367 rcu_read_lock();
4368 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4369 pa_inode_list) {
4370 spin_lock(&tmp_pa->pa_lock);
4371 if (tmp_pa->pa_deleted) {
e7c9e3e9 4372 spin_unlock(&tmp_pa->pa_lock);
6be2ded1
AK
4373 continue;
4374 }
4375 if (!added && pa->pa_free < tmp_pa->pa_free) {
4376 /* Add to the tail of the previous entry */
4377 list_add_tail_rcu(&pa->pa_inode_list,
4378 &tmp_pa->pa_inode_list);
4379 added = 1;
4380 /*
4381 * we want to count the total
4382 * number of entries in the list
4383 */
4384 }
4385 spin_unlock(&tmp_pa->pa_lock);
4386 lg_prealloc_count++;
4387 }
4388 if (!added)
4389 list_add_tail_rcu(&pa->pa_inode_list,
4390 &lg->lg_prealloc_list[order]);
4391 rcu_read_unlock();
4392
4393 /* Now trim the list to be not more than 8 elements */
4394 if (lg_prealloc_count > 8) {
4395 ext4_mb_discard_lg_preallocations(sb, lg,
4396 order, lg_prealloc_count);
4397 return;
4398 }
4399 return;
4400}
4401
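/*
 * Illustrative example: a PA with pa_free == 100 gives
 * order = fls(100) - 1 = 6, so it is queued on lg_prealloc_list[6] in
 * ascending pa_free order; once a bucket grows past 8 entries it is
 * trimmed back to roughly 5 by ext4_mb_discard_lg_preallocations().
 */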
c9de560d
AT
4402/*
4403 * release all resources we used in the allocation
4404 */
4405static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4406{
6be2ded1
AK
4407 struct ext4_prealloc_space *pa = ac->ac_pa;
4408 if (pa) {
cc0fb9ad 4409 if (pa->pa_type == MB_GROUP_PA) {
c9de560d 4410 /* see comment in ext4_mb_use_group_pa() */
6be2ded1
AK
4411 spin_lock(&pa->pa_lock);
4412 pa->pa_pstart += ac->ac_b_ex.fe_len;
4413 pa->pa_lstart += ac->ac_b_ex.fe_len;
4414 pa->pa_free -= ac->ac_b_ex.fe_len;
4415 pa->pa_len -= ac->ac_b_ex.fe_len;
4416 spin_unlock(&pa->pa_lock);
c9de560d 4417 }
c9de560d 4418 }
8556e8f3
AK
4419 if (ac->alloc_semp)
4420 up_read(ac->alloc_semp);
ba443916
AK
4421 if (pa) {
4422 /*
4423 * We want to add the pa to the right bucket.
4424 * Remove it from the list and while adding
4425 * make sure the list to which we are adding
4426 * doesn't grow big. We need to release
4427 * alloc_semp before calling ext4_mb_add_n_trim()
4428 */
cc0fb9ad 4429 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
ba443916
AK
4430 spin_lock(pa->pa_obj_lock);
4431 list_del_rcu(&pa->pa_inode_list);
4432 spin_unlock(pa->pa_obj_lock);
4433 ext4_mb_add_n_trim(ac);
4434 }
4435 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4436 }
c9de560d
AT
4437 if (ac->ac_bitmap_page)
4438 page_cache_release(ac->ac_bitmap_page);
4439 if (ac->ac_buddy_page)
4440 page_cache_release(ac->ac_buddy_page);
4441 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4442 mutex_unlock(&ac->ac_lg->lg_mutex);
4443 ext4_mb_collect_stats(ac);
4444 return 0;
4445}
4446
4447static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4448{
8df9675f 4449 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
c9de560d
AT
4450 int ret;
4451 int freed = 0;
4452
ba80b101
TT
4453 trace_mark(ext4_mb_discard_preallocations, "dev %s needed %d",
4454 sb->s_id, needed);
8df9675f 4455 for (i = 0; i < ngroups && needed > 0; i++) {
c9de560d
AT
4456 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4457 freed += ret;
4458 needed -= ret;
4459 }
4460
4461 return freed;
4462}
4463
4464/*
4465 * Main entry point into mballoc to allocate blocks
4466 * it tries to use preallocation first, then falls back
4467 * to usual allocation
4468 */
4469ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4470 struct ext4_allocation_request *ar, int *errp)
4471{
6bc6e63f 4472 int freed;
256bdb49 4473 struct ext4_allocation_context *ac = NULL;
c9de560d
AT
4474 struct ext4_sb_info *sbi;
4475 struct super_block *sb;
4476 ext4_fsblk_t block = 0;
60e58e0f 4477 unsigned int inquota = 0;
498e5f24 4478 unsigned int reserv_blks = 0;
c9de560d
AT
4479
4480 sb = ar->inode->i_sb;
4481 sbi = EXT4_SB(sb);
4482
ba80b101
TT
4483 trace_mark(ext4_request_blocks, "dev %s flags %u len %u ino %lu "
4484 "lblk %llu goal %llu lleft %llu lright %llu "
4485 "pleft %llu pright %llu ",
4486 sb->s_id, ar->flags, ar->len,
4487 ar->inode ? ar->inode->i_ino : 0,
4488 (unsigned long long) ar->logical,
4489 (unsigned long long) ar->goal,
4490 (unsigned long long) ar->lleft,
4491 (unsigned long long) ar->lright,
4492 (unsigned long long) ar->pleft,
4493 (unsigned long long) ar->pright);
4494
60e58e0f
MC
4495 /*
4496 * For delayed allocation, we could skip the ENOSPC and
4497 * EDQUOT check, as blocks and quota have already been
4498 * reserved when the data was copied into the pagecache.
4499 */
4500 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4501 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4502 else {
4503 /* Without delayed allocation we need to verify
4504 * there are enough free blocks to do block allocation
4505 * and verify allocation doesn't exceed the quota limits.
d2a17637 4506 */
030ba6bc
AK
4507 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4508 /* let others free the space */
4509 yield();
4510 ar->len = ar->len >> 1;
4511 }
4512 if (!ar->len) {
a30d542a
AK
4513 *errp = -ENOSPC;
4514 return 0;
4515 }
6bc6e63f 4516 reserv_blks = ar->len;
a269eb18 4517 while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
60e58e0f
MC
4518 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4519 ar->len--;
4520 }
4521 inquota = ar->len;
4522 if (ar->len == 0) {
4523 *errp = -EDQUOT;
4524 goto out3;
4525 }
07031431 4526 }
d2a17637 4527
256bdb49
ES
4528 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4529 if (!ac) {
363d4251 4530 ar->len = 0;
256bdb49 4531 *errp = -ENOMEM;
363d4251 4532 goto out1;
256bdb49
ES
4533 }
4534
256bdb49 4535 *errp = ext4_mb_initialize_context(ac, ar);
c9de560d
AT
4536 if (*errp) {
4537 ar->len = 0;
363d4251 4538 goto out2;
c9de560d
AT
4539 }
4540
256bdb49
ES
4541 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4542 if (!ext4_mb_use_preallocated(ac)) {
256bdb49
ES
4543 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4544 ext4_mb_normalize_request(ac, ar);
c9de560d
AT
4545repeat:
4546 /* allocate space in core */
256bdb49 4547 ext4_mb_regular_allocator(ac);
c9de560d
AT
4548
4549 /* as we've just preallocated more space than
4550 * the user originally requested, we store the allocated
4551 * space in a special descriptor */
256bdb49
ES
4552 if (ac->ac_status == AC_STATUS_FOUND &&
4553 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4554 ext4_mb_new_preallocation(ac);
c9de560d 4555 }
256bdb49 4556 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6bc6e63f 4557 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
519deca0 4558 if (*errp == -EAGAIN) {
8556e8f3
AK
4559 /*
4560 * drop the reference that we took
4561 * in ext4_mb_use_best_found
4562 */
4563 ext4_mb_release_context(ac);
519deca0
AK
4564 ac->ac_b_ex.fe_group = 0;
4565 ac->ac_b_ex.fe_start = 0;
4566 ac->ac_b_ex.fe_len = 0;
4567 ac->ac_status = AC_STATUS_CONTINUE;
4568 goto repeat;
4569 } else if (*errp) {
4570 ac->ac_b_ex.fe_len = 0;
4571 ar->len = 0;
4572 ext4_mb_show_ac(ac);
4573 } else {
4574 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4575 ar->len = ac->ac_b_ex.fe_len;
4576 }
c9de560d 4577 } else {
256bdb49 4578 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
c9de560d
AT
4579 if (freed)
4580 goto repeat;
4581 *errp = -ENOSPC;
256bdb49 4582 ac->ac_b_ex.fe_len = 0;
c9de560d 4583 ar->len = 0;
256bdb49 4584 ext4_mb_show_ac(ac);
c9de560d
AT
4585 }
4586
256bdb49 4587 ext4_mb_release_context(ac);
c9de560d 4588
363d4251
SF
4589out2:
4590 kmem_cache_free(ext4_ac_cachep, ac);
4591out1:
60e58e0f 4592 if (inquota && ar->len < inquota)
a269eb18 4593 vfs_dq_free_block(ar->inode, inquota - ar->len);
0087d9fb
AK
4594out3:
4595 if (!ar->len) {
4596 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4597 /* release all the reserved blocks if non delalloc */
4598 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4599 reserv_blks);
4600 }
c9de560d 4601
ba80b101
TT
4602 trace_mark(ext4_allocate_blocks,
4603 "dev %s block %llu flags %u len %u ino %lu "
4604 "logical %llu goal %llu lleft %llu lright %llu "
4605 "pleft %llu pright %llu ",
4606 sb->s_id, (unsigned long long) block,
4607 ar->flags, ar->len, ar->inode ? ar->inode->i_ino : 0,
4608 (unsigned long long) ar->logical,
4609 (unsigned long long) ar->goal,
4610 (unsigned long long) ar->lleft,
4611 (unsigned long long) ar->lright,
4612 (unsigned long long) ar->pleft,
4613 (unsigned long long) ar->pright);
4614
c9de560d
AT
4615 return block;
4616}
c9de560d 4617
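/*
 * Minimal usage sketch for ext4_mb_new_blocks() (illustrative, field
 * values assumed):
 *
 *	struct ext4_allocation_request ar = {
 *		.inode   = inode,
 *		.logical = iblock,		// file-relative block wanted
 *		.goal    = goal,		// physical goal block hint
 *		.len     = 1,			// number of blocks requested
 *		.flags   = EXT4_MB_HINT_DATA,
 *	};
 *	int err;
 *	ext4_fsblk_t pblk = ext4_mb_new_blocks(handle, &ar, &err);
 *	if (err)
 *		return err;			// e.g. -ENOSPC or -EDQUOT
 *	// on success, ar.len holds the number of blocks actually allocated
 *
 * On failure the function returns 0 and reports the error through *errp.
 */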
c894058d
AK
4618/*
4619 * We can merge two free data extents only if the physical blocks
4620 * are contiguous, AND the extents were freed by the same transaction,
4621 * AND the blocks are associated with the same group.
4622 */
4623static int can_merge(struct ext4_free_data *entry1,
4624 struct ext4_free_data *entry2)
4625{
4626 if ((entry1->t_tid == entry2->t_tid) &&
4627 (entry1->group == entry2->group) &&
4628 ((entry1->start_blk + entry1->count) == entry2->start_blk))
4629 return 1;
4630 return 0;
4631}
4632
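/*
 * Illustrative example of the rule above: extents {start_blk = 100,
 * count = 8} and {start_blk = 108, count = 4} merge into
 * {start_blk = 100, count = 12} provided both carry the same t_tid and
 * group; ext4_mb_free_metadata() below applies this check to the left
 * and right rb-tree neighbours of each newly inserted extent.
 */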
4ddfef7b
ES
4633static noinline_for_stack int
4634ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
7a2fcbf7 4635 struct ext4_free_data *new_entry)
c9de560d 4636{
7a2fcbf7
AK
4637 ext4_grpblk_t block;
4638 struct ext4_free_data *entry;
c9de560d
AT
4639 struct ext4_group_info *db = e4b->bd_info;
4640 struct super_block *sb = e4b->bd_sb;
4641 struct ext4_sb_info *sbi = EXT4_SB(sb);
c894058d
AK
4642 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4643 struct rb_node *parent = NULL, *new_node;
4644
0390131b 4645 BUG_ON(!ext4_handle_valid(handle));
c9de560d
AT
4646 BUG_ON(e4b->bd_bitmap_page == NULL);
4647 BUG_ON(e4b->bd_buddy_page == NULL);
4648
c894058d 4649 new_node = &new_entry->node;
7a2fcbf7 4650 block = new_entry->start_blk;
c894058d 4651
c894058d
AK
4652 if (!*n) {
4653 /* first free block extent. We need to
4654 * protect the buddy cache from being freed,
4655 * otherwise we'll refresh it from the
4656 * on-disk bitmap and lose not-yet-available
4657 * blocks */
4658 page_cache_get(e4b->bd_buddy_page);
4659 page_cache_get(e4b->bd_bitmap_page);
4660 }
4661 while (*n) {
4662 parent = *n;
4663 entry = rb_entry(parent, struct ext4_free_data, node);
4664 if (block < entry->start_blk)
4665 n = &(*n)->rb_left;
4666 else if (block >= (entry->start_blk + entry->count))
4667 n = &(*n)->rb_right;
4668 else {
5d1b1b3f
AK
4669 ext4_grp_locked_error(sb, e4b->bd_group, __func__,
4670 "Double free of blocks %d (%d %d)",
4671 block, entry->start_blk, entry->count);
c894058d 4672 return 0;
c9de560d 4673 }
c894058d 4674 }
c9de560d 4675
c894058d
AK
4676 rb_link_node(new_node, parent, n);
4677 rb_insert_color(new_node, &db->bb_free_root);
4678
4679 /* Now try to see if the extent can be merged to the left and right */
4680 node = rb_prev(new_node);
4681 if (node) {
4682 entry = rb_entry(node, struct ext4_free_data, node);
4683 if (can_merge(entry, new_entry)) {
4684 new_entry->start_blk = entry->start_blk;
4685 new_entry->count += entry->count;
4686 rb_erase(node, &(db->bb_free_root));
4687 spin_lock(&sbi->s_md_lock);
4688 list_del(&entry->list);
4689 spin_unlock(&sbi->s_md_lock);
4690 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d 4691 }
c894058d 4692 }
c9de560d 4693
c894058d
AK
4694 node = rb_next(new_node);
4695 if (node) {
4696 entry = rb_entry(node, struct ext4_free_data, node);
4697 if (can_merge(new_entry, entry)) {
4698 new_entry->count += entry->count;
4699 rb_erase(node, &(db->bb_free_root));
4700 spin_lock(&sbi->s_md_lock);
4701 list_del(&entry->list);
4702 spin_unlock(&sbi->s_md_lock);
4703 kmem_cache_free(ext4_free_ext_cachep, entry);
c9de560d
AT
4704 }
4705 }
3e624fc7 4706 /* Add the extent to transaction's private list */
c894058d 4707 spin_lock(&sbi->s_md_lock);
3e624fc7 4708 list_add(&new_entry->list, &handle->h_transaction->t_private_list);
c894058d 4709 spin_unlock(&sbi->s_md_lock);
c9de560d
AT
4710 return 0;
4711}
4712
4713/*
4714 * Main entry point into mballoc to free blocks
4715 */
4716void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4717 unsigned long block, unsigned long count,
4718 int metadata, unsigned long *freed)
4719{
26346ff6 4720 struct buffer_head *bitmap_bh = NULL;
c9de560d 4721 struct super_block *sb = inode->i_sb;
256bdb49 4722 struct ext4_allocation_context *ac = NULL;
c9de560d
AT
4723 struct ext4_group_desc *gdp;
4724 struct ext4_super_block *es;
498e5f24 4725 unsigned int overflow;
c9de560d
AT
4726 ext4_grpblk_t bit;
4727 struct buffer_head *gd_bh;
4728 ext4_group_t block_group;
4729 struct ext4_sb_info *sbi;
4730 struct ext4_buddy e4b;
4731 int err = 0;
4732 int ret;
4733
4734 *freed = 0;
4735
c9de560d
AT
4736 sbi = EXT4_SB(sb);
4737 es = EXT4_SB(sb)->s_es;
4738 if (block < le32_to_cpu(es->s_first_data_block) ||
4739 block + count < block ||
4740 block + count > ext4_blocks_count(es)) {
46e665e9 4741 ext4_error(sb, __func__,
c9de560d
AT
4742 "Freeing blocks not in datazone - "
4743 "block = %lu, count = %lu", block, count);
4744 goto error_return;
4745 }
4746
4747 ext4_debug("freeing block %lu\n", block);
ba80b101
TT
4748 trace_mark(ext4_free_blocks,
4749 "dev %s block %llu count %lu metadata %d ino %lu",
4750 sb->s_id, (unsigned long long) block, count, metadata,
4751 inode ? inode->i_ino : 0);
c9de560d 4752
256bdb49
ES
4753 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4754 if (ac) {
4755 ac->ac_op = EXT4_MB_HISTORY_FREE;
4756 ac->ac_inode = inode;
4757 ac->ac_sb = sb;
4758 }
c9de560d
AT
4759
4760do_more:
4761 overflow = 0;
4762 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4763
4764 /*
4765 * Check to see if we are freeing blocks across a group
4766 * boundary.
4767 */
4768 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4769 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4770 count -= overflow;
4771 }
574ca174 4772 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
ce89f46c
AK
4773 if (!bitmap_bh) {
4774 err = -EIO;
c9de560d 4775 goto error_return;
ce89f46c 4776 }
c9de560d 4777 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
ce89f46c
AK
4778 if (!gdp) {
4779 err = -EIO;
c9de560d 4780 goto error_return;
ce89f46c 4781 }
c9de560d
AT
4782
4783 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4784 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4785 in_range(block, ext4_inode_table(sb, gdp),
4786 EXT4_SB(sb)->s_itb_per_group) ||
4787 in_range(block + count - 1, ext4_inode_table(sb, gdp),
4788 EXT4_SB(sb)->s_itb_per_group)) {
4789
46e665e9 4790 ext4_error(sb, __func__,
c9de560d
AT
4791 "Freeing blocks in system zone - "
4792 "Block = %lu, count = %lu", block, count);
519deca0
AK
4793 /* err = 0. ext4_std_error should be a no op */
4794 goto error_return;
c9de560d
AT
4795 }
4796
4797 BUFFER_TRACE(bitmap_bh, "getting write access");
4798 err = ext4_journal_get_write_access(handle, bitmap_bh);
4799 if (err)
4800 goto error_return;
4801
4802 /*
4803 * We are about to modify some metadata. Call the journal APIs
4804 * to unshare ->b_data if a currently-committing transaction is
4805 * using it
4806 */
4807 BUFFER_TRACE(gd_bh, "get_write_access");
4808 err = ext4_journal_get_write_access(handle, gd_bh);
4809 if (err)
4810 goto error_return;
c9de560d
AT
4811#ifdef AGGRESSIVE_CHECK
4812 {
4813 int i;
4814 for (i = 0; i < count; i++)
4815 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4816 }
4817#endif
256bdb49
ES
4818 if (ac) {
4819 ac->ac_b_ex.fe_group = block_group;
4820 ac->ac_b_ex.fe_start = bit;
4821 ac->ac_b_ex.fe_len = count;
4822 ext4_mb_store_history(ac);
4823 }
c9de560d 4824
920313a7
AK
4825 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4826 if (err)
4827 goto error_return;
0390131b 4828 if (metadata && ext4_handle_valid(handle)) {
7a2fcbf7
AK
4829 struct ext4_free_data *new_entry;
4830 /*
4831 * blocks being freed are metadata. these blocks shouldn't
4832 * be used until this transaction is committed
4833 */
4834 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4835 new_entry->start_blk = bit;
4836 new_entry->group = block_group;
4837 new_entry->count = count;
4838 new_entry->t_tid = handle->h_transaction->t_tid;
955ce5f5 4839
7a2fcbf7 4840 ext4_lock_group(sb, block_group);
955ce5f5 4841 mb_clear_bits(bitmap_bh->b_data, bit, count);
7a2fcbf7 4842 ext4_mb_free_metadata(handle, &e4b, new_entry);
c9de560d 4843 } else {
7a2fcbf7
AK
4844 /* need to update group_info->bb_free and bitmap
4845 * with the group lock held. generate_buddy looks at
4846 * them with the group lock held
4847 */
955ce5f5
AK
4848 ext4_lock_group(sb, block_group);
4849 mb_clear_bits(bitmap_bh->b_data, bit, count);
7e5a8cdd 4850 mb_free_blocks(inode, &e4b, bit, count);
c9de560d 4851 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
c9de560d
AT
4852 }
4853
560671a0
AK
4854 ret = ext4_free_blks_count(sb, gdp) + count;
4855 ext4_free_blks_set(sb, gdp, ret);
c9de560d 4856 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
955ce5f5 4857 ext4_unlock_group(sb, block_group);
c9de560d
AT
4858 percpu_counter_add(&sbi->s_freeblocks_counter, count);
4859
772cb7c8
JS
4860 if (sbi->s_log_groups_per_flex) {
4861 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
9f24e420 4862 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
772cb7c8
JS
4863 }
4864
c9de560d
AT
4865 ext4_mb_release_desc(&e4b);
4866
4867 *freed += count;
4868
7a2fcbf7
AK
4869 /* We dirtied the bitmap block */
4870 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4871 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4872
c9de560d
AT
4873 /* And the group descriptor block */
4874 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
0390131b 4875 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
c9de560d
AT
4876 if (!err)
4877 err = ret;
4878
4879 if (overflow && !err) {
4880 block += count;
4881 count = overflow;
4882 put_bh(bitmap_bh);
4883 goto do_more;
4884 }
4885 sb->s_dirt = 1;
4886error_return:
4887 brelse(bitmap_bh);
4888 ext4_std_error(sb, err);
256bdb49
ES
4889 if (ac)
4890 kmem_cache_free(ext4_ac_cachep, ac);
c9de560d
AT
4891 return;
4892}
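/*
 * Illustrative call (arguments assumed): freeing a contiguous run of data
 * blocks goes through
 *
 *	unsigned long freed;
 *	ext4_mb_free_blocks(handle, inode, block, count, 0, &freed);
 *
 * where the 0 means "not metadata".  With metadata == 1 the run is instead
 * queued on the committing transaction's private list (see
 * ext4_mb_free_metadata() above) so the blocks cannot be reused before the
 * transaction commits.
 */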