/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#include "group.h"

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in a few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of the
 * file. The size of the file could be the resulting file size we would
 * have after allocation, or the current file size, whichever is larger.
 * If the size is less than sbi->s_mb_stream_request we select the group
 * preallocation. The default value of s_mb_stream_request is 16 blocks.
 * This can also be tuned via /proc/fs/ext4/<partition>/stream_req. The
 * value is represented in terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to ensure that we keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that particular prealloc space. This
 * makes sure that we have contiguous physical blocks representing the
 * file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is
 * that we don't modify the values associated with the inode prealloc
 * space except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if
 * we have the group allocation flag set then we look at the locality
 * group prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this
 * point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is
 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
 * offset gets mapped to the buddy and bitmap information regarding
 * different groups. The buddy information is attached to the buddy
 * cache inode so that we can access it through the page cache. The
 * information regarding each group is loaded via ext4_mb_load_buddy.
 * It consists of the block bitmap and the buddy information, stored in
 * the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and buddy information. So for each
 * group we take up 2 blocks. A page can contain blocks_per_page
 * (PAGE_CACHE_SIZE / blocksize) blocks. So it can have information
 * regarding groups_per_page, which is blocks_per_page/2.
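 *
 * For illustration (values assumed here, not part of the original text):
 * with 4K pages and a 1K block size, blocks_per_page = 4096/1024 = 4, so
 * groups_per_page = 2 and page 0 holds group 0's bitmap and buddy
 * followed by group 1's. With a 4K block size a page holds a single
 * block, so one group's bitmap and buddy occupy two consecutive pages.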
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were
 * able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of
 * heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value
 * of s_mb_group_prealloc is 512 blocks. This can be tuned via
 * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to
 * the stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables:
 *
 * /proc/fs/ext4/<partition>/min_to_scan
 * /proc/fs/ext4/<partition>/max_to_scan
 * /proc/fs/ext4/<partition>/order2_req
 *
 * The regular allocator uses a buddy scan only if the request length is
 * a power of 2 blocks and the order of allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /proc/fs/ext4/<partition>/order2_req. If the request length is equal
 * to the stripe size (sbi->s_stripe), we search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent in the found extents. Searching for blocks
 * starts with the group specified as the goal value in the allocation
 * context via ac_g_ex. Each group is first checked based on the criteria
 * of whether it can be used for allocation. ext4_mb_good_group explains
 * how the groups are checked.
 *
 * Both prealloc spaces are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in this
 * prealloc space getting filled. The prealloc space is then later used
 * for subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within a descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                    buddy = on-disk + PAs
 *  - new PA:                        buddy += N; PA = N
 *  - use inode PA:                  on-disk += N; PA -= N
 *  - discard inode PA:              buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:         on-disk += N; PA -= N
 *  - discard locality group PA:     buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know actual used
 *        bits from PA, only from the on-disk bitmap
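 *
 * a hypothetical worked example (not part of the original text): create
 * a PA of 16 blocks, so the buddy marks 16 blocks used while the on-disk
 * bitmap still shows them free (16 = 0 + 16). after writing 10 blocks,
 * on-disk = 10 and PA = 6, and the invariant buddy = on-disk + PA still
 * holds (16 = 10 + 6). discarding the PA then frees the 6 unused blocks
 * in the buddy, restoring buddy = on-disk.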
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponding
 *     block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */


/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */

/*
 * with AGGRESSIVE_CHECK the allocator runs consistency checks over
 * structures. these checks slow things down a lot
 */
#define AGGRESSIVE_CHECK__

/*
 * with DOUBLE_CHECK defined mballoc creates persistent in-core
 * bitmaps, maintains and uses them to check for double allocations
 */
#define DOUBLE_CHECK__

/*
 * with MB_DEBUG defined mballoc prints debugging messages via printk
 */
#define MB_DEBUG__
#ifdef MB_DEBUG
#define mb_debug(fmt, a...)     printk(fmt, ##a)
#else
#define mb_debug(fmt, a...)
#endif

/*
 * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
 * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
 */
#define EXT4_MB_HISTORY
#define EXT4_MB_HISTORY_ALLOC           1       /* allocation */
#define EXT4_MB_HISTORY_PREALLOC        2       /* preallocated blocks used */
#define EXT4_MB_HISTORY_DISCARD         4       /* preallocation discarded */
#define EXT4_MB_HISTORY_FREE            8       /* free */

#define EXT4_MB_HISTORY_DEFAULT         (EXT4_MB_HISTORY_ALLOC | \
                                         EXT4_MB_HISTORY_PREALLOC)

/*
 * How long mballoc can look for a best extent (in found extents)
 */
#define MB_DEFAULT_MAX_TO_SCAN          200

/*
 * How long mballoc must look for a best extent
 */
#define MB_DEFAULT_MIN_TO_SCAN          10

/*
 * How many groups mballoc will scan looking for the best chunk
 */
#define MB_DEFAULT_MAX_GROUPS_TO_SCAN   5

/*
 * with 'ext4_mb_stats' the allocator will collect stats that will be
 * shown at umount. The collecting costs though!
 */
#define MB_DEFAULT_STATS                1

/*
 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
 * by the stream allocator, whose purpose is to pack requests
 * as close to each other as possible to produce smooth I/O traffic.
 * We use locality group prealloc space for stream requests.
 * We can tune the same via /proc/fs/ext4/<partition>/stream_req
 */
#define MB_DEFAULT_STREAM_THRESHOLD     16      /* 64K */

/*
 * for which requests use 2^N search using buddies
 */
#define MB_DEFAULT_ORDER2_REQS          2

/*
 * default group prealloc size 512 blocks
 */
#define MB_DEFAULT_GROUP_PREALLOC       512

static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;

#ifdef EXT4_BB_MAX_BLOCKS
#undef EXT4_BB_MAX_BLOCKS
#endif
#define EXT4_BB_MAX_BLOCKS      30

struct ext4_free_metadata {
        ext4_group_t group;
        unsigned short num;
        ext4_grpblk_t blocks[EXT4_BB_MAX_BLOCKS];
        struct list_head list;
};

struct ext4_group_info {
        unsigned long bb_state;
        unsigned long bb_tid;
        struct ext4_free_metadata *bb_md_cur;
        unsigned short bb_first_free;
        unsigned short bb_free;
        unsigned short bb_fragments;
        struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
        void *bb_bitmap;
#endif
        unsigned short bb_counters[];
};

#define EXT4_GROUP_INFO_NEED_INIT_BIT   0
#define EXT4_GROUP_INFO_LOCKED_BIT      1

#define EXT4_MB_GRP_NEED_INIT(grp)      \
        (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))


struct ext4_prealloc_space {
        struct list_head        pa_inode_list;
        struct list_head        pa_group_list;
        union {
                struct list_head pa_tmp_list;
                struct rcu_head pa_rcu;
        } u;
        spinlock_t              pa_lock;
        atomic_t                pa_count;
        unsigned                pa_deleted;
        ext4_fsblk_t            pa_pstart;      /* phys. block */
        ext4_lblk_t             pa_lstart;      /* log. block */
        unsigned short          pa_len;         /* len of preallocated chunk */
        unsigned short          pa_free;        /* how many blocks are free */
        unsigned short          pa_linear;      /* consumed in one direction
                                                 * strictly, for grp prealloc */
        spinlock_t              *pa_obj_lock;
        struct inode            *pa_inode;      /* hack, for history only */
};


struct ext4_free_extent {
        ext4_lblk_t fe_logical;
        ext4_grpblk_t fe_start;
        ext4_group_t fe_group;
        int fe_len;
};

/*
 * Locality group:
 *   we try to group all related changes together
 *   so that writeback can flush/allocate them together as well
 */
struct ext4_locality_group {
        /* for allocator */
        struct mutex            lg_mutex;       /* to serialize allocates */
        struct list_head        lg_prealloc_list; /* list of preallocations */
        spinlock_t              lg_prealloc_lock;
};

struct ext4_allocation_context {
        struct inode *ac_inode;
        struct super_block *ac_sb;

        /* original request */
        struct ext4_free_extent ac_o_ex;

        /* goal request (after normalization) */
        struct ext4_free_extent ac_g_ex;

        /* the best found extent */
        struct ext4_free_extent ac_b_ex;

        /* copy of the best found extent taken before preallocation efforts */
        struct ext4_free_extent ac_f_ex;

        /* number of iterations done. we have to track to limit searching */
        unsigned long ac_ex_scanned;
        __u16 ac_groups_scanned;
        __u16 ac_found;
        __u16 ac_tail;
        __u16 ac_buddy;
        __u16 ac_flags;         /* allocation hints */
        __u8 ac_status;
        __u8 ac_criteria;
        __u8 ac_repeats;
        __u8 ac_2order;         /* if request is to allocate 2^N blocks and
                                 * N > 0, the field stores N, otherwise 0 */
        __u8 ac_op;             /* operation, for history only */
        struct page *ac_bitmap_page;
        struct page *ac_buddy_page;
        struct ext4_prealloc_space *ac_pa;
        struct ext4_locality_group *ac_lg;
};

#define AC_STATUS_CONTINUE      1
#define AC_STATUS_FOUND         2
#define AC_STATUS_BREAK         3

struct ext4_mb_history {
        struct ext4_free_extent orig;   /* orig allocation */
        struct ext4_free_extent goal;   /* goal allocation */
        struct ext4_free_extent result; /* result allocation */
        unsigned pid;
        unsigned ino;
        __u16 found;    /* how many extents have been found */
        __u16 groups;   /* how many groups have been scanned */
        __u16 tail;     /* what tail broke some buddy */
        __u16 buddy;    /* buddy the tail ^^^ broke */
        __u16 flags;
        __u8 cr:3;      /* which phase the result extent was found at */
        __u8 op:4;
        __u8 merged:1;
};

struct ext4_buddy {
        struct page *bd_buddy_page;
        void *bd_buddy;
        struct page *bd_bitmap_page;
        void *bd_bitmap;
        struct ext4_group_info *bd_info;
        struct super_block *bd_sb;
        __u16 bd_blkbits;
        ext4_group_t bd_group;
};
#define EXT4_MB_BITMAP(e4b)     ((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b)      ((e4b)->bd_buddy)

#ifndef EXT4_MB_HISTORY
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
{
        return;
}
#else
static void ext4_mb_store_history(struct ext4_allocation_context *ac);
#endif

#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)

static struct proc_dir_entry *proc_root_ext4;
struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t goal, unsigned long *count, int *errp);

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
static void ext4_mb_free_committed_blocks(struct super_block *);
static void ext4_mb_return_to_preallocation(struct inode *inode,
                                        struct ext4_buddy *e4b, sector_t block,
                                        int count);
static void ext4_mb_put_pa(struct ext4_allocation_context *,
                        struct super_block *, struct ext4_prealloc_space *pa);
static int ext4_mb_init_per_dev_proc(struct super_block *sb);
static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);


static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

        bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}

static inline void ext4_unlock_group(struct super_block *sb,
                                        ext4_group_t group)
{
        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

        bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}

static inline int ext4_is_group_locked(struct super_block *sb,
                                        ext4_group_t group)
{
        struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

        return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
                                                &(grinfo->bb_state));
}

static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
                                        struct ext4_free_extent *fex)
{
        ext4_fsblk_t block;

        block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
                        + fex->fe_start
                        + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
        return block;
}
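
/*
 * Illustrative example for the conversion above (values assumed, not
 * part of the original source): on a 1K-block filesystem with 8192
 * blocks per group and s_first_data_block == 1, fe_group = 2 and
 * fe_start = 100 map to physical block 2 * 8192 + 100 + 1 = 16485.
 */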

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}
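
/*
 * Example of the adjustment above (values assumed, not part of the
 * original source): on a 64-bit machine, addr = 0x1005 with *bit = 3
 * becomes addr = 0x1000 and *bit = 3 + (5 << 3) = 43, i.e. the same
 * bit counted from the previous 8-byte boundary.
 */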

static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architectures like powerpc
         * needs an unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit_atomic(lock, bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit_atomic(lock, bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0;
        addr = mb_correct_addr_and_bit(&fix, addr);
        max += fix;
        start += fix;

        return ext4_find_next_zero_bit(addr, max, start) - fix;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0;
        addr = mb_correct_addr_and_bit(&fix, addr);
        max += fix;
        start += fix;

        return ext4_find_next_bit(addr, max, start) - fix;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        *max = 1 << (e4b->bd_blkbits + 3);
        if (order == 0)
                return EXT4_MB_BITMAP(e4b);

        bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                           int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;
                        blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
                        blocknr += first + i;
                        blocknr +=
                            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);

                        ext4_error(sb, __FUNCTION__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %lu)\n",
                                   inode ? inode->i_ino : 0, blocknr,
                                   first + i, e4b->bd_group);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                printk("corruption in group %lu at byte %u(%u):"
                                       " %x in copy != %x on disk/prealloc\n",
                                        e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)                                         \
do {                                                                    \
        if (!(assert)) {                                                \
                printk(KERN_EMERG                                       \
                        "Assertion failure in %s() at %s:%d: \"%s\"\n", \
                        function, file, line, # assert);                \
                BUG();                                                  \
        }                                                               \
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (!test_opt(sb, MBALLOC))
                return 0;

        {
                static int mb_check_counter;
                if (mb_check_counter++ % 100 != 0)
                        return 0;
        }

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 1 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit(i << 1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 0 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        buddy = mb_find_buddy(e4b, 0, &max);
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
                                        __FILE__, __FUNCTION__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/* FIXME!! need more doc */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, unsigned first, int len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned short min;
        unsigned short max;
        unsigned short chunk;
        unsigned short border;

        BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered since this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}
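
/*
 * Worked example for the loop above (values assumed, not part of the
 * original source): for first = 6 and len = 10 the first pass finds
 * max = ffs(6) - 1 = 1 and min = fls(10) - 1 = 3, caps min to 1 and
 * marks an order-1 chunk at block 6; the second pass marks an order-3
 * chunk at block 8, covering all ten blocks with two buddy entries.
 */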

static void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
        unsigned short i = 0;
        unsigned short first;
        unsigned short len;
        unsigned free = 0;
        unsigned fragments = 0;
        unsigned long long period = get_cycles();

        /* initialize buddy from bitmap which is aggregation
         * of on-disk bitmap and preallocations */
        i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
                i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
                        ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
                else
                        grp->bb_counters[0]++;
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;

        if (free != grp->bb_free) {
                ext4_error(sb, __FUNCTION__,
                        "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
                        group, free, grp->bb_free);
                /*
                 * If we intend to continue, we consider the group descriptor
                 * corrupt and update bb_free using the bitmap value
                 */
                grp->bb_free = free;
        }

        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

        period = get_cycles() - period;
        spin_lock(&EXT4_SB(sb)->s_bal_lock);
        EXT4_SB(sb)->s_mb_buddies_generated++;
        EXT4_SB(sb)->s_mb_generation_time += period;
        spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. It consists of the
 * block bitmap and buddy information, stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2
 */
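
/*
 * For illustration (values assumed, not part of the original source):
 * with one 4K block per 4K page, group G's bitmap lives in page 2*G
 * and its buddy in page 2*G + 1; with 1K blocks (four blocks per
 * page), the bitmaps and buddies of groups 0 and 1 all share page 0.
 */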

static int ext4_mb_init_cache(struct page *page, char *incore)
{
        int blocksize;
        int blocks_per_page;
        int groups_per_page;
        int err = 0;
        int i;
        ext4_group_t first_group;
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
        struct buffer_head **bh;
        struct inode *inode;
        char *data;
        char *bitmap;

        mb_debug("init page %lu\n", page->index);

        inode = page->mapping->host;
        sb = inode->i_sb;
        blocksize = 1 << inode->i_blkbits;
        blocks_per_page = PAGE_CACHE_SIZE / blocksize;

        groups_per_page = blocks_per_page >> 1;
        if (groups_per_page == 0)
                groups_per_page = 1;

        /* allocate buffer_heads to read bitmaps */
        if (groups_per_page > 1) {
                err = -ENOMEM;
                i = sizeof(struct buffer_head *) * groups_per_page;
                bh = kzalloc(i, GFP_NOFS);
                if (bh == NULL)
                        goto out;
        } else
                bh = &bhs;

        first_group = page->index * blocks_per_page / 2;

        /* read all groups the page covers into the cache */
        for (i = 0; i < groups_per_page; i++) {
                struct ext4_group_desc *desc;

                if (first_group + i >= EXT4_SB(sb)->s_groups_count)
                        break;

                err = -EIO;
                desc = ext4_get_group_desc(sb, first_group + i, NULL);
                if (desc == NULL)
                        goto out;

                err = -ENOMEM;
                bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
                if (bh[i] == NULL)
                        goto out;

                if (bh_uptodate_or_lock(bh[i]))
                        continue;

                if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        ext4_init_block_bitmap(sb, bh[i],
                                                first_group + i, desc);
                        set_buffer_uptodate(bh[i]);
                        unlock_buffer(bh[i]);
                        continue;
                }
                get_bh(bh[i]);
                bh[i]->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh[i]);
                mb_debug("read bitmap for group %lu\n", first_group + i);
        }

        /* wait for I/O completion */
        for (i = 0; i < groups_per_page && bh[i]; i++)
                wait_on_buffer(bh[i]);

        err = -EIO;
        for (i = 0; i < groups_per_page && bh[i]; i++)
                if (!buffer_uptodate(bh[i]))
                        goto out;

        first_block = page->index * blocks_per_page;
        for (i = 0; i < blocks_per_page; i++) {
                int group;
                struct ext4_group_info *grinfo;

                group = (first_block + i) >> 1;
                if (group >= EXT4_SB(sb)->s_groups_count)
                        break;

                /*
                 * data carries the information regarding this
                 * particular group in the format specified
                 * above
                 */
                data = page_address(page) + (i * blocksize);
                bitmap = bh[group - first_group]->b_data;

                /*
                 * We place the buddy block and bitmap block
                 * close together
                 */
                if ((first_block + i) & 1) {
                        /* this is block of buddy */
                        BUG_ON(incore == NULL);
                        mb_debug("put buddy for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);
                        memset(data, 0xff, blocksize);
                        grinfo = ext4_get_group_info(sb, group);
                        grinfo->bb_fragments = 0;
                        memset(grinfo->bb_counters, 0,
                               sizeof(unsigned short)*(sb->s_blocksize_bits+2));
                        /*
                         * incore got set to the group block bitmap below
                         */
                        ext4_mb_generate_buddy(sb, data, incore, group);
                        incore = NULL;
                } else {
                        /* this is block of bitmap */
                        BUG_ON(incore != NULL);
                        mb_debug("put bitmap for group %u in page %lu/%x\n",
                                group, page->index, i * blocksize);

                        /* see comments in ext4_mb_put_pa() */
                        ext4_lock_group(sb, group);
                        memcpy(data, bitmap, blocksize);

                        /* mark all preallocated blks used in in-core bitmap */
                        ext4_mb_generate_from_pa(sb, data, group);
                        ext4_unlock_group(sb, group);

                        /* set incore so that the buddy information can be
                         * generated using this
                         */
                        incore = data;
                }
        }
        SetPageUptodate(page);

out:
        if (bh) {
                for (i = 0; i < groups_per_page && bh[i]; i++)
                        brelse(bh[i]);
                if (bh != &bhs)
                        kfree(bh);
        }
        return err;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                                        struct ext4_buddy *e4b)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct inode *inode = sbi->s_buddy_cache;
        int blocks_per_page;
        int block;
        int pnum;
        int poff;
        struct page *page;

        mb_debug("load group %lu\n", group);

        blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;

        e4b->bd_blkbits = sb->s_blocksize_bits;
        e4b->bd_info = ext4_get_group_info(sb, group);
        e4b->bd_sb = sb;
        e4b->bd_group = group;
        e4b->bd_buddy_page = NULL;
        e4b->bd_bitmap_page = NULL;

        /*
         * the buddy cache inode stores the block bitmap
         * and buddy information in consecutive blocks.
         * So for each group we need two blocks.
         */
        block = group * 2;
        pnum = block / blocks_per_page;
        poff = block % blocks_per_page;

        /* we could use find_or_create_page(), but it locks the page,
         * which we'd like to avoid in the fast path ... */
        page = find_get_page(inode->i_mapping, pnum);
        if (page == NULL || !PageUptodate(page)) {
                if (page)
                        page_cache_release(page);
                page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
                if (page) {
                        BUG_ON(page->mapping != inode->i_mapping);
                        if (!PageUptodate(page)) {
                                ext4_mb_init_cache(page, NULL);
                                mb_cmp_bitmaps(e4b, page_address(page) +
                                               (poff * sb->s_blocksize));
                        }
                        unlock_page(page);
                }
        }
        if (page == NULL || !PageUptodate(page))
                goto err;
        e4b->bd_bitmap_page = page;
        e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
        mark_page_accessed(page);

        block++;
        pnum = block / blocks_per_page;
        poff = block % blocks_per_page;

        page = find_get_page(inode->i_mapping, pnum);
        if (page == NULL || !PageUptodate(page)) {
                if (page)
                        page_cache_release(page);
                page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
                if (page) {
                        BUG_ON(page->mapping != inode->i_mapping);
                        if (!PageUptodate(page))
                                ext4_mb_init_cache(page, e4b->bd_bitmap);

                        unlock_page(page);
                }
        }
        if (page == NULL || !PageUptodate(page))
                goto err;
        e4b->bd_buddy_page = page;
        e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
        mark_page_accessed(page);

        BUG_ON(e4b->bd_bitmap_page == NULL);
        BUG_ON(e4b->bd_buddy_page == NULL);

        return 0;

err:
        if (e4b->bd_bitmap_page)
                page_cache_release(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
                page_cache_release(e4b->bd_buddy_page);
        e4b->bd_buddy = NULL;
        e4b->bd_bitmap = NULL;
        return -EIO;
}

static void ext4_mb_release_desc(struct ext4_buddy *e4b)
{
        if (e4b->bd_bitmap_page)
                page_cache_release(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
                page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
        int order = 1;
        void *bb;

        BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
        BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

        bb = EXT4_MB_BUDDY(e4b);
        while (order <= e4b->bd_blkbits + 1) {
                block = block >> 1;
                if (!mb_test_bit(block, bb)) {
                        /* this block is part of buddy of order 'order' */
                        return order;
                }
                bb += 1 << (e4b->bd_blkbits - order);
                order++;
        }
        return 0;
}
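
/*
 * Illustrative note (values assumed, not part of the original source):
 * if blocks 8..11 form a free order-2 chunk, only the order-2 buddy bit
 * for chunk 2 (8 >> 2) is clear, so mb_find_order_for_block(e4b, 8)
 * sees the order-1 bit for chunk 4 still set, moves up, and returns 2.
 */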

static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
{
        __u32 *addr;

        len = cur + len;
        while (cur < len) {
                if ((cur & 31) == 0 && (len - cur) >= 32) {
                        /* fast path: clear whole word at once */
                        addr = bm + (cur >> 3);
                        *addr = 0;
                        cur += 32;
                        continue;
                }
                mb_clear_bit_atomic(lock, cur, bm);
                cur++;
        }
}

static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
{
        __u32 *addr;

        len = cur + len;
        while (cur < len) {
                if ((cur & 31) == 0 && (len - cur) >= 32) {
                        /* fast path: set whole word at once */
                        addr = bm + (cur >> 3);
                        *addr = 0xffffffff;
                        cur += 32;
                        continue;
                }
                mb_set_bit_atomic(lock, cur, bm);
                cur++;
        }
}

static int mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                          int first, int count)
{
        int block = 0;
        int max = 0;
        int order;
        void *buddy;
        void *buddy2;
        struct super_block *sb = e4b->bd_sb;

        BUG_ON(first + count > (sb->s_blocksize << 3));
        BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
        mb_check_buddy(e4b);
        mb_free_blocks_double(inode, e4b, first, count);

        e4b->bd_info->bb_free += count;
        if (first < e4b->bd_info->bb_first_free)
                e4b->bd_info->bb_first_free = first;

        /* let's maintain fragments counter */
        if (first != 0)
                block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
        if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
                max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
        if (block && max)
                e4b->bd_info->bb_fragments--;
        else if (!block && !max)
                e4b->bd_info->bb_fragments++;

        /* let's maintain buddy itself */
        while (count-- > 0) {
                block = first++;
                order = 0;

                if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
                        ext4_fsblk_t blocknr;
                        blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
                        blocknr += block;
                        blocknr +=
                            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);

                        ext4_error(sb, __FUNCTION__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %lu)\n",
                                   inode ? inode->i_ino : 0, blocknr, block,
                                   e4b->bd_group);
                }
                mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
                e4b->bd_info->bb_counters[order]++;

                /* start of the buddy */
                buddy = mb_find_buddy(e4b, order, &max);

                do {
                        block &= ~1UL;
                        if (mb_test_bit(block, buddy) ||
                                        mb_test_bit(block + 1, buddy))
                                break;

                        /* both the buddies are free, try to coalesce them */
                        buddy2 = mb_find_buddy(e4b, order + 1, &max);

                        if (!buddy2)
                                break;

                        if (order > 0) {
                                /* for special purposes, we don't set
                                 * free bits in bitmap */
                                mb_set_bit(block, buddy);
                                mb_set_bit(block + 1, buddy);
                        }
                        e4b->bd_info->bb_counters[order]--;
                        e4b->bd_info->bb_counters[order]--;

                        block = block >> 1;
                        order++;
                        e4b->bd_info->bb_counters[order]++;

                        mb_clear_bit(block, buddy2);
                        buddy = buddy2;
                } while (1);
        }
        mb_check_buddy(e4b);

        return 0;
}
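
/*
 * Coalescing example (values assumed, not part of the original source):
 * freeing block 5 while block 4 is already free merges the pair into
 * order-1 chunk 2 (blocks 4-5); if order-1 chunk 3 (blocks 6-7) is free
 * too, the loop merges again into an order-2 chunk covering blocks 4-7,
 * adjusting bb_counters[] at each step.
 */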

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
                                int needed, struct ext4_free_extent *ex)
{
        int next = block;
        int max;
        int ord;
        void *buddy;

        BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
        BUG_ON(ex == NULL);

        buddy = mb_find_buddy(e4b, order, &max);
        BUG_ON(buddy == NULL);
        BUG_ON(block >= max);
        if (mb_test_bit(block, buddy)) {
                ex->fe_len = 0;
                ex->fe_start = 0;
                ex->fe_group = 0;
                return 0;
        }

        /* FIXME drop order completely ? */
        if (likely(order == 0)) {
                /* find actual order */
                order = mb_find_order_for_block(e4b, block);
                block = block >> order;
        }

        ex->fe_len = 1 << order;
        ex->fe_start = block << order;
        ex->fe_group = e4b->bd_group;

        /* calc difference from given start */
        next = next - ex->fe_start;
        ex->fe_len -= next;
        ex->fe_start += next;

        while (needed > ex->fe_len &&
               (buddy = mb_find_buddy(e4b, order, &max))) {

                if (block + 1 >= max)
                        break;

                next = (block + 1) * (1 << order);
                if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
                        break;

                ord = mb_find_order_for_block(e4b, next);

                order = ord;
                block = next >> order;
                ex->fe_len += 1 << order;
        }

        BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
        return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
        int ord;
        int mlen = 0;
        int max = 0;
        int cur;
        int start = ex->fe_start;
        int len = ex->fe_len;
        unsigned ret = 0;
        int len0 = len;
        void *buddy;

        BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
        BUG_ON(e4b->bd_group != ex->fe_group);
        BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
        mb_check_buddy(e4b);
        mb_mark_used_double(e4b, start, len);

        e4b->bd_info->bb_free -= len;
        if (e4b->bd_info->bb_first_free == start)
                e4b->bd_info->bb_first_free += len;

        /* let's maintain fragments counter */
        if (start != 0)
                mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
        if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
                max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
        if (mlen && max)
                e4b->bd_info->bb_fragments++;
        else if (!mlen && !max)
                e4b->bd_info->bb_fragments--;

        /* let's maintain buddy itself */
        while (len) {
                ord = mb_find_order_for_block(e4b, start);

                if (((start >> ord) << ord) == start && len >= (1 << ord)) {
                        /* the whole chunk may be allocated at once! */
                        mlen = 1 << ord;
                        buddy = mb_find_buddy(e4b, ord, &max);
                        BUG_ON((start >> ord) >= max);
                        mb_set_bit(start >> ord, buddy);
                        e4b->bd_info->bb_counters[ord]--;
                        start += mlen;
                        len -= mlen;
                        BUG_ON(len < 0);
                        continue;
                }

                /* store for history */
                if (ret == 0)
                        ret = len | (ord << 16);

                /* we have to split large buddy */
                BUG_ON(ord <= 0);
                buddy = mb_find_buddy(e4b, ord, &max);
                mb_set_bit(start >> ord, buddy);
                e4b->bd_info->bb_counters[ord]--;

                ord--;
                cur = (start >> ord) & ~1U;
                buddy = mb_find_buddy(e4b, ord, &max);
                mb_clear_bit(cur, buddy);
                mb_clear_bit(cur + 1, buddy);
                e4b->bd_info->bb_counters[ord]++;
                e4b->bd_info->bb_counters[ord]++;
        }

        mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
                    EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
        mb_check_buddy(e4b);

        return ret;
}
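
/*
 * Illustrative note (values assumed, not part of the original source):
 * if the first split in the loop above happens with len = 3 inside an
 * order-2 chunk, ret is encoded as 3 | (2 << 16); the caller later
 * unpacks ac_tail = ret & 0xffff = 3 and ac_buddy = ret >> 16 = 2 for
 * the allocation history.
 */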

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
                                        struct ext4_buddy *e4b)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        int ret;

        BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
        BUG_ON(ac->ac_status == AC_STATUS_FOUND);

        ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
        ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
        ret = mb_mark_used(e4b, &ac->ac_b_ex);

        /* preallocation can change ac_b_ex, thus we store actually
         * allocated blocks for history */
        ac->ac_f_ex = ac->ac_b_ex;

        ac->ac_status = AC_STATUS_FOUND;
        ac->ac_tail = ret & 0xffff;
        ac->ac_buddy = ret >> 16;

        /* XXXXXXX: SUCH A HORRIBLE **CK */
        /*FIXME!! Why ? */
        ac->ac_bitmap_page = e4b->bd_bitmap_page;
        get_page(ac->ac_bitmap_page);
        ac->ac_buddy_page = e4b->bd_buddy_page;
        get_page(ac->ac_buddy_page);

        /* store last allocated for subsequent stream allocation */
        if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
                spin_lock(&sbi->s_md_lock);
                sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
                sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
                spin_unlock(&sbi->s_md_lock);
        }
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
                                        struct ext4_buddy *e4b,
                                        int finish_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_free_extent *bex = &ac->ac_b_ex;
        struct ext4_free_extent *gex = &ac->ac_g_ex;
        struct ext4_free_extent ex;
        int max;

        /*
         * We don't want to scan for a whole year
         */
        if (ac->ac_found > sbi->s_mb_max_to_scan &&
                        !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
                ac->ac_status = AC_STATUS_BREAK;
                return;
        }

        /*
         * Haven't found good chunk so far, let's continue
         */
        if (bex->fe_len < gex->fe_len)
                return;

        if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
                        && bex->fe_group == e4b->bd_group) {
                /* recheck chunk's availability - we don't know
                 * when it was found (within this lock-unlock
                 * period or not) */
                max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
                if (max >= gex->fe_len) {
                        ext4_mb_use_best_found(ac, e4b);
                        return;
                }
        }
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's
 * stored in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
                                        struct ext4_free_extent *ex,
                                        struct ext4_buddy *e4b)
{
        struct ext4_free_extent *bex = &ac->ac_b_ex;
        struct ext4_free_extent *gex = &ac->ac_g_ex;

        BUG_ON(ex->fe_len <= 0);
        BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
        BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
        BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

        ac->ac_found++;

        /*
         * The special case - take what you catch first
         */
        if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
                *bex = *ex;
                ext4_mb_use_best_found(ac, e4b);
                return;
        }

        /*
         * Let's check whether the chunk is good enough
         */
        if (ex->fe_len == gex->fe_len) {
                *bex = *ex;
                ext4_mb_use_best_found(ac, e4b);
                return;
        }

        /*
         * If this is the first found extent, just store it in the context
         */
        if (bex->fe_len == 0) {
                *bex = *ex;
                return;
        }

        /*
         * If the newly found extent is better, store it in the context
         */
        if (bex->fe_len < gex->fe_len) {
                /* if the request isn't satisfied, any found extent
                 * larger than previous best one is better */
                if (ex->fe_len > bex->fe_len)
                        *bex = *ex;
        } else if (ex->fe_len > gex->fe_len) {
                /* if the request is satisfied, then we try to find
                 * an extent that still satisfies the request, but is
                 * smaller than the previous one */
                if (ex->fe_len < bex->fe_len)
                        *bex = *ex;
        }

        ext4_mb_check_limits(ac, e4b, 0);
}

static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
                                        struct ext4_buddy *e4b)
{
        struct ext4_free_extent ex = ac->ac_b_ex;
        ext4_group_t group = ex.fe_group;
        int max;
        int err;

        BUG_ON(ex.fe_len <= 0);
        err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
        if (err)
                return err;

        ext4_lock_group(ac->ac_sb, group);
        max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

        if (max > 0) {
                ac->ac_b_ex = ex;
                ext4_mb_use_best_found(ac, e4b);
        }

        ext4_unlock_group(ac->ac_sb, group);
        ext4_mb_release_desc(e4b);

        return 0;
}

static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
                                struct ext4_buddy *e4b)
{
        ext4_group_t group = ac->ac_g_ex.fe_group;
        int max;
        int err;
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_super_block *es = sbi->s_es;
        struct ext4_free_extent ex;

        if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
                return 0;

        err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
        if (err)
                return err;

        ext4_lock_group(ac->ac_sb, group);
        max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
                             ac->ac_g_ex.fe_len, &ex);

        if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
                ext4_fsblk_t start;

                start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
                        ex.fe_start + le32_to_cpu(es->s_first_data_block);
                /* use do_div to get remainder (would be 64-bit modulo) */
                if (do_div(start, sbi->s_stripe) == 0) {
                        ac->ac_found++;
                        ac->ac_b_ex = ex;
                        ext4_mb_use_best_found(ac, e4b);
                }
        } else if (max >= ac->ac_g_ex.fe_len) {
                BUG_ON(ex.fe_len <= 0);
                BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
                BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
                ac->ac_found++;
                ac->ac_b_ex = ex;
                ext4_mb_use_best_found(ac, e4b);
        } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
                /* Sometimes, caller may want to merge even small
                 * number of blocks to an existing extent */
                BUG_ON(ex.fe_len <= 0);
                BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
                BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
                ac->ac_found++;
                ac->ac_b_ex = ex;
                ext4_mb_use_best_found(ac, e4b);
        }
        ext4_unlock_group(ac->ac_sb, group);
        ext4_mb_release_desc(e4b);

        return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
                                        struct ext4_buddy *e4b)
{
        struct super_block *sb = ac->ac_sb;
        struct ext4_group_info *grp = e4b->bd_info;
        void *buddy;
        int i;
        int k;
        int max;

        BUG_ON(ac->ac_2order <= 0);
        for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
                if (grp->bb_counters[i] == 0)
                        continue;

                buddy = mb_find_buddy(e4b, i, &max);
                BUG_ON(buddy == NULL);

                k = mb_find_next_zero_bit(buddy, max, 0);
                BUG_ON(k >= max);

                ac->ac_found++;

                ac->ac_b_ex.fe_len = 1 << i;
                ac->ac_b_ex.fe_start = k << i;
                ac->ac_b_ex.fe_group = e4b->bd_group;

                ext4_mb_use_best_found(ac, e4b);

                BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

                if (EXT4_SB(sb)->s_mb_stats)
                        atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

                break;
        }
}
1822
1823/*
1824 * The routine scans the group and measures all found extents.
1825 * In order to optimize scanning, caller must pass number of
1826 * free blocks in the group, so the routine can know upper limit.
1827 */
1828static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1829 struct ext4_buddy *e4b)
1830{
1831 struct super_block *sb = ac->ac_sb;
1832 void *bitmap = EXT4_MB_BITMAP(e4b);
1833 struct ext4_free_extent ex;
1834 int i;
1835 int free;
1836
1837 free = e4b->bd_info->bb_free;
1838 BUG_ON(free <= 0);
1839
1840 i = e4b->bd_info->bb_first_free;
1841
1842 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1843 i = mb_find_next_zero_bit(bitmap,
1844 EXT4_BLOCKS_PER_GROUP(sb), i);
1845 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1846 /*
1847 * If we have a corrupt bitmap, we won't find any
1848 * free blocks even though group info says we
1849 * have free blocks
1850 */
1851 ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1852 "group info. But bitmap says 0\n",
1853 free);
1854 break;
1855 }
1856
1857 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1858 BUG_ON(ex.fe_len <= 0);
1859 if (free < ex.fe_len) {
1860 ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1861 "group info. But got %d blocks\n",
1862 free, ex.fe_len);
1863 /*
1864 * The number of free blocks differs. This mostly
1865 * indicates that the bitmap is corrupt. So exit
1866 * without claiming the space.
1867 */
1868 break;
1869 }
1870
1871 ext4_mb_measure_extent(ac, &ex, e4b);
1872
1873 i += ex.fe_len;
1874 free -= ex.fe_len;
1875 }
1876
1877 ext4_mb_check_limits(ac, e4b, 1);
1878}
1879
1880/*
1881 * This is a special case for storages like raid5
1882 * we try to find stripe-aligned chunks for stripe-size requests
1883 * XXX should do so at least for multiples of stripe size as well
1884 */
1885static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1886 struct ext4_buddy *e4b)
1887{
1888 struct super_block *sb = ac->ac_sb;
1889 struct ext4_sb_info *sbi = EXT4_SB(sb);
1890 void *bitmap = EXT4_MB_BITMAP(e4b);
1891 struct ext4_free_extent ex;
1892 ext4_fsblk_t first_group_block;
1893 ext4_fsblk_t a;
1894 ext4_grpblk_t i;
1895 int max;
1896
1897 BUG_ON(sbi->s_stripe == 0);
1898
1899 /* find first stripe-aligned block in group */
1900 first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1901 + le32_to_cpu(sbi->s_es->s_first_data_block);
1902 a = first_group_block + sbi->s_stripe - 1;
1903 do_div(a, sbi->s_stripe);
1904 i = (a * sbi->s_stripe) - first_group_block;
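 /* i is now the group-relative offset of the first block whose
 * global block number is a multiple of s_stripe */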
1905
1906 while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1907 if (!mb_test_bit(i, bitmap)) {
1908 max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1909 if (max >= sbi->s_stripe) {
1910 ac->ac_found++;
1911 ac->ac_b_ex = ex;
1912 ext4_mb_use_best_found(ac, e4b);
1913 break;
1914 }
1915 }
1916 i += sbi->s_stripe;
1917 }
1918}
1919
1920static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1921 ext4_group_t group, int cr)
1922{
1923 unsigned free, fragments;
1924 unsigned i, bits;
1925 struct ext4_group_desc *desc;
1926 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1927
1928 BUG_ON(cr < 0 || cr >= 4);
1929 BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1930
1931 free = grp->bb_free;
1932 fragments = grp->bb_fragments;
1933 if (free == 0)
1934 return 0;
1935 if (fragments == 0)
1936 return 0;
1937
1938 switch (cr) {
1939 case 0:
1940 BUG_ON(ac->ac_2order == 0);
1941 /* If this group is uninitialized, skip it initially */
1942 desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1943 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1944 return 0;
1945
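 /* a single free chunk of order >= ac_2order satisfies the request */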
1946 bits = ac->ac_sb->s_blocksize_bits + 1;
1947 for (i = ac->ac_2order; i <= bits; i++)
1948 if (grp->bb_counters[i] > 0)
1949 return 1;
1950 break;
1951 case 1:
1952 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1953 return 1;
1954 break;
1955 case 2:
1956 if (free >= ac->ac_g_ex.fe_len)
1957 return 1;
1958 break;
1959 case 3:
1960 return 1;
1961 default:
1962 BUG();
1963 }
1964
1965 return 0;
1966}
1967
1968static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1969{
1970 ext4_group_t group;
1971 ext4_group_t i;
1972 int cr;
1973 int err = 0;
1974 int bsbits;
1975 struct ext4_sb_info *sbi;
1976 struct super_block *sb;
1977 struct ext4_buddy e4b;
1978 loff_t size, isize;
1979
1980 sb = ac->ac_sb;
1981 sbi = EXT4_SB(sb);
1982 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1983
1984 /* first, try the goal */
1985 err = ext4_mb_find_by_goal(ac, &e4b);
1986 if (err || ac->ac_status == AC_STATUS_FOUND)
1987 goto out;
1988
1989 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1990 goto out;
1991
1992 /*
1993 * ac->ac_2order is set only if the fe_len is a power of 2
1994 * if ac_2order is set we also set criteria to 0 so that we
1995 * try exact allocation using buddy.
1996 */
1997 i = fls(ac->ac_g_ex.fe_len);
1998 ac->ac_2order = 0;
1999 /*
2000 * We search using buddy data only if the order of the request
2001 * is greater than or equal to sbi->s_mb_order2_reqs.
2002 * You can tune it via /proc/fs/ext4/<partition>/order2_req
2003 */
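 /*
 * For example, a request for 8 blocks gives i = fls(8) = 4;
 * since 8 & ~(1 << 3) == 0 the length is an exact power of two
 * and ac_2order is set to 3 below.
 */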
2004 if (i >= sbi->s_mb_order2_reqs) {
2005 /*
2006 * This should tell if fe_len is exactly power of 2
2007 */
2008 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2009 ac->ac_2order = i - 1;
2010 }
2011
2012 bsbits = ac->ac_sb->s_blocksize_bits;
2013 /* if stream allocation is enabled, use global goal */
2014 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2015 isize = i_size_read(ac->ac_inode) >> bsbits;
2016 if (size < isize)
2017 size = isize;
2018
2019 if (size < sbi->s_mb_stream_request &&
2020 (ac->ac_flags & EXT4_MB_HINT_DATA)) {
2021 /* TBD: may be hot point */
2022 spin_lock(&sbi->s_md_lock);
2023 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2024 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2025 spin_unlock(&sbi->s_md_lock);
2026 }
2027
2028 /* searching for the right group start from the goal value specified */
2029 group = ac->ac_g_ex.fe_group;
2030
2031 /* Let's just scan groups to find more or less suitable blocks */
2032 cr = ac->ac_2order ? 0 : 1;
2033 /*
2034 * cr == 0 try to get exact allocation,
2035 * cr == 3 try to get anything
2036 */
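 /*
 * In between, cr == 1 accepts a group only if its average free
 * extent (free / fragments) reaches the goal length, while cr == 2
 * merely requires enough free blocks in total; see
 * ext4_mb_good_group() for the exact checks.
 */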
2037repeat:
2038 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2039 ac->ac_criteria = cr;
2040 for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
2041 struct ext4_group_info *grp;
2042 struct ext4_group_desc *desc;
2043
2044 if (group == EXT4_SB(sb)->s_groups_count)
2045 group = 0;
2046
2047 /* quick check to skip empty groups */
2048 grp = ext4_get_group_info(ac->ac_sb, group);
2049 if (grp->bb_free == 0)
2050 continue;
2051
2052 /*
2053 * if the group is already init we check whether it is
2054 * a good group and if not we don't load the buddy
2055 */
2056 if (EXT4_MB_GRP_NEED_INIT(grp)) {
2057 /*
2058 * we need full data about the group
2059 * to make a good selection
2060 */
2061 err = ext4_mb_load_buddy(sb, group, &e4b);
2062 if (err)
2063 goto out;
2064 ext4_mb_release_desc(&e4b);
2065 }
2066
2067 /*
2068 * If the particular group doesn't satisfy our
2069 * criteria we continue with the next group
2070 */
2071 if (!ext4_mb_good_group(ac, group, cr))
2072 continue;
2073
2074 err = ext4_mb_load_buddy(sb, group, &e4b);
2075 if (err)
2076 goto out;
2077
2078 ext4_lock_group(sb, group);
2079 if (!ext4_mb_good_group(ac, group, cr)) {
2080 /* someone did allocation from this group */
2081 ext4_unlock_group(sb, group);
2082 ext4_mb_release_desc(&e4b);
2083 continue;
2084 }
2085
2086 ac->ac_groups_scanned++;
2087 desc = ext4_get_group_desc(sb, group, NULL);
2088 if (cr == 0 || (desc->bg_flags &
2089 cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
2090 ac->ac_2order != 0))
2091 ext4_mb_simple_scan_group(ac, &e4b);
2092 else if (cr == 1 &&
2093 ac->ac_g_ex.fe_len == sbi->s_stripe)
2094 ext4_mb_scan_aligned(ac, &e4b);
2095 else
2096 ext4_mb_complex_scan_group(ac, &e4b);
2097
2098 ext4_unlock_group(sb, group);
2099 ext4_mb_release_desc(&e4b);
2100
2101 if (ac->ac_status != AC_STATUS_CONTINUE)
2102 break;
2103 }
2104 }
2105
2106 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2107 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2108 /*
2109 * We've been searching too long. Let's try to allocate
2110 * the best chunk we've found so far
2111 */
2112
2113 ext4_mb_try_best_found(ac, &e4b);
2114 if (ac->ac_status != AC_STATUS_FOUND) {
2115 /*
2116 * Someone more lucky has already allocated it.
2117 * The only thing we can do is just take first
2118 * found block(s)
2119 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2120 */
2121 ac->ac_b_ex.fe_group = 0;
2122 ac->ac_b_ex.fe_start = 0;
2123 ac->ac_b_ex.fe_len = 0;
2124 ac->ac_status = AC_STATUS_CONTINUE;
2125 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2126 cr = 3;
2127 atomic_inc(&sbi->s_mb_lost_chunks);
2128 goto repeat;
2129 }
2130 }
2131out:
2132 return err;
2133}
2134
2135#ifdef EXT4_MB_HISTORY
2136struct ext4_mb_proc_session {
2137 struct ext4_mb_history *history;
2138 struct super_block *sb;
2139 int start;
2140 int max;
2141};
2142
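/*
 * Skip over empty slots in the circular history buffer, wrapping at
 * s->history + s->max; returns NULL once the scan gets back to
 * s->start, i.e. when there are no more valid entries.
 */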
2143static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
2144 struct ext4_mb_history *hs,
2145 int first)
2146{
2147 if (hs == s->history + s->max)
2148 hs = s->history;
2149 if (!first && hs == s->history + s->start)
2150 return NULL;
2151 while (hs->orig.fe_len == 0) {
2152 hs++;
2153 if (hs == s->history + s->max)
2154 hs = s->history;
2155 if (hs == s->history + s->start)
2156 return NULL;
2157 }
2158 return hs;
2159}
2160
2161static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2162{
2163 struct ext4_mb_proc_session *s = seq->private;
2164 struct ext4_mb_history *hs;
2165 int l = *pos;
2166
2167 if (l == 0)
2168 return SEQ_START_TOKEN;
2169 hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2170 if (!hs)
2171 return NULL;
2172 while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2173 return hs;
2174}
2175
2176static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
2177 loff_t *pos)
2178{
2179 struct ext4_mb_proc_session *s = seq->private;
2180 struct ext4_mb_history *hs = v;
2181
2182 ++*pos;
2183 if (v == SEQ_START_TOKEN)
2184 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2185 else
2186 return ext4_mb_history_skip_empty(s, ++hs, 0);
2187}
2188
2189static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
2190{
2191 char buf[25], buf2[25], buf3[25], *fmt;
2192 struct ext4_mb_history *hs = v;
2193
2194 if (v == SEQ_START_TOKEN) {
2195 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2196 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
2197 "pid", "inode", "original", "goal", "result", "found",
2198 "grps", "cr", "flags", "merge", "tail", "broken");
2199 return 0;
2200 }
2201
2202 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
2203 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2204 "%-5u %-5s %-5u %-6u\n";
2205 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
2206 hs->result.fe_start, hs->result.fe_len,
2207 hs->result.fe_logical);
2208 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
2209 hs->orig.fe_start, hs->orig.fe_len,
2210 hs->orig.fe_logical);
2211 sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group,
2212 hs->goal.fe_start, hs->goal.fe_len,
2213 hs->goal.fe_logical);
2214 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2215 hs->found, hs->groups, hs->cr, hs->flags,
2216 hs->merged ? "M" : "", hs->tail,
2217 hs->buddy ? 1 << hs->buddy : 0);
2218 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
2219 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2220 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
2221 hs->result.fe_start, hs->result.fe_len,
2222 hs->result.fe_logical);
2223 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
2224 hs->orig.fe_start, hs->orig.fe_len,
2225 hs->orig.fe_logical);
2226 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2227 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
2228 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
2229 hs->result.fe_start, hs->result.fe_len);
2230 seq_printf(seq, "%-5u %-8u %-23s discard\n",
2231 hs->pid, hs->ino, buf2);
2232 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
2233 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
2234 hs->result.fe_start, hs->result.fe_len);
2235 seq_printf(seq, "%-5u %-8u %-23s free\n",
2236 hs->pid, hs->ino, buf2);
2237 }
2238 return 0;
2239}
2240
2241static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
2242{
2243}
2244
2245static struct seq_operations ext4_mb_seq_history_ops = {
2246 .start = ext4_mb_seq_history_start,
2247 .next = ext4_mb_seq_history_next,
2248 .stop = ext4_mb_seq_history_stop,
2249 .show = ext4_mb_seq_history_show,
2250};
2251
2252static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
2253{
2254 struct super_block *sb = PDE(inode)->data;
2255 struct ext4_sb_info *sbi = EXT4_SB(sb);
2256 struct ext4_mb_proc_session *s;
2257 int rc;
2258 int size;
2259
2260 s = kmalloc(sizeof(*s), GFP_KERNEL);
2261 if (s == NULL)
2262 return -ENOMEM;
2263 s->sb = sb;
2264 size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
2265 s->history = kmalloc(size, GFP_KERNEL);
2266 if (s->history == NULL) {
2267 kfree(s);
2268 return -ENOMEM;
2269 }
2270
2271 spin_lock(&sbi->s_mb_history_lock);
2272 memcpy(s->history, sbi->s_mb_history, size);
2273 s->max = sbi->s_mb_history_max;
2274 s->start = sbi->s_mb_history_cur % s->max;
2275 spin_unlock(&sbi->s_mb_history_lock);
2276
2277 rc = seq_open(file, &ext4_mb_seq_history_ops);
2278 if (rc == 0) {
2279 struct seq_file *m = (struct seq_file *)file->private_data;
2280 m->private = s;
2281 } else {
2282 kfree(s->history);
2283 kfree(s);
2284 }
2285 return rc;
2286
2287}
2288
2289static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2290{
2291 struct seq_file *seq = (struct seq_file *)file->private_data;
2292 struct ext4_mb_proc_session *s = seq->private;
2293 kfree(s->history);
2294 kfree(s);
2295 return seq_release(inode, file);
2296}
2297
2298static ssize_t ext4_mb_seq_history_write(struct file *file,
2299 const char __user *buffer,
2300 size_t count, loff_t *ppos)
2301{
2302 struct seq_file *seq = (struct seq_file *)file->private_data;
2303 struct ext4_mb_proc_session *s = seq->private;
2304 struct super_block *sb = s->sb;
2305 char str[32];
2306 int value;
2307
2308 if (count >= sizeof(str)) {
2309 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2310 "mb_history", (int)sizeof(str));
2311 return -EOVERFLOW;
2312 }
2313
2314 if (copy_from_user(str, buffer, count))
2315 return -EFAULT;
2316
2317 value = simple_strtol(str, NULL, 0);
2318 if (value < 0)
2319 return -ERANGE;
2320 EXT4_SB(sb)->s_mb_history_filter = value;
2321
2322 return count;
2323}
2324
2325static struct file_operations ext4_mb_seq_history_fops = {
2326 .owner = THIS_MODULE,
2327 .open = ext4_mb_seq_history_open,
2328 .read = seq_read,
2329 .write = ext4_mb_seq_history_write,
2330 .llseek = seq_lseek,
2331 .release = ext4_mb_seq_history_release,
2332};
2333
2334static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2335{
2336 struct super_block *sb = seq->private;
2337 struct ext4_sb_info *sbi = EXT4_SB(sb);
2338 ext4_group_t group;
2339
2340 if (*pos < 0 || *pos >= sbi->s_groups_count)
2341 return NULL;
2342
2343 group = *pos + 1;
2344 return (void *) group;
2345}
2346
2347static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2348{
2349 struct super_block *sb = seq->private;
2350 struct ext4_sb_info *sbi = EXT4_SB(sb);
2351 ext4_group_t group;
2352
2353 ++*pos;
2354 if (*pos < 0 || *pos >= sbi->s_groups_count)
2355 return NULL;
2356 group = *pos + 1;
2357 return (void *) group;
2358}
2359
2360static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2361{
2362 struct super_block *sb = seq->private;
2363 long group = (long) v;
2364 int i;
2365 int err;
2366 struct ext4_buddy e4b;
2367 struct sg {
2368 struct ext4_group_info info;
2369 unsigned short counters[16];
2370 } sg;
2371
2372 group--;
2373 if (group == 0)
2374 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2375 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2376 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2377 "group", "free", "frags", "first",
2378 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2379 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2380
2381 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2382 sizeof(struct ext4_group_info);
2383 err = ext4_mb_load_buddy(sb, group, &e4b);
2384 if (err) {
2385 seq_printf(seq, "#%-5lu: I/O error\n", group);
2386 return 0;
2387 }
2388 ext4_lock_group(sb, group);
2389 memcpy(&sg, ext4_get_group_info(sb, group), i);
2390 ext4_unlock_group(sb, group);
2391 ext4_mb_release_desc(&e4b);
2392
2393 seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2394 sg.info.bb_fragments, sg.info.bb_first_free);
2395 for (i = 0; i <= 13; i++)
2396 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2397 sg.info.bb_counters[i] : 0);
2398 seq_printf(seq, " ]\n");
2399
2400 return 0;
2401}
2402
2403static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2404{
2405}
2406
2407static struct seq_operations ext4_mb_seq_groups_ops = {
2408 .start = ext4_mb_seq_groups_start,
2409 .next = ext4_mb_seq_groups_next,
2410 .stop = ext4_mb_seq_groups_stop,
2411 .show = ext4_mb_seq_groups_show,
2412};
2413
2414static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2415{
2416 struct super_block *sb = PDE(inode)->data;
2417 int rc;
2418
2419 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2420 if (rc == 0) {
2421 struct seq_file *m = (struct seq_file *)file->private_data;
2422 m->private = sb;
2423 }
2424 return rc;
2425
2426}
2427
2428static struct file_operations ext4_mb_seq_groups_fops = {
2429 .owner = THIS_MODULE,
2430 .open = ext4_mb_seq_groups_open,
2431 .read = seq_read,
2432 .llseek = seq_lseek,
2433 .release = seq_release,
2434};
2435
2436static void ext4_mb_history_release(struct super_block *sb)
2437{
2438 struct ext4_sb_info *sbi = EXT4_SB(sb);
2439
2440 remove_proc_entry("mb_groups", sbi->s_mb_proc);
2441 remove_proc_entry("mb_history", sbi->s_mb_proc);
2442
2443 kfree(sbi->s_mb_history);
2444}
2445
2446static void ext4_mb_history_init(struct super_block *sb)
2447{
2448 struct ext4_sb_info *sbi = EXT4_SB(sb);
2449 int i;
2450
2451 if (sbi->s_mb_proc != NULL) {
2452 struct proc_dir_entry *p;
2453 p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
2454 if (p) {
2455 p->proc_fops = &ext4_mb_seq_history_fops;
2456 p->data = sb;
2457 }
2458 p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
2459 if (p) {
2460 p->proc_fops = &ext4_mb_seq_groups_fops;
2461 p->data = sb;
2462 }
2463 }
2464
2465 sbi->s_mb_history_max = 1000;
2466 sbi->s_mb_history_cur = 0;
2467 spin_lock_init(&sbi->s_mb_history_lock);
2468 i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2469 sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
2470 if (likely(sbi->s_mb_history != NULL))
2471 memset(sbi->s_mb_history, 0, i);
2472 /* if we can't allocate history, then we simply won't use it */
2473}
2474
2475static void ext4_mb_store_history(struct ext4_allocation_context *ac)
2476{
2477 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2478 struct ext4_mb_history h;
2479
2480 if (unlikely(sbi->s_mb_history == NULL))
2481 return;
2482
2483 if (!(ac->ac_op & sbi->s_mb_history_filter))
2484 return;
2485
2486 h.op = ac->ac_op;
2487 h.pid = current->pid;
2488 h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2489 h.orig = ac->ac_o_ex;
2490 h.result = ac->ac_b_ex;
2491 h.flags = ac->ac_flags;
2492 h.found = ac->ac_found;
2493 h.groups = ac->ac_groups_scanned;
2494 h.cr = ac->ac_criteria;
2495 h.tail = ac->ac_tail;
2496 h.buddy = ac->ac_buddy;
2497 h.merged = 0;
2498 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2499 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2500 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2501 h.merged = 1;
2502 h.goal = ac->ac_g_ex;
2503 h.result = ac->ac_f_ex;
2504 }
2505
2506 spin_lock(&sbi->s_mb_history_lock);
2507 memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2508 if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2509 sbi->s_mb_history_cur = 0;
2510 spin_unlock(&sbi->s_mb_history_lock);
2511}
2512
2513#else
2514#define ext4_mb_history_release(sb)
2515#define ext4_mb_history_init(sb)
2516#endif
2517
2518static int ext4_mb_init_backend(struct super_block *sb)
2519{
2520 ext4_group_t i;
2521 int j, len, metalen;
2522 struct ext4_sb_info *sbi = EXT4_SB(sb);
2523 int num_meta_group_infos =
2524 (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2525 EXT4_DESC_PER_BLOCK_BITS(sb);
2526 struct ext4_group_info **meta_group_info;
2527
2528 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2529 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2530 * So a two level scheme suffices for now. */
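 /* the info for group g then lives at
 * s_group_info[g >> EXT4_DESC_PER_BLOCK_BITS(sb)]
 * [g & (EXT4_DESC_PER_BLOCK(sb) - 1)] */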
2531 sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
2532 num_meta_group_infos, GFP_KERNEL);
2533 if (sbi->s_group_info == NULL) {
2534 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2535 return -ENOMEM;
2536 }
2537 sbi->s_buddy_cache = new_inode(sb);
2538 if (sbi->s_buddy_cache == NULL) {
2539 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2540 goto err_freesgi;
2541 }
2542 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2543
2544 metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2545 for (i = 0; i < num_meta_group_infos; i++) {
2546 if ((i + 1) == num_meta_group_infos)
2547 metalen = sizeof(*meta_group_info) *
2548 (sbi->s_groups_count -
2549 (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2550 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2551 if (meta_group_info == NULL) {
2552 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2553 "buddy group\n");
2554 goto err_freemeta;
2555 }
2556 sbi->s_group_info[i] = meta_group_info;
2557 }
2558
2559 /*
2560 * calculate the needed size. if you change the bb_counters size,
2561 * don't forget about ext4_mb_generate_buddy()
2562 */
2563 len = sizeof(struct ext4_group_info);
2564 len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
2565 for (i = 0; i < sbi->s_groups_count; i++) {
2566 struct ext4_group_desc *desc;
2567
2568 meta_group_info =
2569 sbi->s_group_info[i >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2570 j = i & (EXT4_DESC_PER_BLOCK(sb) - 1);
2571
2572 meta_group_info[j] = kzalloc(len, GFP_KERNEL);
2573 if (meta_group_info[j] == NULL) {
2574 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2575 i--;
2576 goto err_freebuddy;
2577 }
2578 desc = ext4_get_group_desc(sb, i, NULL);
2579 if (desc == NULL) {
2580 printk(KERN_ERR
2581 "EXT4-fs: can't read descriptor %lu\n", i);
2582 goto err_freebuddy;
2583 }
2584 memset(meta_group_info[j], 0, len);
2585 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2586 &(meta_group_info[j]->bb_state));
2587
2588 /*
2589 * initialize bb_free to be able to skip
2590 * empty groups without initialization
2591 */
2592 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2593 meta_group_info[j]->bb_free =
2594 ext4_free_blocks_after_init(sb, i, desc);
2595 } else {
2596 meta_group_info[j]->bb_free =
2597 le16_to_cpu(desc->bg_free_blocks_count);
2598 }
2599
2600 INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
2601
2602#ifdef DOUBLE_CHECK
2603 {
2604 struct buffer_head *bh;
2605 meta_group_info[j]->bb_bitmap =
2606 kmalloc(sb->s_blocksize, GFP_KERNEL);
2607 BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
2608 bh = read_block_bitmap(sb, i);
2609 BUG_ON(bh == NULL);
2610 memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
2611 sb->s_blocksize);
2612 put_bh(bh);
2613 }
2614#endif
2615
2616 }
2617
2618 return 0;
2619
2620err_freebuddy:
 /* ext4_group_t is unsigned, so "while (i >= 0)" would never
 * terminate; count i down until it wraps instead */
2621 while (i != (ext4_group_t) -1) {
2622 kfree(ext4_get_group_info(sb, i));
2623 i--;
2624 }
2625 i = num_meta_group_infos;
2626err_freemeta:
2627 while (--i >= 0)
2628 kfree(sbi->s_group_info[i]);
2629 iput(sbi->s_buddy_cache);
2630err_freesgi:
2631 kfree(sbi->s_group_info);
2632 return -ENOMEM;
2633}
2634
2635int ext4_mb_init(struct super_block *sb, int needs_recovery)
2636{
2637 struct ext4_sb_info *sbi = EXT4_SB(sb);
2638 unsigned i;
2639 unsigned offset;
2640 unsigned max;
2641
2642 if (!test_opt(sb, MBALLOC))
2643 return 0;
2644
2645 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2646
2647 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2648 if (sbi->s_mb_offsets == NULL) {
2649 clear_opt(sbi->s_mount_opt, MBALLOC);
2650 return -ENOMEM;
2651 }
2652 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2653 if (sbi->s_mb_maxs == NULL) {
2654 clear_opt(sbi->s_mount_opt, MBALLOC);
2655 kfree(sbi->s_mb_offsets);
2656 return -ENOMEM;
2657 }
2658
2659 /* order 0 is regular bitmap */
2660 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2661 sbi->s_mb_offsets[0] = 0;
2662
2663 i = 1;
2664 offset = 0;
2665 max = sb->s_blocksize << 2;
2666 do {
2667 sbi->s_mb_offsets[i] = offset;
2668 sbi->s_mb_maxs[i] = max;
2669 offset += 1 << (sb->s_blocksize_bits - i);
2670 max = max >> 1;
2671 i++;
2672 } while (i <= sb->s_blocksize_bits + 1);
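 /*
 * The order-i buddy bitmap thus starts s_mb_offsets[i] bytes into
 * the buddy block and holds s_mb_maxs[i] bits; each order needs
 * half as many bits as the one below it.
 */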
2673
2674 /* init file for buddy data */
2675 i = ext4_mb_init_backend(sb);
2676 if (i) {
2677 clear_opt(sbi->s_mount_opt, MBALLOC);
2678 kfree(sbi->s_mb_offsets);
2679 kfree(sbi->s_mb_maxs);
2680 return i;
2681 }
2682
2683 spin_lock_init(&sbi->s_md_lock);
2684 INIT_LIST_HEAD(&sbi->s_active_transaction);
2685 INIT_LIST_HEAD(&sbi->s_closed_transaction);
2686 INIT_LIST_HEAD(&sbi->s_committed_transaction);
2687 spin_lock_init(&sbi->s_bal_lock);
2688
2689 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2690 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2691 sbi->s_mb_stats = MB_DEFAULT_STATS;
2692 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2693 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2694 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2695 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2696
2697 i = sizeof(struct ext4_locality_group) * NR_CPUS;
2698 sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
2699 if (sbi->s_locality_groups == NULL) {
2700 clear_opt(sbi->s_mount_opt, MBALLOC);
2701 kfree(sbi->s_mb_offsets);
2702 kfree(sbi->s_mb_maxs);
2703 return -ENOMEM;
2704 }
2705 for (i = 0; i < NR_CPUS; i++) {
2706 struct ext4_locality_group *lg;
2707 lg = &sbi->s_locality_groups[i];
2708 mutex_init(&lg->lg_mutex);
2709 INIT_LIST_HEAD(&lg->lg_prealloc_list);
2710 spin_lock_init(&lg->lg_prealloc_lock);
2711 }
2712
2713 ext4_mb_init_per_dev_proc(sb);
2714 ext4_mb_history_init(sb);
2715
2716 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2717 return 0;
2718}
2719
2720/* needs to be called with ext4 group lock (ext4_lock_group) */
2721static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2722{
2723 struct ext4_prealloc_space *pa;
2724 struct list_head *cur, *tmp;
2725 int count = 0;
2726
2727 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2728 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2729 list_del(&pa->pa_group_list);
2730 count++;
2731 kfree(pa);
2732 }
2733 if (count)
2734 mb_debug("mballoc: %u PAs left\n", count);
2735
2736}
2737
2738int ext4_mb_release(struct super_block *sb)
2739{
2740 ext4_group_t i;
2741 int num_meta_group_infos;
2742 struct ext4_group_info *grinfo;
2743 struct ext4_sb_info *sbi = EXT4_SB(sb);
2744
2745 if (!test_opt(sb, MBALLOC))
2746 return 0;
2747
2748 /* release freed, non-committed blocks */
2749 spin_lock(&sbi->s_md_lock);
2750 list_splice_init(&sbi->s_closed_transaction,
2751 &sbi->s_committed_transaction);
2752 list_splice_init(&sbi->s_active_transaction,
2753 &sbi->s_committed_transaction);
2754 spin_unlock(&sbi->s_md_lock);
2755 ext4_mb_free_committed_blocks(sb);
2756
2757 if (sbi->s_group_info) {
2758 for (i = 0; i < sbi->s_groups_count; i++) {
2759 grinfo = ext4_get_group_info(sb, i);
2760#ifdef DOUBLE_CHECK
2761 kfree(grinfo->bb_bitmap);
2762#endif
2763 ext4_lock_group(sb, i);
2764 ext4_mb_cleanup_pa(grinfo);
2765 ext4_unlock_group(sb, i);
2766 kfree(grinfo);
2767 }
2768 num_meta_group_infos = (sbi->s_groups_count +
2769 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2770 EXT4_DESC_PER_BLOCK_BITS(sb);
2771 for (i = 0; i < num_meta_group_infos; i++)
2772 kfree(sbi->s_group_info[i]);
2773 kfree(sbi->s_group_info);
2774 }
2775 kfree(sbi->s_mb_offsets);
2776 kfree(sbi->s_mb_maxs);
2777 if (sbi->s_buddy_cache)
2778 iput(sbi->s_buddy_cache);
2779 if (sbi->s_mb_stats) {
2780 printk(KERN_INFO
2781 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2782 atomic_read(&sbi->s_bal_allocated),
2783 atomic_read(&sbi->s_bal_reqs),
2784 atomic_read(&sbi->s_bal_success));
2785 printk(KERN_INFO
2786 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2787 "%u 2^N hits, %u breaks, %u lost\n",
2788 atomic_read(&sbi->s_bal_ex_scanned),
2789 atomic_read(&sbi->s_bal_goals),
2790 atomic_read(&sbi->s_bal_2orders),
2791 atomic_read(&sbi->s_bal_breaks),
2792 atomic_read(&sbi->s_mb_lost_chunks));
2793 printk(KERN_INFO
2794 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2795 sbi->s_mb_buddies_generated++,
2796 sbi->s_mb_generation_time);
2797 printk(KERN_INFO
2798 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2799 atomic_read(&sbi->s_mb_preallocated),
2800 atomic_read(&sbi->s_mb_discarded));
2801 }
2802
2803 kfree(sbi->s_locality_groups);
2804
2805 ext4_mb_history_release(sb);
2806 ext4_mb_destroy_per_dev_proc(sb);
2807
2808 return 0;
2809}
2810
2811static void ext4_mb_free_committed_blocks(struct super_block *sb)
2812{
2813 struct ext4_sb_info *sbi = EXT4_SB(sb);
2814 int err;
2815 int i;
2816 int count = 0;
2817 int count2 = 0;
2818 struct ext4_free_metadata *md;
2819 struct ext4_buddy e4b;
2820
2821 if (list_empty(&sbi->s_committed_transaction))
2822 return;
2823
2824 /* there are committed blocks to be freed yet */
2825 do {
2826 /* get next array of blocks */
2827 md = NULL;
2828 spin_lock(&sbi->s_md_lock);
2829 if (!list_empty(&sbi->s_committed_transaction)) {
2830 md = list_entry(sbi->s_committed_transaction.next,
2831 struct ext4_free_metadata, list);
2832 list_del(&md->list);
2833 }
2834 spin_unlock(&sbi->s_md_lock);
2835
2836 if (md == NULL)
2837 break;
2838
2839 mb_debug("gonna free %u blocks in group %lu (0x%p):",
2840 md->num, md->group, md);
2841
2842 err = ext4_mb_load_buddy(sb, md->group, &e4b);
2843 /* we expect to find existing buddy because it's pinned */
2844 BUG_ON(err != 0);
2845
2846 /* there are blocks to put in buddy to make them really free */
2847 count += md->num;
2848 count2++;
2849 ext4_lock_group(sb, md->group);
2850 for (i = 0; i < md->num; i++) {
2851 mb_debug(" %u", md->blocks[i]);
2852 err = mb_free_blocks(NULL, &e4b, md->blocks[i], 1);
2853 BUG_ON(err != 0);
2854 }
2855 mb_debug("\n");
2856 ext4_unlock_group(sb, md->group);
2857
2858 /* balance refcounts from ext4_mb_free_metadata() */
2859 page_cache_release(e4b.bd_buddy_page);
2860 page_cache_release(e4b.bd_bitmap_page);
2861
2862 kfree(md);
2863 ext4_mb_release_desc(&e4b);
2864
2865 } while (md);
2866
2867 mb_debug("freed %u blocks in %u structures\n", count, count2);
2868}
2869
2870#define EXT4_MB_STATS_NAME "stats"
2871#define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
2872#define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
2873#define EXT4_MB_ORDER2_REQ "order2_req"
2874#define EXT4_MB_STREAM_REQ "stream_req"
2875#define EXT4_MB_GROUP_PREALLOC "group_prealloc"
2876
2877
2878
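/*
 * Boilerplate procfs read/write handlers for the per-device tunables
 * above: reads print the current value, writes parse a decimal string
 * with simple_strtol() and reject non-positive values.
 */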
2879#define MB_PROC_VALUE_READ(name) \
2880static int ext4_mb_read_##name(char *page, char **start, \
2881 off_t off, int count, int *eof, void *data) \
2882{ \
2883 struct ext4_sb_info *sbi = data; \
2884 int len; \
2885 *eof = 1; \
2886 if (off != 0) \
2887 return 0; \
2888 len = sprintf(page, "%ld\n", sbi->s_mb_##name); \
2889 *start = page; \
2890 return len; \
2891}
2892
2893#define MB_PROC_VALUE_WRITE(name) \
2894static int ext4_mb_write_##name(struct file *file, \
2895 const char __user *buf, unsigned long cnt, void *data) \
2896{ \
2897 struct ext4_sb_info *sbi = data; \
2898 char str[32]; \
2899 long value; \
2900 if (cnt >= sizeof(str)) \
2901 return -EINVAL; \
2902 if (copy_from_user(str, buf, cnt)) \
2903 return -EFAULT; \
2904 value = simple_strtol(str, NULL, 0); \
2905 if (value <= 0) \
2906 return -ERANGE; \
2907 sbi->s_mb_##name = value; \
2908 return cnt; \
2909}
2910
2911MB_PROC_VALUE_READ(stats);
2912MB_PROC_VALUE_WRITE(stats);
2913MB_PROC_VALUE_READ(max_to_scan);
2914MB_PROC_VALUE_WRITE(max_to_scan);
2915MB_PROC_VALUE_READ(min_to_scan);
2916MB_PROC_VALUE_WRITE(min_to_scan);
2917MB_PROC_VALUE_READ(order2_reqs);
2918MB_PROC_VALUE_WRITE(order2_reqs);
2919MB_PROC_VALUE_READ(stream_request);
2920MB_PROC_VALUE_WRITE(stream_request);
2921MB_PROC_VALUE_READ(group_prealloc);
2922MB_PROC_VALUE_WRITE(group_prealloc);
2923
2924#define MB_PROC_HANDLER(name, var) \
2925do { \
2926 proc = create_proc_entry(name, mode, sbi->s_mb_proc); \
2927 if (proc == NULL) { \
2928 printk(KERN_ERR "EXT4-fs: can't create %s\n", name); \
2929 goto err_out; \
2930 } \
2931 proc->data = sbi; \
2932 proc->read_proc = ext4_mb_read_##var ; \
2933 proc->write_proc = ext4_mb_write_##var; \
2934} while (0)
2935
2936static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2937{
2938 mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2939 struct ext4_sb_info *sbi = EXT4_SB(sb);
2940 struct proc_dir_entry *proc;
2941 char devname[64];
2942
2943 snprintf(devname, sizeof(devname) - 1, "%s",
2944 bdevname(sb->s_bdev, devname));
2945 sbi->s_mb_proc = proc_mkdir(devname, proc_root_ext4);
2946
2947 MB_PROC_HANDLER(EXT4_MB_STATS_NAME, stats);
2948 MB_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, max_to_scan);
2949 MB_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, min_to_scan);
2950 MB_PROC_HANDLER(EXT4_MB_ORDER2_REQ, order2_reqs);
2951 MB_PROC_HANDLER(EXT4_MB_STREAM_REQ, stream_request);
2952 MB_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, group_prealloc);
2953
2954 return 0;
2955
2956err_out:
2957 printk(KERN_ERR "EXT4-fs: Unable to create %s\n", devname);
2958 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc);
2959 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc);
2960 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc);
2961 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
2962 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
2963 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc);
2964 remove_proc_entry(devname, proc_root_ext4);
2965 sbi->s_mb_proc = NULL;
2966
2967 return -ENOMEM;
2968}
2969
2970static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2971{
2972 struct ext4_sb_info *sbi = EXT4_SB(sb);
2973 char devname[64];
2974
2975 if (sbi->s_mb_proc == NULL)
2976 return -EINVAL;
2977
2978 snprintf(devname, sizeof(devname) - 1, "%s",
2979 bdevname(sb->s_bdev, devname));
2980 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc);
2981 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc);
2982 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc);
2983 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
2984 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
2985 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc);
2986 remove_proc_entry(devname, proc_root_ext4);
2987
2988 return 0;
2989}
2990
2991int __init init_ext4_mballoc(void)
2992{
2993 ext4_pspace_cachep =
2994 kmem_cache_create("ext4_prealloc_space",
2995 sizeof(struct ext4_prealloc_space),
2996 0, SLAB_RECLAIM_ACCOUNT, NULL);
2997 if (ext4_pspace_cachep == NULL)
2998 return -ENOMEM;
2999
3000 ext4_ac_cachep =
3001 kmem_cache_create("ext4_alloc_context",
3002 sizeof(struct ext4_allocation_context),
3003 0, SLAB_RECLAIM_ACCOUNT, NULL);
3004 if (ext4_ac_cachep == NULL) {
3005 kmem_cache_destroy(ext4_pspace_cachep);
3006 return -ENOMEM;
3007 }
3008#ifdef CONFIG_PROC_FS
3009 proc_root_ext4 = proc_mkdir("fs/ext4", NULL);
3010 if (proc_root_ext4 == NULL)
3011 printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n");
3012#endif
3013 return 0;
3014}
3015
3016void exit_ext4_mballoc(void)
3017{
3018 /* XXX: synchronize_rcu(); */
3019 kmem_cache_destroy(ext4_pspace_cachep);
3020 kmem_cache_destroy(ext4_ac_cachep);
3021#ifdef CONFIG_PROC_FS
3022 remove_proc_entry("fs/ext4", NULL);
3023#endif
3024}
3025
3026
3027/*
3028 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
3029 * Returns 0 on success or an error code
3030 */
3031static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3032 handle_t *handle)
3033{
3034 struct buffer_head *bitmap_bh = NULL;
3035 struct ext4_super_block *es;
3036 struct ext4_group_desc *gdp;
3037 struct buffer_head *gdp_bh;
3038 struct ext4_sb_info *sbi;
3039 struct super_block *sb;
3040 ext4_fsblk_t block;
3041 int err;
3042
3043 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3044 BUG_ON(ac->ac_b_ex.fe_len <= 0);
3045
3046 sb = ac->ac_sb;
3047 sbi = EXT4_SB(sb);
3048 es = sbi->s_es;
3049
3052
3053 err = -EIO;
3054 bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3055 if (!bitmap_bh)
3056 goto out_err;
3057
3058 err = ext4_journal_get_write_access(handle, bitmap_bh);
3059 if (err)
3060 goto out_err;
3061
3062 err = -EIO;
3063 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3064 if (!gdp)
3065 goto out_err;
3066
 /* gdp is valid only from this point on, so the debug output that
 * used to sit above the bitmap read belongs here */
 ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
 gdp->bg_free_blocks_count);

3067 err = ext4_journal_get_write_access(handle, gdp_bh);
3068 if (err)
3069 goto out_err;
3070
3071 block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
3072 + ac->ac_b_ex.fe_start
3073 + le32_to_cpu(es->s_first_data_block);
3074
3075 if (block == ext4_block_bitmap(sb, gdp) ||
3076 block == ext4_inode_bitmap(sb, gdp) ||
3077 in_range(block, ext4_inode_table(sb, gdp),
3078 EXT4_SB(sb)->s_itb_per_group)) {
3079
3080 ext4_error(sb, __FUNCTION__,
3081 "Allocating block in system zone - block = %llu",
3082 block);
3083 }
3084#ifdef AGGRESSIVE_CHECK
3085 {
3086 int i;
3087 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3088 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3089 bitmap_bh->b_data));
3090 }
3091 }
3092#endif
3093 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
3094 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
3095
3096 spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3097 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
3098 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3099 gdp->bg_free_blocks_count =
3100 cpu_to_le16(ext4_free_blocks_after_init(sb,
3101 ac->ac_b_ex.fe_group,
3102 gdp));
3103 }
3104 gdp->bg_free_blocks_count =
3105 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
3106 - ac->ac_b_ex.fe_len);
3107 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
3108 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3109 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
3110
3111 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
3112 if (err)
3113 goto out_err;
3114 err = ext4_journal_dirty_metadata(handle, gdp_bh);
3115
3116out_err:
3117 sb->s_dirt = 1;
3118 brelse(bitmap_bh);
3119 return err;
3120}
3121
3122/*
3123 * here we normalize request for locality group
3124 * Group requests are normalized to s_stripe size if it is set via the mount
3125 * option. If not, we set it to s_mb_group_prealloc, which can be configured via
3126 * /proc/fs/ext4/<partition>/group_prealloc
3127 *
3128 * XXX: should we try to preallocate more than the group has now?
3129 */
3130static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3131{
3132 struct super_block *sb = ac->ac_sb;
3133 struct ext4_locality_group *lg = ac->ac_lg;
3134
3135 BUG_ON(lg == NULL);
3136 if (EXT4_SB(sb)->s_stripe)
3137 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
3138 else
3139 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3140 mb_debug("#%u: goal %lu blocks for locality group\n",
3141 current->pid, ac->ac_g_ex.fe_len);
3142}
3143
3144/*
3145 * Normalization means making request better in terms of
3146 * size and alignment
3147 */
3148static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3149 struct ext4_allocation_request *ar)
3150{
3151 int bsbits, max;
3152 ext4_lblk_t end;
3153 struct list_head *cur;
3154 loff_t size, orig_size, start_off;
3155 ext4_lblk_t start, orig_start;
3156 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3157
3158 /* only normalize data requests; metadata requests
3159 do not need preallocation */
3160 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3161 return;
3162
3163 /* sometimes the caller may want exact blocks */
3164 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3165 return;
3166
3167 /* caller may indicate that preallocation isn't
3168 * required (it's a tail, for example) */
3169 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3170 return;
3171
3172 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3173 ext4_mb_normalize_group_request(ac);
3174 return ;
3175 }
3176
3177 bsbits = ac->ac_sb->s_blocksize_bits;
3178
3179 /* first, let's learn actual file size
3180 * given current request is allocated */
3181 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3182 size = size << bsbits;
3183 if (size < i_size_read(ac->ac_inode))
3184 size = i_size_read(ac->ac_inode);
3185
3186 /* max available blocks in a free group */
3187 max = EXT4_BLOCKS_PER_GROUP(ac->ac_sb) - 1 - 1 -
3188 EXT4_SB(ac->ac_sb)->s_itb_per_group;
3189
3190#define NRL_CHECK_SIZE(req, size, max, bits) \
3191 (req <= (size) || max <= ((size) >> bits))
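/* i.e. the predicted size already fits within this window, or the
 * groups of this filesystem are too small to hold a bigger one */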
3192
3193 /* first, try to predict filesize */
3194 /* XXX: should this table be tunable? */
3195 start_off = 0;
3196 if (size <= 16 * 1024) {
3197 size = 16 * 1024;
3198 } else if (size <= 32 * 1024) {
3199 size = 32 * 1024;
3200 } else if (size <= 64 * 1024) {
3201 size = 64 * 1024;
3202 } else if (size <= 128 * 1024) {
3203 size = 128 * 1024;
3204 } else if (size <= 256 * 1024) {
3205 size = 256 * 1024;
3206 } else if (size <= 512 * 1024) {
3207 size = 512 * 1024;
3208 } else if (size <= 1024 * 1024) {
3209 size = 1024 * 1024;
3210 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, bsbits)) {
3211 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3212 (20 - bsbits)) << 20;
3213 size = 1024 * 1024;
3214 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, bsbits)) {
3215 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3216 (22 - bsbits)) << 22;
3217 size = 4 * 1024 * 1024;
3218 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3219 (8<<20)>>bsbits, max, bsbits)) {
3220 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3221 (23 - bsbits)) << 23;
3222 size = 8 * 1024 * 1024;
3223 } else {
3224 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3225 size = ac->ac_o_ex.fe_len << bsbits;
3226 }
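 /* e.g. a file that will be ~100K after this allocation falls into
 * the 128K slot above; start_off stays 0 for these small sizes, so
 * the goal window begins at the start of the file */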
3227 orig_size = size = size >> bsbits;
3228 orig_start = start = start_off >> bsbits;
3229
3230 /* don't cover already allocated blocks in selected range */
3231 if (ar->pleft && start <= ar->lleft) {
3232 size -= ar->lleft + 1 - start;
3233 start = ar->lleft + 1;
3234 }
3235 if (ar->pright && start + size - 1 >= ar->lright)
3236 size -= start + size - ar->lright;
3237
3238 end = start + size;
3239
3240 /* check we don't cross already preallocated blocks */
3241 rcu_read_lock();
3242 list_for_each_rcu(cur, &ei->i_prealloc_list) {
3243 struct ext4_prealloc_space *pa;
3244 unsigned long pa_end;
3245
3246 pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3247
3248 if (pa->pa_deleted)
3249 continue;
3250 spin_lock(&pa->pa_lock);
3251 if (pa->pa_deleted) {
3252 spin_unlock(&pa->pa_lock);
3253 continue;
3254 }
3255
3256 pa_end = pa->pa_lstart + pa->pa_len;
3257
3258 /* PA must not overlap original request */
3259 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3260 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3261
3262 /* skip PAs that the normalized request doesn't overlap with */
3263 if (pa->pa_lstart >= end) {
3264 spin_unlock(&pa->pa_lock);
3265 continue;
3266 }
3267 if (pa_end <= start) {
3268 spin_unlock(&pa->pa_lock);
3269 continue;
3270 }
3271 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3272
3273 if (pa_end <= ac->ac_o_ex.fe_logical) {
3274 BUG_ON(pa_end < start);
3275 start = pa_end;
3276 }
3277
3278 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3279 BUG_ON(pa->pa_lstart > end);
3280 end = pa->pa_lstart;
3281 }
3282 spin_unlock(&pa->pa_lock);
3283 }
3284 rcu_read_unlock();
3285 size = end - start;
3286
3287 /* XXX: extra loop to check we really don't overlap preallocations */
3288 rcu_read_lock();
3289 list_for_each_rcu(cur, &ei->i_prealloc_list) {
3290 struct ext4_prealloc_space *pa;
3291 unsigned long pa_end;
3292 pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3293 spin_lock(&pa->pa_lock);
3294 if (pa->pa_deleted == 0) {
3295 pa_end = pa->pa_lstart + pa->pa_len;
3296 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3297 }
3298 spin_unlock(&pa->pa_lock);
3299 }
3300 rcu_read_unlock();
3301
3302 if (start + size <= ac->ac_o_ex.fe_logical &&
3303 start > ac->ac_o_ex.fe_logical) {
3304 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3305 (unsigned long) start, (unsigned long) size,
3306 (unsigned long) ac->ac_o_ex.fe_logical);
3307 }
3308 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3309 start > ac->ac_o_ex.fe_logical);
3310 BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3311
3312 /* now prepare goal request */
3313
3314 /* XXX: is it better to align blocks WRT to logical
3315 * placement or satisfy big request as is */
3316 ac->ac_g_ex.fe_logical = start;
3317 ac->ac_g_ex.fe_len = size;
3318
3319 /* define goal start in order to merge */
3320 if (ar->pright && (ar->lright == (start + size))) {
3321 /* merge to the right */
3322 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3323 &ac->ac_f_ex.fe_group,
3324 &ac->ac_f_ex.fe_start);
3325 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3326 }
3327 if (ar->pleft && (ar->lleft + 1 == start)) {
3328 /* merge to the left */
3329 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3330 &ac->ac_f_ex.fe_group,
3331 &ac->ac_f_ex.fe_start);
3332 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3333 }
3334
3335 mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3336 (unsigned) orig_size, (unsigned) start);
3337}
3338
3339static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3340{
3341 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3342
3343 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3344 atomic_inc(&sbi->s_bal_reqs);
3345 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3346 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3347 atomic_inc(&sbi->s_bal_success);
3348 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3349 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3350 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3351 atomic_inc(&sbi->s_bal_goals);
3352 if (ac->ac_found > sbi->s_mb_max_to_scan)
3353 atomic_inc(&sbi->s_bal_breaks);
3354 }
3355
3356 ext4_mb_store_history(ac);
3357}
3358
3359/*
3360 * use blocks preallocated to inode
3361 */
3362static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3363 struct ext4_prealloc_space *pa)
3364{
3365 ext4_fsblk_t start;
3366 ext4_fsblk_t end;
3367 int len;
3368
3369 /* found preallocated blocks, use them */
3370 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3371 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3372 len = end - start;
3373 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3374 &ac->ac_b_ex.fe_start);
3375 ac->ac_b_ex.fe_len = len;
3376 ac->ac_status = AC_STATUS_FOUND;
3377 ac->ac_pa = pa;
3378
3379 BUG_ON(start < pa->pa_pstart);
3380 BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3381 BUG_ON(pa->pa_free < len);
3382 pa->pa_free -= len;
3383
3384 mb_debug("use %llu/%lu from inode pa %p\n", start, len, pa);
3385}
3386
3387/*
3388 * use blocks preallocated to locality group
3389 */
3390static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3391 struct ext4_prealloc_space *pa)
3392{
3393 unsigned len = ac->ac_o_ex.fe_len;
3394
3395 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3396 &ac->ac_b_ex.fe_group,
3397 &ac->ac_b_ex.fe_start);
3398 ac->ac_b_ex.fe_len = len;
3399 ac->ac_status = AC_STATUS_FOUND;
3400 ac->ac_pa = pa;
3401
3402 /* we don't correct pa_pstart or pa_plen here to avoid
3403 * possible race when the group is being loaded concurrently
3404 * instead we correct pa later, after blocks are marked
3405 * in on-disk bitmap -- see ext4_mb_release_context()
3406 * Other CPUs are prevented from allocating from this pa by lg_mutex
3407 */
3408 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3409}
3410
3411/*
3412 * search goal blocks in preallocated space
3413 */
3414static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3415{
3416 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3417 struct ext4_locality_group *lg;
3418 struct ext4_prealloc_space *pa;
3419 struct list_head *cur;
3420
3421 /* only data can be preallocated */
3422 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3423 return 0;
3424
3425 /* first, try per-file preallocation */
3426 rcu_read_lock();
3427 list_for_each_rcu(cur, &ei->i_prealloc_list) {
3428 pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3429
3430 /* all fields in this condition don't change,
3431 * so we can skip locking for them */
3432 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3433 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3434 continue;
3435
3436 /* found preallocated blocks, use them */
3437 spin_lock(&pa->pa_lock);
3438 if (pa->pa_deleted == 0 && pa->pa_free) {
3439 atomic_inc(&pa->pa_count);
3440 ext4_mb_use_inode_pa(ac, pa);
3441 spin_unlock(&pa->pa_lock);
3442 ac->ac_criteria = 10;
3443 rcu_read_unlock();
3444 return 1;
3445 }
3446 spin_unlock(&pa->pa_lock);
3447 }
3448 rcu_read_unlock();
3449
3450 /* can we use group allocation? */
3451 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3452 return 0;
3453
3454 /* inode may have no locality group for some reason */
3455 lg = ac->ac_lg;
3456 if (lg == NULL)
3457 return 0;
3458
3459 rcu_read_lock();
3460 list_for_each_rcu(cur, &lg->lg_prealloc_list) {
3461 pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3462 spin_lock(&pa->pa_lock);
3463 if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
3464 atomic_inc(&pa->pa_count);
3465 ext4_mb_use_group_pa(ac, pa);
3466 spin_unlock(&pa->pa_lock);
3467 ac->ac_criteria = 20;
3468 rcu_read_unlock();
3469 return 1;
3470 }
3471 spin_unlock(&pa->pa_lock);
3472 }
3473 rcu_read_unlock();
3474
3475 return 0;
3476}
3477
3478/*
3479 * the function goes through all preallocations in this group and marks them
3480 * used in in-core bitmap. buddy must be generated from this bitmap
3481 * Need to be called with ext4 group lock (ext4_lock_group)
3482 */
3483static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3484 ext4_group_t group)
3485{
3486 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3487 struct ext4_prealloc_space *pa;
3488 struct list_head *cur;
3489 ext4_group_t groupnr;
3490 ext4_grpblk_t start;
3491 int preallocated = 0;
3492 int count = 0;
3493 int len;
3494
3495 /* all forms of preallocation discard first load the group,
3496 * so the only competing code is preallocation use.
3497 * we don't need any locking here
3498 * notice we do NOT ignore preallocations with pa_deleted
3499 * otherwise we could leave used blocks available for
3500 * allocation in buddy when concurrent ext4_mb_put_pa()
3501 * is dropping preallocation
3502 */
3503 list_for_each(cur, &grp->bb_prealloc_list) {
3504 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3505 spin_lock(&pa->pa_lock);
3506 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3507 &groupnr, &start);
3508 len = pa->pa_len;
3509 spin_unlock(&pa->pa_lock);
3510 if (unlikely(len == 0))
3511 continue;
3512 BUG_ON(groupnr != group);
3513 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3514 bitmap, start, len);
3515 preallocated += len;
3516 count++;
3517 }
3518 mb_debug("preallocated %u for group %lu\n", preallocated, group);
3519}
3520
3521static void ext4_mb_pa_callback(struct rcu_head *head)
3522{
3523 struct ext4_prealloc_space *pa;
3524 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3525 kmem_cache_free(ext4_pspace_cachep, pa);
3526}
3527
3528/*
3529 * drops a reference to preallocated space descriptor
3530 * if this was the last reference and the space is consumed
3531 */
3532static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3533 struct super_block *sb, struct ext4_prealloc_space *pa)
3534{
3535 unsigned long grp;
3536
3537 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3538 return;
3539
3540 /* in this short window concurrent discard can set pa_deleted */
3541 spin_lock(&pa->pa_lock);
3542 if (pa->pa_deleted == 1) {
3543 spin_unlock(&pa->pa_lock);
3544 return;
3545 }
3546
3547 pa->pa_deleted = 1;
3548 spin_unlock(&pa->pa_lock);
3549
3550 /* -1 is to protect from crossing allocation group */
3551 ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3552
3553 /*
3554 * possible race:
3555 *
3556 * P1 (buddy init) P2 (regular allocation)
3557 * find block B in PA
3558 * copy on-disk bitmap to buddy
3559 * mark B in on-disk bitmap
3560 * drop PA from group
3561 * mark all PAs in buddy
3562 *
3563 * thus, P1 initializes buddy with B available. to prevent this
3564 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3565 * against that pair
3566 */
3567 ext4_lock_group(sb, grp);
3568 list_del(&pa->pa_group_list);
3569 ext4_unlock_group(sb, grp);
3570
3571 spin_lock(pa->pa_obj_lock);
3572 list_del_rcu(&pa->pa_inode_list);
3573 spin_unlock(pa->pa_obj_lock);
3574
3575 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3576}
3577
3578/*
3579 * creates new preallocated space for given inode
3580 */
3581static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3582{
3583 struct super_block *sb = ac->ac_sb;
3584 struct ext4_prealloc_space *pa;
3585 struct ext4_group_info *grp;
3586 struct ext4_inode_info *ei;
3587
3588 /* preallocate only when found space is larger than requested */
3589 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3590 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3591 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3592
3593 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3594 if (pa == NULL)
3595 return -ENOMEM;
3596
3597 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3598 int winl;
3599 int wins;
3600 int win;
3601 int offs;
3602
3603 /* we can't allocate as much as normalizer wants.
3604 * so, found space must get proper lstart
3605 * to cover original request */
3606 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3607 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3608
3609 /* we're limited by original request in that
3610 * logical block must be covered anyway;
3611 * winl is window we can move our chunk within */
3612 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3613
3614 /* also, we should cover whole original request */
3615 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3616
3617 /* the smallest one defines real window */
3618 win = min(winl, wins);
3619
3620 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3621 if (offs && offs < win)
3622 win = offs;
3623
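 /* shift the logical start back by win blocks so the preallocated
 * chunk still covers the originally requested block */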
3624 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3625 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3626 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3627 }
3628
3629 /* preallocation can change ac_b_ex, thus we store actually
3630 * allocated blocks for history */
3631 ac->ac_f_ex = ac->ac_b_ex;
3632
3633 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3634 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3635 pa->pa_len = ac->ac_b_ex.fe_len;
3636 pa->pa_free = pa->pa_len;
3637 atomic_set(&pa->pa_count, 1);
3638 spin_lock_init(&pa->pa_lock);
3639 pa->pa_deleted = 0;
3640 pa->pa_linear = 0;
3641
3642 mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3643 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3644
3645 ext4_mb_use_inode_pa(ac, pa);
3646 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3647
3648 ei = EXT4_I(ac->ac_inode);
3649 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3650
3651 pa->pa_obj_lock = &ei->i_prealloc_lock;
3652 pa->pa_inode = ac->ac_inode;
3653
3654 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3655 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3656 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3657
3658 spin_lock(pa->pa_obj_lock);
3659 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3660 spin_unlock(pa->pa_obj_lock);
3661
3662 return 0;
3663}
3664
3665/*
3666 * creates new preallocated space for the locality group the inode belongs to
3667 */
3668static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3669{
3670 struct super_block *sb = ac->ac_sb;
3671 struct ext4_locality_group *lg;
3672 struct ext4_prealloc_space *pa;
3673 struct ext4_group_info *grp;
3674
3676 /* preallocate only when found space is larger than requested */
3676 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3677 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3678 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3679
3680 BUG_ON(ext4_pspace_cachep == NULL);
3681 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3682 if (pa == NULL)
3683 return -ENOMEM;
3684
3685 /* preallocation can change ac_b_ex, thus we store actually
3686 * allocated blocks for history */
3687 ac->ac_f_ex = ac->ac_b_ex;
3688
3689 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3690 pa->pa_lstart = pa->pa_pstart;
3691 pa->pa_len = ac->ac_b_ex.fe_len;
3692 pa->pa_free = pa->pa_len;
3693 atomic_set(&pa->pa_count, 1);
3694 spin_lock_init(&pa->pa_lock);
3695 pa->pa_deleted = 0;
3696 pa->pa_linear = 1;
3697
3698 mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3699 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3700
3701 ext4_mb_use_group_pa(ac, pa);
3702 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3703
3704 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3705 lg = ac->ac_lg;
3706 BUG_ON(lg == NULL);
3707
3708 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3709 pa->pa_inode = NULL;
3710
3711 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3712 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3713 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3714
3715 spin_lock(pa->pa_obj_lock);
3716 list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
3717 spin_unlock(pa->pa_obj_lock);
3718
3719 return 0;
3720}
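/*
 * Editorial note: in contrast to an inode PA, a group PA is "linear"
 * (pa_linear = 1) and its pa_lstart simply mirrors pa_pstart; requests
 * from the locality group are carved off its front one after another,
 * see how ext4_mb_release_context() advances pa_pstart/pa_lstart.
 */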
3721
3722static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3723{
3724 int err;
3725
3726 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3727 err = ext4_mb_new_group_pa(ac);
3728 else
3729 err = ext4_mb_new_inode_pa(ac);
3730 return err;
3731}
3732
3733/*
3734 * finds all unused blocks in on-disk bitmap, frees them in
3735 * in-core bitmap and buddy.
3736 * @pa must be unlinked from inode and group lists, so that
3737 * nobody else can find/use it.
3738 * the caller MUST hold group/inode locks.
3739 * TODO: optimize the case when there are no in-core structures yet
3740 */
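/*
 * Illustrative walk-through (editorial; bitmap contents assumed): say
 * the PA covers bits [0..7] of the block bitmap and bits 2, 4 and 5
 * are still set (in use).  The scan below frees the zero-runs
 * [0..1], [3..3] and [6..7], so free = 2 + 1 + 2 = 5; if that does
 * not match pa->pa_free, the mismatch is reported via ext4_error()
 * and the bitmap-derived value is trusted.
 */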
3741static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3742 struct buffer_head *bitmap_bh,
3743 struct ext4_prealloc_space *pa)
3744{
3745	struct ext4_allocation_context *ac;
3746 struct super_block *sb = e4b->bd_sb;
3747 struct ext4_sb_info *sbi = EXT4_SB(sb);
3748 unsigned long end;
3749 unsigned long next;
3750 ext4_group_t group;
3751 ext4_grpblk_t bit;
3752 sector_t start;
3753 int err = 0;
3754 int free = 0;
3755
3756 BUG_ON(pa->pa_deleted == 0);
3757 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3758 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3759 end = bit + pa->pa_len;
3760
3761 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3762
3763 if (ac) {
3764 ac->ac_sb = sb;
3765 ac->ac_inode = pa->pa_inode;
3766 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3767 }
3768
3769 while (bit < end) {
3770		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3771 if (bit >= end)
3772 break;
3773		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3774 if (next > end)
3775 next = end;
3776 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3777 le32_to_cpu(sbi->s_es->s_first_data_block);
3778 mb_debug(" free preallocated %u/%u in group %u\n",
3779 (unsigned) start, (unsigned) next - bit,
3780 (unsigned) group);
3781 free += next - bit;
3782
3783 if (ac) {
3784 ac->ac_b_ex.fe_group = group;
3785 ac->ac_b_ex.fe_start = bit;
3786 ac->ac_b_ex.fe_len = next - bit;
3787 ac->ac_b_ex.fe_logical = 0;
3788 ext4_mb_store_history(ac);
3789 }
3790
3791 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3792 bit = next + 1;
3793 }
3794 if (free != pa->pa_free) {
3795		printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3796 pa, (unsigned long) pa->pa_lstart,
3797 (unsigned long) pa->pa_pstart,
3798 (unsigned long) pa->pa_len);
3799 ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
3800 free, pa->pa_free);
3801 /*
3802 * pa is already deleted so we use the value obtained
3803 * from the bitmap and continue.
3804 */
3805	}
3806	atomic_add(free, &sbi->s_mb_discarded);
3807 if (ac)
3808 kmem_cache_free(ext4_ac_cachep, ac);
3809
3810 return err;
3811}
3812
3813static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3814 struct ext4_prealloc_space *pa)
3815{
3816	struct ext4_allocation_context *ac;
3817 struct super_block *sb = e4b->bd_sb;
3818 ext4_group_t group;
3819 ext4_grpblk_t bit;
3820
3821 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3822
3823 if (ac)
3824 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3825
3826 BUG_ON(pa->pa_deleted == 0);
3827 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3828 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3829 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3830 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3831
3832 if (ac) {
3833 ac->ac_sb = sb;
3834 ac->ac_inode = NULL;
3835 ac->ac_b_ex.fe_group = group;
3836 ac->ac_b_ex.fe_start = bit;
3837 ac->ac_b_ex.fe_len = pa->pa_len;
3838 ac->ac_b_ex.fe_logical = 0;
3839 ext4_mb_store_history(ac);
3840 kmem_cache_free(ext4_ac_cachep, ac);
3841 }
3842
3843 return 0;
3844}
3845
3846/*
3847 * releases all preallocations in given group
3848 *
3849 * first, we need to decide discard policy:
3850 * - when do we discard
3851 * 1) ENOSPC
3852 * - how many do we discard
3853 * 1) how many requested
3854 */
3855static int ext4_mb_discard_group_preallocations(struct super_block *sb,
3856 ext4_group_t group, int needed)
3857{
3858 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3859 struct buffer_head *bitmap_bh = NULL;
3860 struct ext4_prealloc_space *pa, *tmp;
3861 struct list_head list;
3862 struct ext4_buddy e4b;
3863 int err;
3864 int busy = 0;
3865 int free = 0;
3866
3867 mb_debug("discard preallocation for group %lu\n", group);
3868
3869 if (list_empty(&grp->bb_prealloc_list))
3870 return 0;
3871
3872	bitmap_bh = read_block_bitmap(sb, group);
3873	if (bitmap_bh == NULL) {
3874		/* TODO: real error handling; note the buddy is not
3875		 * loaded yet at this point, so there is nothing to release */
3876		BUG_ON(bitmap_bh == NULL);
3877	}
3878
3879 err = ext4_mb_load_buddy(sb, group, &e4b);
3880 BUG_ON(err != 0); /* error handling here */
3881
3882 if (needed == 0)
3883 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3884
3885 grp = ext4_get_group_info(sb, group);
3886 INIT_LIST_HEAD(&list);
3887
3888repeat:
3889 ext4_lock_group(sb, group);
3890 list_for_each_entry_safe(pa, tmp,
3891 &grp->bb_prealloc_list, pa_group_list) {
3892 spin_lock(&pa->pa_lock);
3893 if (atomic_read(&pa->pa_count)) {
3894 spin_unlock(&pa->pa_lock);
3895 busy = 1;
3896 continue;
3897 }
3898 if (pa->pa_deleted) {
3899 spin_unlock(&pa->pa_lock);
3900 continue;
3901 }
3902
3903 /* seems this one can be freed ... */
3904 pa->pa_deleted = 1;
3905
3906 /* we can trust pa_free ... */
3907 free += pa->pa_free;
3908
3909 spin_unlock(&pa->pa_lock);
3910
3911 list_del(&pa->pa_group_list);
3912 list_add(&pa->u.pa_tmp_list, &list);
3913 }
3914
3915 /* if we still need more blocks and some PAs were used, try again */
3916 if (free < needed && busy) {
3917 busy = 0;
3918 ext4_unlock_group(sb, group);
3919 /*
3920 * Yield the CPU here so that we don't get soft lockup
3921 * in non preempt case.
3922 */
3923 yield();
3924 goto repeat;
3925 }
3926
3927 /* found anything to free? */
3928 if (list_empty(&list)) {
3929 BUG_ON(free != 0);
3930 goto out;
3931 }
3932
3933 /* now free all selected PAs */
3934 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3935
3936 /* remove from object (inode or locality group) */
3937 spin_lock(pa->pa_obj_lock);
3938 list_del_rcu(&pa->pa_inode_list);
3939 spin_unlock(pa->pa_obj_lock);
3940
3941 if (pa->pa_linear)
3942 ext4_mb_release_group_pa(&e4b, pa);
3943 else
3944 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3945
3946 list_del(&pa->u.pa_tmp_list);
3947 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3948 }
3949
3950out:
3951 ext4_unlock_group(sb, group);
3952 ext4_mb_release_desc(&e4b);
3953 put_bh(bitmap_bh);
3954 return free;
3955}

3956
3957/*
3958 * releases all non-used preallocated blocks for given inode
3959 *
3960 * It's important to discard preallocations under i_data_sem
3961 * We don't want another block to be served from the prealloc
3962 * space when we are discarding the inode prealloc space.
3963 *
3964 * FIXME!! Make sure it is valid at all the call sites
3965 */
3966void ext4_mb_discard_inode_preallocations(struct inode *inode)
3967{
3968 struct ext4_inode_info *ei = EXT4_I(inode);
3969 struct super_block *sb = inode->i_sb;
3970 struct buffer_head *bitmap_bh = NULL;
3971 struct ext4_prealloc_space *pa, *tmp;
3972 ext4_group_t group = 0;
3973 struct list_head list;
3974 struct ext4_buddy e4b;
3975 int err;
3976
3977 if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
3978 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3979 return;
3980 }
3981
3982 mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3983
3984 INIT_LIST_HEAD(&list);
3985
3986repeat:
3987 /* first, collect all pa's in the inode */
3988 spin_lock(&ei->i_prealloc_lock);
3989 while (!list_empty(&ei->i_prealloc_list)) {
3990 pa = list_entry(ei->i_prealloc_list.next,
3991 struct ext4_prealloc_space, pa_inode_list);
3992 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3993 spin_lock(&pa->pa_lock);
3994 if (atomic_read(&pa->pa_count)) {
3995 /* this shouldn't happen often - nobody should
3996 * use preallocation while we're discarding it */
3997 spin_unlock(&pa->pa_lock);
3998 spin_unlock(&ei->i_prealloc_lock);
3999 printk(KERN_ERR "uh-oh! used pa while discarding\n");
4000 WARN_ON(1);
4001 schedule_timeout_uninterruptible(HZ);
4002 goto repeat;
4003
4004 }
4005 if (pa->pa_deleted == 0) {
4006 pa->pa_deleted = 1;
4007 spin_unlock(&pa->pa_lock);
4008 list_del_rcu(&pa->pa_inode_list);
4009 list_add(&pa->u.pa_tmp_list, &list);
4010 continue;
4011 }
4012
4013 /* someone is deleting pa right now */
4014 spin_unlock(&pa->pa_lock);
4015 spin_unlock(&ei->i_prealloc_lock);
4016
4017 /* we have to wait here because pa_deleted
4018 * doesn't mean pa is already unlinked from
4019 * the list. as we might be called from
4020 * ->clear_inode() the inode will get freed
4021 * and concurrent thread which is unlinking
4022 * pa from inode's list may access already
4023 * freed memory, bad-bad-bad */
4024
4025 /* XXX: if this happens too often, we can
4026 * add a flag to force wait only in case
4027 * of ->clear_inode(), but not in case of
4028 * regular truncate */
4029 schedule_timeout_uninterruptible(HZ);
4030 goto repeat;
4031 }
4032 spin_unlock(&ei->i_prealloc_lock);
4033
4034 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4035 BUG_ON(pa->pa_linear != 0);
4036 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4037
4038 err = ext4_mb_load_buddy(sb, group, &e4b);
4039 BUG_ON(err != 0); /* error handling here */
4040
4041 bitmap_bh = read_block_bitmap(sb, group);
4042 if (bitmap_bh == NULL) {
4043 /* error handling here */
4044 ext4_mb_release_desc(&e4b);
4045 BUG_ON(bitmap_bh == NULL);
4046 }
4047
4048 ext4_lock_group(sb, group);
4049 list_del(&pa->pa_group_list);
4050 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4051 ext4_unlock_group(sb, group);
4052
4053 ext4_mb_release_desc(&e4b);
4054 put_bh(bitmap_bh);
4055
4056 list_del(&pa->u.pa_tmp_list);
4057 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4058 }
4059}
4060
4061/*
4062 * finds all preallocated spaces and returns blocks being freed to them;
4063 * if a preallocated space becomes fully unused (no block from the space
4064 * is in use) then the function frees that space in the buddy
4065 * XXX: at the moment, truncate (which is the only way to free blocks)
4066 * discards all preallocations
4067 */
4068static void ext4_mb_return_to_preallocation(struct inode *inode,
4069 struct ext4_buddy *e4b,
4070 sector_t block, int count)
4071{
4072 BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
4073}
4074#ifdef MB_DEBUG
4075static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4076{
4077 struct super_block *sb = ac->ac_sb;
4078 ext4_group_t i;
4079
4080 printk(KERN_ERR "EXT4-fs: Can't allocate:"
4081 " Allocation context details:\n");
4082 printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
4083 ac->ac_status, ac->ac_flags);
4084 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4085 "best %lu/%lu/%lu@%lu cr %d\n",
4086 (unsigned long)ac->ac_o_ex.fe_group,
4087 (unsigned long)ac->ac_o_ex.fe_start,
4088 (unsigned long)ac->ac_o_ex.fe_len,
4089 (unsigned long)ac->ac_o_ex.fe_logical,
4090 (unsigned long)ac->ac_g_ex.fe_group,
4091 (unsigned long)ac->ac_g_ex.fe_start,
4092 (unsigned long)ac->ac_g_ex.fe_len,
4093 (unsigned long)ac->ac_g_ex.fe_logical,
4094 (unsigned long)ac->ac_b_ex.fe_group,
4095 (unsigned long)ac->ac_b_ex.fe_start,
4096 (unsigned long)ac->ac_b_ex.fe_len,
4097 (unsigned long)ac->ac_b_ex.fe_logical,
4098 (int)ac->ac_criteria);
4099 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4100 ac->ac_found);
4101 printk(KERN_ERR "EXT4-fs: groups: \n");
4102 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
4103 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4104 struct ext4_prealloc_space *pa;
4105 ext4_grpblk_t start;
4106 struct list_head *cur;
4107 ext4_lock_group(sb, i);
4108 list_for_each(cur, &grp->bb_prealloc_list) {
4109 pa = list_entry(cur, struct ext4_prealloc_space,
4110 pa_group_list);
4111 spin_lock(&pa->pa_lock);
4112 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4113 NULL, &start);
4114 spin_unlock(&pa->pa_lock);
4115 printk(KERN_ERR "PA:%lu:%d:%u \n", i,
4116 start, pa->pa_len);
4117 }
4118		ext4_unlock_group(sb, i);
4119
4120 if (grp->bb_free == 0)
4121 continue;
4122 printk(KERN_ERR "%lu: %d/%d \n",
4123 i, grp->bb_free, grp->bb_fragments);
4124 }
4125 printk(KERN_ERR "\n");
4126}
4127#else
4128static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4129{
4130 return;
4131}
4132#endif
4133
4134/*
4135 * We use locality group preallocation for small files. The size of the
4136 * file is determined by the current size or the resulting size after
4137 * allocation, whichever is larger
4138 *
4139 * One can tune this size via /proc/fs/ext4/<partition>/stream_req
4140 */
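/*
 * Illustrative example (editorial; numbers assumed): with 4KiB blocks
 * (bsbits = 12) and the default s_mb_stream_request of 16 blocks, a
 * write of 8 blocks at logical block 4 into a so-far empty file gives
 * size = 4 + 8 = 12 and isize = 0, so max(size, isize) = 12 < 16 and
 * the locality-group path is taken; a file already 1MiB big
 * (isize = 256 blocks) would use inode preallocation instead.
 */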
4141static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4142{
4143 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4144 int bsbits = ac->ac_sb->s_blocksize_bits;
4145 loff_t size, isize;
4146
4147 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4148 return;
4149
4150 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4151 isize = i_size_read(ac->ac_inode) >> bsbits;
4152 size = max(size, isize);
4153
4154 /* don't use group allocation for large files */
4155 if (size >= sbi->s_mb_stream_request)
4156 return;
4157
4158 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4159 return;
4160
4161 BUG_ON(ac->ac_lg != NULL);
4162 /*
4163	 * locality group prealloc space is per-CPU. The reason for having
4164	 * a per-CPU locality group is to reduce the contention between block
4165	 * requests from multiple CPUs.
4166 */
4167 ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
4168 put_cpu();
4169
4170 /* we're going to use group allocation */
4171 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4172
4173 /* serialize all allocations in the group */
4174 mutex_lock(&ac->ac_lg->lg_mutex);
4175}
4176
4177static int ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4178 struct ext4_allocation_request *ar)
4179{
4180 struct super_block *sb = ar->inode->i_sb;
4181 struct ext4_sb_info *sbi = EXT4_SB(sb);
4182 struct ext4_super_block *es = sbi->s_es;
4183 ext4_group_t group;
4184 unsigned long len;
4185 unsigned long goal;
4186 ext4_grpblk_t block;
4187
4188 /* we can't allocate > group size */
4189 len = ar->len;
4190
4191 /* just a dirty hack to filter too big requests */
4192 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4193 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4194
4195 /* start searching from the goal */
4196 goal = ar->goal;
4197 if (goal < le32_to_cpu(es->s_first_data_block) ||
4198 goal >= ext4_blocks_count(es))
4199 goal = le32_to_cpu(es->s_first_data_block);
4200 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4201
4202 /* set up allocation goals */
4203 ac->ac_b_ex.fe_logical = ar->logical;
4204 ac->ac_b_ex.fe_group = 0;
4205 ac->ac_b_ex.fe_start = 0;
4206 ac->ac_b_ex.fe_len = 0;
4207 ac->ac_status = AC_STATUS_CONTINUE;
4208 ac->ac_groups_scanned = 0;
4209 ac->ac_ex_scanned = 0;
4210 ac->ac_found = 0;
4211 ac->ac_sb = sb;
4212 ac->ac_inode = ar->inode;
4213 ac->ac_o_ex.fe_logical = ar->logical;
4214 ac->ac_o_ex.fe_group = group;
4215 ac->ac_o_ex.fe_start = block;
4216 ac->ac_o_ex.fe_len = len;
4217 ac->ac_g_ex.fe_logical = ar->logical;
4218 ac->ac_g_ex.fe_group = group;
4219 ac->ac_g_ex.fe_start = block;
4220 ac->ac_g_ex.fe_len = len;
4221 ac->ac_f_ex.fe_len = 0;
4222 ac->ac_flags = ar->flags;
4223 ac->ac_2order = 0;
4224 ac->ac_criteria = 0;
4225 ac->ac_pa = NULL;
4226 ac->ac_bitmap_page = NULL;
4227 ac->ac_buddy_page = NULL;
4228 ac->ac_lg = NULL;
4229
4230	/* we have to define the context: will we work with a file or
4231	 * a locality group. this is a policy, actually */
4232 ext4_mb_group_or_file(ac);
4233
4234 mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4235 "left: %u/%u, right %u/%u to %swritable\n",
4236 (unsigned) ar->len, (unsigned) ar->logical,
4237 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4238 (unsigned) ar->lleft, (unsigned) ar->pleft,
4239 (unsigned) ar->lright, (unsigned) ar->pright,
4240 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4241 return 0;
4242
4243}
4244
4245/*
4246 * release all resources used in the allocation
4247 */
4248static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4249{
4250 if (ac->ac_pa) {
4251 if (ac->ac_pa->pa_linear) {
4252 /* see comment in ext4_mb_use_group_pa() */
4253 spin_lock(&ac->ac_pa->pa_lock);
4254 ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
4255 ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
4256 ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
4257 ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
4258 spin_unlock(&ac->ac_pa->pa_lock);
4259 }
4260 ext4_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
4261 }
4262 if (ac->ac_bitmap_page)
4263 page_cache_release(ac->ac_bitmap_page);
4264 if (ac->ac_buddy_page)
4265 page_cache_release(ac->ac_buddy_page);
4266 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4267 mutex_unlock(&ac->ac_lg->lg_mutex);
4268 ext4_mb_collect_stats(ac);
4269 return 0;
4270}
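/*
 * Illustrative example (editorial; values assumed): a linear (group)
 * PA with pa_pstart = 1000 and pa_len = pa_free = 64, after serving an
 * allocation of fe_len = 16 from its front, is left by the code above
 * with pa_pstart = pa_lstart = 1016 and pa_len = pa_free = 48, so the
 * next request served from this PA continues right behind it.
 */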
4271
4272static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4273{
4274 ext4_group_t i;
4275 int ret;
4276 int freed = 0;
4277
4278 for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4279 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4280 freed += ret;
4281 needed -= ret;
4282 }
4283
4284 return freed;
4285}
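/*
 * Example (editorial; numbers assumed): with needed = 100, if group 0
 * frees 60 preallocated blocks and group 1 frees another 50, the loop
 * stops after group 1 (needed would drop below zero) and returns
 * freed = 110 to the ENOSPC retry logic in ext4_mb_new_blocks().
 */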
4286
4287/*
4288 * Main entry point into mballoc to allocate blocks:
4289 * it tries to use preallocation first, then falls back
4290 * to the usual allocation
4291 */
4292ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4293 struct ext4_allocation_request *ar, int *errp)
4294{
4295	struct ext4_allocation_context *ac = NULL;
4296 struct ext4_sb_info *sbi;
4297 struct super_block *sb;
4298 ext4_fsblk_t block = 0;
4299 int freed;
4300 int inquota;
4301
4302 sb = ar->inode->i_sb;
4303 sbi = EXT4_SB(sb);
4304
4305 if (!test_opt(sb, MBALLOC)) {
4306 block = ext4_new_blocks_old(handle, ar->inode, ar->goal,
4307 &(ar->len), errp);
4308 return block;
4309 }
4310
4311 while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4312 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4313 ar->len--;
4314 }
4315 if (ar->len == 0) {
4316 *errp = -EDQUOT;
4317 return 0;
4318 }
4319 inquota = ar->len;
4320
4321 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4322 if (!ac) {
4323 *errp = -ENOMEM;
4324 return 0;
4325 }
4326
4327 ext4_mb_poll_new_transaction(sb, handle);
4328
4329	*errp = ext4_mb_initialize_context(ac, ar);
4330 if (*errp) {
4331 ar->len = 0;
4332 goto out;
4333 }
4334
4335 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4336 if (!ext4_mb_use_preallocated(ac)) {
4337
4338 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4339 ext4_mb_normalize_request(ac, ar);
4340
4341repeat:
4342 /* allocate space in core */
4343		ext4_mb_regular_allocator(ac);
4344
4345		/* as we've just preallocated more space than
4346		 * the user originally requested, we store the allocated
4347		 * space in a special descriptor */
4348 if (ac->ac_status == AC_STATUS_FOUND &&
4349 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4350 ext4_mb_new_preallocation(ac);
4351 }
4352
4353 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4354 ext4_mb_mark_diskspace_used(ac, handle);
4355		*errp = 0;
4356 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4357 ar->len = ac->ac_b_ex.fe_len;
4358	} else {
4359		freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4360 if (freed)
4361 goto repeat;
4362 *errp = -ENOSPC;
4363		ac->ac_b_ex.fe_len = 0;
4364		ar->len = 0;
4365		ext4_mb_show_ac(ac);
4366 }
4367
4368	ext4_mb_release_context(ac);
4369
4370out:
4371 if (ar->len < inquota)
4372 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4373
4374	kmem_cache_free(ext4_ac_cachep, ac);
4375 return block;
4376}
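/*
 * Hedged usage sketch (editorial; caller side, not part of this
 * file): a caller is expected to fill an ext4_allocation_request and
 * pass it in, roughly like
 *
 *	struct ext4_allocation_request ar;
 *	int err;
 *	ext4_fsblk_t newblock;
 *
 *	memset(&ar, 0, sizeof(ar));
 *	ar.inode = inode;
 *	ar.logical = iblock;      (logical block within the file)
 *	ar.goal = goal;           (preferred physical block)
 *	ar.len = max_blocks;      (blocks wanted; may be trimmed)
 *	ar.flags = EXT4_MB_HINT_DATA;
 *	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 *
 * on success err is 0, ar.len holds the number of blocks actually
 * granted, and the return value is the first physical block.
 */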
4377static void ext4_mb_poll_new_transaction(struct super_block *sb,
4378 handle_t *handle)
4379{
4380 struct ext4_sb_info *sbi = EXT4_SB(sb);
4381
4382 if (sbi->s_last_transaction == handle->h_transaction->t_tid)
4383 return;
4384
4385	/* new transaction! time to close the last one and free blocks of
4386	 * the committed transaction. we know that only one transaction can
4387	 * be active, so the previous transaction may still be getting
4388	 * logged, and the transaction before the previous one is known to
4389	 * be logged already. this means that now we may free blocks freed
4390	 * in all transactions before the previous one. hope I'm clear enough ... */
4391
4392 spin_lock(&sbi->s_md_lock);
4393 if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
4394 mb_debug("new transaction %lu, old %lu\n",
4395 (unsigned long) handle->h_transaction->t_tid,
4396 (unsigned long) sbi->s_last_transaction);
4397 list_splice_init(&sbi->s_closed_transaction,
4398 &sbi->s_committed_transaction);
4399 list_splice_init(&sbi->s_active_transaction,
4400 &sbi->s_closed_transaction);
4401 sbi->s_last_transaction = handle->h_transaction->t_tid;
4402 }
4403 spin_unlock(&sbi->s_md_lock);
4404
4405 ext4_mb_free_committed_blocks(sb);
4406}
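/*
 * Illustrative timeline (editorial; tids assumed): suppose
 * s_last_transaction is 41 and a handle for tid 42 arrives.  The
 * splices above shift each list one stage down the pipeline:
 *   s_closed_transaction -> s_committed_transaction (freeable now)
 *   s_active_transaction -> s_closed_transaction
 * then s_last_transaction becomes 42 and
 * ext4_mb_free_committed_blocks() releases the committed blocks.
 */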
4407
4408static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4409 ext4_group_t group, ext4_grpblk_t block, int count)
4410{
4411 struct ext4_group_info *db = e4b->bd_info;
4412 struct super_block *sb = e4b->bd_sb;
4413 struct ext4_sb_info *sbi = EXT4_SB(sb);
4414 struct ext4_free_metadata *md;
4415 int i;
4416
4417 BUG_ON(e4b->bd_bitmap_page == NULL);
4418 BUG_ON(e4b->bd_buddy_page == NULL);
4419
4420 ext4_lock_group(sb, group);
4421 for (i = 0; i < count; i++) {
4422 md = db->bb_md_cur;
4423 if (md && db->bb_tid != handle->h_transaction->t_tid) {
4424 db->bb_md_cur = NULL;
4425 md = NULL;
4426 }
4427
4428 if (md == NULL) {
4429 ext4_unlock_group(sb, group);
4430 md = kmalloc(sizeof(*md), GFP_NOFS);
4431 if (md == NULL)
4432 return -ENOMEM;
4433 md->num = 0;
4434 md->group = group;
4435
4436 ext4_lock_group(sb, group);
4437 if (db->bb_md_cur == NULL) {
4438 spin_lock(&sbi->s_md_lock);
4439 list_add(&md->list, &sbi->s_active_transaction);
4440 spin_unlock(&sbi->s_md_lock);
4441 /* protect buddy cache from being freed,
4442 * otherwise we'll refresh it from
4443 * on-disk bitmap and lose not-yet-available
4444 * blocks */
4445 page_cache_get(e4b->bd_buddy_page);
4446 page_cache_get(e4b->bd_bitmap_page);
4447 db->bb_md_cur = md;
4448 db->bb_tid = handle->h_transaction->t_tid;
4449 mb_debug("new md 0x%p for group %lu\n",
4450 md, md->group);
4451 } else {
4452 kfree(md);
4453 md = db->bb_md_cur;
4454 }
4455 }
4456
4457 BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS);
4458 md->blocks[md->num] = block + i;
4459 md->num++;
4460 if (md->num == EXT4_BB_MAX_BLOCKS) {
4461 /* no more space, put full container on a sb's list */
4462 db->bb_md_cur = NULL;
4463 }
4464 }
4465 ext4_unlock_group(sb, group);
4466 return 0;
4467}
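/*
 * Example (editorial; numbers assumed): freeing a 3-block metadata
 * run at group-relative block 500 appends 500, 501 and 502 to the
 * group's current md container; when md->num hits EXT4_BB_MAX_BLOCKS
 * the group drops its bb_md_cur reference so the next free allocates
 * a fresh container, while the full one stays on the superblock's
 * active-transaction list.
 */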
4468
4469/*
4470 * Main entry point into mballoc to free blocks
4471 */
4472void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4473 unsigned long block, unsigned long count,
4474 int metadata, unsigned long *freed)
4475{
4476	struct buffer_head *bitmap_bh = NULL;
4477	struct super_block *sb = inode->i_sb;
4478	struct ext4_allocation_context *ac = NULL;
4479 struct ext4_group_desc *gdp;
4480 struct ext4_super_block *es;
4481 unsigned long overflow;
4482 ext4_grpblk_t bit;
4483 struct buffer_head *gd_bh;
4484 ext4_group_t block_group;
4485 struct ext4_sb_info *sbi;
4486 struct ext4_buddy e4b;
4487 int err = 0;
4488 int ret;
4489
4490 *freed = 0;
4491
4492 ext4_mb_poll_new_transaction(sb, handle);
4493
4494 sbi = EXT4_SB(sb);
4495 es = EXT4_SB(sb)->s_es;
4496 if (block < le32_to_cpu(es->s_first_data_block) ||
4497 block + count < block ||
4498 block + count > ext4_blocks_count(es)) {
4499 ext4_error(sb, __FUNCTION__,
4500 "Freeing blocks not in datazone - "
4501 "block = %lu, count = %lu", block, count);
4502 goto error_return;
4503 }
4504
4505 ext4_debug("freeing block %lu\n", block);
4506
4507 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4508 if (ac) {
4509 ac->ac_op = EXT4_MB_HISTORY_FREE;
4510 ac->ac_inode = inode;
4511 ac->ac_sb = sb;
4512 }
4513
4514do_more:
4515 overflow = 0;
4516 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4517
4518 /*
4519 * Check to see if we are freeing blocks across a group
4520 * boundary.
4521 */
4522 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4523 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4524 count -= overflow;
4525 }
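	/*
	 * Worked example (editorial; numbers assumed): with
	 * EXT4_BLOCKS_PER_GROUP(sb) = 32768, freeing count = 20 blocks
	 * at bit = 32760 gives overflow = 32760 + 20 - 32768 = 12; this
	 * pass frees the 8 blocks left in the group, then the
	 * "goto do_more" at the bottom of the function frees the
	 * remaining 12 from the start of the next group.
	 */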
4526 bitmap_bh = read_block_bitmap(sb, block_group);
4527 if (!bitmap_bh)
4528 goto error_return;
4529 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4530 if (!gdp)
4531 goto error_return;
4532
4533 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4534 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4535 in_range(block, ext4_inode_table(sb, gdp),
4536 EXT4_SB(sb)->s_itb_per_group) ||
4537 in_range(block + count - 1, ext4_inode_table(sb, gdp),
4538 EXT4_SB(sb)->s_itb_per_group)) {
4539
4540 ext4_error(sb, __FUNCTION__,
4541 "Freeing blocks in system zone - "
4542 "Block = %lu, count = %lu", block, count);
4543 }
4544
4545 BUFFER_TRACE(bitmap_bh, "getting write access");
4546 err = ext4_journal_get_write_access(handle, bitmap_bh);
4547 if (err)
4548 goto error_return;
4549
4550 /*
4551 * We are about to modify some metadata. Call the journal APIs
4552 * to unshare ->b_data if a currently-committing transaction is
4553 * using it
4554 */
4555 BUFFER_TRACE(gd_bh, "get_write_access");
4556 err = ext4_journal_get_write_access(handle, gd_bh);
4557 if (err)
4558 goto error_return;
4559
4560 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4561 if (err)
4562 goto error_return;
4563
4564#ifdef AGGRESSIVE_CHECK
4565 {
4566 int i;
4567 for (i = 0; i < count; i++)
4568 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4569 }
4570#endif
4571 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4572 bit, count);
4573
4574 /* We dirtied the bitmap block */
4575 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4576 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4577
4578 if (ac) {
4579 ac->ac_b_ex.fe_group = block_group;
4580 ac->ac_b_ex.fe_start = bit;
4581 ac->ac_b_ex.fe_len = count;
4582 ext4_mb_store_history(ac);
4583 }
4584
4585 if (metadata) {
4586 /* blocks being freed are metadata. these blocks shouldn't
4587 * be used until this transaction is committed */
4588 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4589 } else {
4590 ext4_lock_group(sb, block_group);
4591 err = mb_free_blocks(inode, &e4b, bit, count);
4592 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4593 ext4_unlock_group(sb, block_group);
4594 BUG_ON(err != 0);
4595 }
4596
4597 spin_lock(sb_bgl_lock(sbi, block_group));
4598 gdp->bg_free_blocks_count =
4599 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
4600 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4601 spin_unlock(sb_bgl_lock(sbi, block_group));
4602 percpu_counter_add(&sbi->s_freeblocks_counter, count);
4603
4604 ext4_mb_release_desc(&e4b);
4605
4606 *freed += count;
4607
4608 /* And the group descriptor block */
4609 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4610 ret = ext4_journal_dirty_metadata(handle, gd_bh);
4611 if (!err)
4612 err = ret;
4613
4614 if (overflow && !err) {
4615 block += count;
4616 count = overflow;
4617 put_bh(bitmap_bh);
4618 goto do_more;
4619 }
4620 sb->s_dirt = 1;
4621error_return:
4622 brelse(bitmap_bh);
4623 ext4_std_error(sb, err);
4624 if (ac)
4625 kmem_cache_free(ext4_ac_cachep, ac);
4626 return;
4627}