/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "mballoc.h"

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of the
 * file. The size of the file could be the resulting file size we would
 * have after allocation, or the current file size, whichever is larger.
 * If the size is less than sbi->s_mb_stream_request we select the group
 * preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /proc/fs/ext4/<partition>/stream_req. The value is represented in terms
 * of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the prealloc spaces for
 * this particular inode. The inode prealloc space is represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. If only the logical file block falls within the range of prealloc
 * space we will consume the particular prealloc space. This makes sure
 * that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per cpu locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access them through the page cache. The information
 * regarding each group is loaded via ext4_mb_load_buddy. The information
 * involves block bitmap and buddy information. The information is stored
 * in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks. So it can hold information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * 512 blocks. This can be tuned via
 * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables:
 *
 * /proc/fs/ext4/<partition>/min_to_scan
 * /proc/fs/ext4/<partition>/max_to_scan
 * /proc/fs/ext4/<partition>/order2_req
 *
 * The regular allocator uses a buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /proc/fs/ext4/<partition>/order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * of stripe size. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best
 * extents. The tunables min_to_scan and max_to_scan control the behaviour
 * here. min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both the prealloc spaces are getting populated as above. So for the
 * first request we will hit the buddy cache which will result in this
 * prealloc space getting filled. The prealloc space is then later used
 * for subsequent requests.
 */
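
/*
 * A worked example of the buddy-cache geometry described above
 * (illustrative numbers only, assuming PAGE_CACHE_SIZE = 4096 and
 * blocksize = 1024): blocks_per_page = 4096 / 1024 = 4, so each page
 * holds groups_per_page = 4 / 2 = 2 groups; the two blocks belonging
 * to group G live at file offsets 2*G and 2*G + 1 in the buddy cache
 * inode, i.e. in page (2*G) / 4.
 */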

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within a descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set a bit in
 *     the on-disk bitmap if the buddy has the same bit set or/and a PA
 *     covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA can be referenced, and while it is, no discard is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
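
/*
 * A minimal sketch of the counting rules above, with plain integers
 * standing in for the real structures (hypothetical values, for
 * illustration only):
 *
 *	int ondisk = 10, pa = 0, buddy;
 *
 *	buddy = ondisk + pa;	// init buddy: buddy = 10
 *	buddy += 4; pa = 4;	// new PA: buddy = 14
 *	ondisk += 1; pa -= 1;	// use inode PA: ondisk = 11, pa = 3
 *	buddy -= pa; pa = 0;	// discard locality group PA: buddy = 11
 *
 * at every step "used in buddy" >= "used on disk", which is exactly
 * invariant (3): a bit may be set in the buddy while only a PA, not
 * the on-disk bitmap, covers the corresponding block.
 */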

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
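
/*
 * For instance, the "new pa" path above takes the object lock first
 * and the group bitlock second, never the reverse. A hedged sketch of
 * that ordering (lock names here are generic placeholders, not the
 * exact fields used later in this file):
 *
 *	spin_lock(object_lock);		// object (inode or locality group)
 *	ext4_lock_group(sb, group);	// group bitlock
 *	...link the new PA...
 *	ext4_unlock_group(sb, group);
 *	spin_unlock(object_lock);
 */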

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
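
/*
 * Example (64-bit case): for addr = 0x1003 and *bit = 5, the three low
 * address bits are folded into the bit index, giving
 * *bit = 5 + (3 << 3) = 29 and addr = 0x1000, so callers can pass any
 * byte address and still perform an aligned unsigned long access.
 */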

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit_atomic(lock, bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit_atomic(lock, bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
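
/*
 * Example of the fixup in the two helpers above (64-bit case): for an
 * addr two bytes past an aligned boundary, fix becomes 2 << 3 = 16,
 * so a search over max = 100 bits really scans tmpmax = 116 bits from
 * start + 16 and subtracts fix from the result; the final clamp keeps
 * the answer within the caller's original range.
 */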

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	*max = 1 << (e4b->bd_blkbits + 3);
	if (order == 0)
		return EXT4_MB_BITMAP(e4b);

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
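
/*
 * E.g. with a 4K blocksize (bd_blkbits = 12), order 0 spans
 * 1 << (12 + 3) = 32768 bits, one per block in the group; each higher
 * order lives at the precomputed s_mb_offsets[order] byte offset
 * inside the buddy block and covers half as many bits as the order
 * below it (s_mb_maxs[order]).
 */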

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += first + i;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);

			ext4_error(sb, __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %lu)\n",
				   inode ? inode->i_ino : 0, blocknr,
				   first + i, e4b->bd_group);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %lu "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (!test_opt(sb, MBALLOC))
		return 0;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 0 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	buddy = mb_find_buddy(e4b, 0, &max);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/* FIXME!! need more doc */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, unsigned first, int len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned short min;
	unsigned short max;
	unsigned short chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
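
/*
 * Worked example for the loop above: a free range first = 6, len = 6
 * is carved into power-of-two chunks. In the first pass
 * max = ffs(6 | border) - 1 = 1 and min = fls(6) - 1 = 2, so min is
 * capped at 1 and blocks 6..7 become one order-1 chunk; the second
 * pass (first = 8, len = 4) marks blocks 8..11 as a single order-2
 * chunk.
 */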

static void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned short i = 0;
	unsigned short first;
	unsigned short len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_error(sb, __func__,
			"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
			group, free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can hold information regarding groups_per_page groups,
 * which is blocks_per_page/2
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;

	mb_debug("init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= EXT4_SB(sb)->s_groups_count)
			break;

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (bh_uptodate_or_lock(bh[i]))
			continue;

		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_buffer_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
			continue;
		}
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		get_bh(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug("read bitmap for group %lu\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page && bh[i]; i++)
		wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page && bh[i]; i++)
		if (!buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;
		struct ext4_group_info *grinfo;

		group = (first_block + i) >> 1;
		if (group >= EXT4_SB(sb)->s_groups_count)
			break;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug("put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			memset(data, 0xff, blocksize);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_mb_generate_buddy(sb, data, incore, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug("put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page && bh[i]; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}
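
/*
 * E.g. with blocksize 1024 and 4K pages (blocks_per_page = 4), page
 * index 3 starts at first_block = 12 and carries blocks 12..15 of the
 * buddy cache inode, i.e. the bitmap/buddy pairs of groups 6 and 7
 * (group = block >> 1), matching first_group = 3 * 4 / 2 = 6 as
 * computed above.
 */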

static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;

	mb_debug("load group %lu\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_release_desc(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
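
/*
 * Example: if block 5 lies in a free order-2 chunk (blocks 4..7), the
 * order-1 bit at index 5 >> 1 = 2 is still set, but the order-2 bit
 * at index 5 >> 2 = 1 is clear, so the walk above returns 2; a lone
 * free block matches no higher order and falls through to return 0.
 */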

static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit_atomic(lock, cur, bm);
		cur++;
	}
}

static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit_atomic(lock, cur, bm);
		cur++;
	}
}
1057 | ||
7e5a8cdd | 1058 | static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, |
c9de560d AT |
1059 | int first, int count) |
1060 | { | |
1061 | int block = 0; | |
1062 | int max = 0; | |
1063 | int order; | |
1064 | void *buddy; | |
1065 | void *buddy2; | |
1066 | struct super_block *sb = e4b->bd_sb; | |
1067 | ||
1068 | BUG_ON(first + count > (sb->s_blocksize << 3)); | |
1069 | BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group)); | |
1070 | mb_check_buddy(e4b); | |
1071 | mb_free_blocks_double(inode, e4b, first, count); | |
1072 | ||
1073 | e4b->bd_info->bb_free += count; | |
1074 | if (first < e4b->bd_info->bb_first_free) | |
1075 | e4b->bd_info->bb_first_free = first; | |
1076 | ||
1077 | /* let's maintain fragments counter */ | |
1078 | if (first != 0) | |
1079 | block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b)); | |
1080 | if (first + count < EXT4_SB(sb)->s_mb_maxs[0]) | |
1081 | max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b)); | |
1082 | if (block && max) | |
1083 | e4b->bd_info->bb_fragments--; | |
1084 | else if (!block && !max) | |
1085 | e4b->bd_info->bb_fragments++; | |
1086 | ||
1087 | /* let's maintain buddy itself */ | |
1088 | while (count-- > 0) { | |
1089 | block = first++; | |
1090 | order = 0; | |
1091 | ||
1092 | if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { | |
1093 | ext4_fsblk_t blocknr; | |
1094 | blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb); | |
1095 | blocknr += block; | |
1096 | blocknr += | |
1097 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | |
7e5a8cdd | 1098 | ext4_unlock_group(sb, e4b->bd_group); |
46e665e9 | 1099 | ext4_error(sb, __func__, "double-free of inode" |
c9de560d AT |
1100 | " %lu's block %llu(bit %u in group %lu)\n", |
1101 | inode ? inode->i_ino : 0, blocknr, block, | |
1102 | e4b->bd_group); | |
7e5a8cdd | 1103 | ext4_lock_group(sb, e4b->bd_group); |
c9de560d AT |
1104 | } |
1105 | mb_clear_bit(block, EXT4_MB_BITMAP(e4b)); | |
1106 | e4b->bd_info->bb_counters[order]++; | |
1107 | ||
1108 | /* start of the buddy */ | |
1109 | buddy = mb_find_buddy(e4b, order, &max); | |
1110 | ||
1111 | do { | |
1112 | block &= ~1UL; | |
1113 | if (mb_test_bit(block, buddy) || | |
1114 | mb_test_bit(block + 1, buddy)) | |
1115 | break; | |
1116 | ||
1117 | /* both the buddies are free, try to coalesce them */ | |
1118 | buddy2 = mb_find_buddy(e4b, order + 1, &max); | |
1119 | ||
1120 | if (!buddy2) | |
1121 | break; | |
1122 | ||
1123 | if (order > 0) { | |
1124 | /* for special purposes, we don't set | |
1125 | * free bits in bitmap */ | |
1126 | mb_set_bit(block, buddy); | |
1127 | mb_set_bit(block + 1, buddy); | |
1128 | } | |
1129 | e4b->bd_info->bb_counters[order]--; | |
1130 | e4b->bd_info->bb_counters[order]--; | |
1131 | ||
1132 | block = block >> 1; | |
1133 | order++; | |
1134 | e4b->bd_info->bb_counters[order]++; | |
1135 | ||
1136 | mb_clear_bit(block, buddy2); | |
1137 | buddy = buddy2; | |
1138 | } while (1); | |
1139 | } | |
1140 | mb_check_buddy(e4b); | |
c9de560d AT |
1141 | } |
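
/*
 * Worked example of the coalescing loop above: freeing block 5 while
 * its order-0 buddy, block 4, is already free merges the pair; both
 * order-0 counters are dropped, bit 2 is cleared in the order-1 map,
 * bb_counters[1] grows by one, and the walk retries one order up with
 * chunk 2 against its buddy, chunk 3.
 */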

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME: drop order completely? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}

	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
		    EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
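
/*
 * The return value packs the first buddy split for allocation
 * history: e.g. if the first split broke an order-3 chunk while 6
 * blocks were still needed, ret = 6 | (3 << 16), which
 * ext4_mb_use_best_found() below unpacks into ac_tail = 6 and
 * ac_buddy = 3.
 */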

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/* XXXXXXX: SUCH A HORRIBLE **CK */
	/* FIXME!! Why? */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);

	/* store last allocated for subsequent stream allocation */
	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purpose allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}

static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
			ex.fe_start + le32_to_cpu(es->s_first_data_block);
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}
1508 | ||
1509 | /* | |
1510 | * The routine scans buddy structures (not bitmap!) from given order | |
1511 | * to max order and tries to find big enough chunk to satisfy the req | |
1512 | */ | |
1513 | static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, | |
1514 | struct ext4_buddy *e4b) | |
1515 | { | |
1516 | struct super_block *sb = ac->ac_sb; | |
1517 | struct ext4_group_info *grp = e4b->bd_info; | |
1518 | void *buddy; | |
1519 | int i; | |
1520 | int k; | |
1521 | int max; | |
1522 | ||
1523 | BUG_ON(ac->ac_2order <= 0); | |
1524 | for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { | |
1525 | if (grp->bb_counters[i] == 0) | |
1526 | continue; | |
1527 | ||
1528 | buddy = mb_find_buddy(e4b, i, &max); | |
1529 | BUG_ON(buddy == NULL); | |
1530 | ||
ffad0a44 | 1531 | k = mb_find_next_zero_bit(buddy, max, 0); |
c9de560d AT |
1532 | BUG_ON(k >= max); |
1533 | ||
1534 | ac->ac_found++; | |
1535 | ||
1536 | ac->ac_b_ex.fe_len = 1 << i; | |
1537 | ac->ac_b_ex.fe_start = k << i; | |
1538 | ac->ac_b_ex.fe_group = e4b->bd_group; | |
1539 | ||
1540 | ext4_mb_use_best_found(ac, e4b); | |
1541 | ||
1542 | BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); | |
1543 | ||
1544 | if (EXT4_SB(sb)->s_mb_stats) | |
1545 | atomic_inc(&EXT4_SB(sb)->s_bal_2orders); | |
1546 | ||
1547 | break; | |
1548 | } | |
1549 | } | |
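| /* | |
| * Illustrative example of the buddy math above: for a 16-block request | |
| * ac_2order == 4, so the scan starts at the order-4 buddy bitmap. If | |
| * bb_counters[4] != 0 and the first zero bit found is k == 3, the chunk | |
| * covers group-relative blocks [3 << 4, (3 << 4) + 15] == [48, 63], | |
| * i.e. fe_start = 48 and fe_len = 16. | |
| */ | |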
1550 | ||
1551 | /* | |
1552 | * The routine scans the group and measures all found extents. | |
1553 | * To optimize scanning, the caller must pass the number of | |
1554 | * free blocks in the group, so the routine knows the upper limit. | |
1555 | */ | |
1556 | static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, | |
1557 | struct ext4_buddy *e4b) | |
1558 | { | |
1559 | struct super_block *sb = ac->ac_sb; | |
1560 | void *bitmap = EXT4_MB_BITMAP(e4b); | |
1561 | struct ext4_free_extent ex; | |
1562 | int i; | |
1563 | int free; | |
1564 | ||
1565 | free = e4b->bd_info->bb_free; | |
1566 | BUG_ON(free <= 0); | |
1567 | ||
1568 | i = e4b->bd_info->bb_first_free; | |
1569 | ||
1570 | while (free && ac->ac_status == AC_STATUS_CONTINUE) { | |
ffad0a44 | 1571 | i = mb_find_next_zero_bit(bitmap, |
c9de560d AT |
1572 | EXT4_BLOCKS_PER_GROUP(sb), i); |
1573 | if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { | |
26346ff6 | 1574 | /* |
e56eb659 | 1575 | * If we have a corrupt bitmap, we won't find any |
26346ff6 AK |
1576 | * free blocks even though the group info says we | |
1577 | * have free blocks | |
1578 | */ | |
46e665e9 | 1579 | ext4_error(sb, __func__, "%d free blocks as per " |
26346ff6 AK |
1580 | "group info. But bitmap says 0\n", |
1581 | free); | |
c9de560d AT |
1582 | break; |
1583 | } | |
1584 | ||
1585 | mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); | |
1586 | BUG_ON(ex.fe_len <= 0); | |
26346ff6 | 1587 | if (free < ex.fe_len) { |
46e665e9 | 1588 | ext4_error(sb, __func__, "%d free blocks as per " |
26346ff6 AK |
1589 | "group info. But got %d blocks\n", |
1590 | free, ex.fe_len); | |
e56eb659 AK |
1591 | /* |
1592 | * The number of free blocks differs. This mostly | |
1593 | * indicates that the bitmap is corrupt. So exit | |
1594 | * without claiming the space. | |
1595 | */ | |
1596 | break; | |
26346ff6 | 1597 | } |
c9de560d AT |
1598 | |
1599 | ext4_mb_measure_extent(ac, &ex, e4b); | |
1600 | ||
1601 | i += ex.fe_len; | |
1602 | free -= ex.fe_len; | |
1603 | } | |
1604 | ||
1605 | ext4_mb_check_limits(ac, e4b, 1); | |
1606 | } | |
1607 | ||
1608 | /* | |
1609 | * This is a special case for storage like RAID5: | |
1610 | * we try to find stripe-aligned chunks for stripe-size requests. | |
1611 | * XXX: should do so at least for multiples of the stripe size as well | |
1612 | */ | |
1613 | static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, | |
1614 | struct ext4_buddy *e4b) | |
1615 | { | |
1616 | struct super_block *sb = ac->ac_sb; | |
1617 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
1618 | void *bitmap = EXT4_MB_BITMAP(e4b); | |
1619 | struct ext4_free_extent ex; | |
1620 | ext4_fsblk_t first_group_block; | |
1621 | ext4_fsblk_t a; | |
1622 | ext4_grpblk_t i; | |
1623 | int max; | |
1624 | ||
1625 | BUG_ON(sbi->s_stripe == 0); | |
1626 | ||
1627 | /* find first stripe-aligned block in group */ | |
1628 | first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb) | |
1629 | + le32_to_cpu(sbi->s_es->s_first_data_block); | |
1630 | a = first_group_block + sbi->s_stripe - 1; | |
1631 | do_div(a, sbi->s_stripe); | |
1632 | i = (a * sbi->s_stripe) - first_group_block; | |
1633 | ||
1634 | while (i < EXT4_BLOCKS_PER_GROUP(sb)) { | |
1635 | if (!mb_test_bit(i, bitmap)) { | |
1636 | max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex); | |
1637 | if (max >= sbi->s_stripe) { | |
1638 | ac->ac_found++; | |
1639 | ac->ac_b_ex = ex; | |
1640 | ext4_mb_use_best_found(ac, e4b); | |
1641 | break; | |
1642 | } | |
1643 | } | |
1644 | i += sbi->s_stripe; | |
1645 | } | |
1646 | } | |
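| /* | |
| * Illustrative example of the round-up above, assuming s_stripe == 16 and | |
| * first_group_block == 32770: a = 32770 + 15 = 32785, do_div(a, 16) leaves | |
| * a = 2049, so i = 2049 * 16 - 32770 = 14 and the scan starts at the first | |
| * stripe-aligned block (absolute block 32784) in the group. | |
| */ | |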
1647 | ||
1648 | static int ext4_mb_good_group(struct ext4_allocation_context *ac, | |
1649 | ext4_group_t group, int cr) | |
1650 | { | |
1651 | unsigned free, fragments; | |
1652 | unsigned i, bits; | |
1653 | struct ext4_group_desc *desc; | |
1654 | struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); | |
1655 | ||
1656 | BUG_ON(cr < 0 || cr >= 4); | |
1657 | BUG_ON(EXT4_MB_GRP_NEED_INIT(grp)); | |
1658 | ||
1659 | free = grp->bb_free; | |
1660 | fragments = grp->bb_fragments; | |
1661 | if (free == 0) | |
1662 | return 0; | |
1663 | if (fragments == 0) | |
1664 | return 0; | |
1665 | ||
1666 | switch (cr) { | |
1667 | case 0: | |
1668 | BUG_ON(ac->ac_2order == 0); | |
1669 | /* If this group is uninitialized, skip it initially */ | |
1670 | desc = ext4_get_group_desc(ac->ac_sb, group, NULL); | |
1671 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) | |
1672 | return 0; | |
1673 | ||
1674 | bits = ac->ac_sb->s_blocksize_bits + 1; | |
1675 | for (i = ac->ac_2order; i <= bits; i++) | |
1676 | if (grp->bb_counters[i] > 0) | |
1677 | return 1; | |
1678 | break; | |
1679 | case 1: | |
1680 | if ((free / fragments) >= ac->ac_g_ex.fe_len) | |
1681 | return 1; | |
1682 | break; | |
1683 | case 2: | |
1684 | if (free >= ac->ac_g_ex.fe_len) | |
1685 | return 1; | |
1686 | break; | |
1687 | case 3: | |
1688 | return 1; | |
1689 | default: | |
1690 | BUG(); | |
1691 | } | |
1692 | ||
1693 | return 0; | |
1694 | } | |
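| /* | |
| * In short, the criteria above relax as cr increases: | |
| * cr == 0: a free buddy chunk of order >= ac_2order must exist, | |
| * cr == 1: the average fragment (free / fragments) must cover the request, | |
| * cr == 2: the group's total free blocks must cover the request, | |
| * cr == 3: any group with free blocks will do. | |
| */ | |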
1695 | ||
4ddfef7b ES |
1696 | static noinline_for_stack int |
1697 | ext4_mb_regular_allocator(struct ext4_allocation_context *ac) | |
c9de560d AT |
1698 | { |
1699 | ext4_group_t group; | |
1700 | ext4_group_t i; | |
1701 | int cr; | |
1702 | int err = 0; | |
1703 | int bsbits; | |
1704 | struct ext4_sb_info *sbi; | |
1705 | struct super_block *sb; | |
1706 | struct ext4_buddy e4b; | |
1707 | loff_t size, isize; | |
1708 | ||
1709 | sb = ac->ac_sb; | |
1710 | sbi = EXT4_SB(sb); | |
1711 | BUG_ON(ac->ac_status == AC_STATUS_FOUND); | |
1712 | ||
1713 | /* first, try the goal */ | |
1714 | err = ext4_mb_find_by_goal(ac, &e4b); | |
1715 | if (err || ac->ac_status == AC_STATUS_FOUND) | |
1716 | goto out; | |
1717 | ||
1718 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) | |
1719 | goto out; | |
1720 | ||
1721 | /* | |
1722 | * ac->ac_2order is set only if the fe_len is a power of 2; | |
1723 | * if ac_2order is set we also set the criteria to 0 so that we | |
1724 | * try an exact allocation using the buddy. | |
1725 | */ | |
1726 | i = fls(ac->ac_g_ex.fe_len); | |
1727 | ac->ac_2order = 0; | |
1728 | /* | |
1729 | * We search using buddy data only if the order of the request | |
1730 | * is greater than or equal to sbi->s_mb_order2_reqs. | |
1731 | * You can tune it via /proc/fs/ext4/<partition>/order2_req | |
1732 | */ | |
1733 | if (i >= sbi->s_mb_order2_reqs) { | |
1734 | /* | |
1735 | * This tells us whether fe_len is exactly a power of 2 | |
1736 | */ | |
1737 | if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) | |
1738 | ac->ac_2order = i - 1; | |
1739 | } | |
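| /* | |
| * E.g. for fe_len == 16, fls() == 5 and 16 & ~(1 << 4) == 0, so ac_2order | |
| * becomes 4 (assuming the order2_req threshold is met); for fe_len == 24, | |
| * 24 & ~(1 << 4) == 8 != 0, so ac_2order stays 0 and the exact buddy scan | |
| * is skipped. | |
| */ | |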
1740 | ||
1741 | bsbits = ac->ac_sb->s_blocksize_bits; | |
1742 | /* if stream allocation is enabled, use global goal */ | |
1743 | size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; | |
1744 | isize = i_size_read(ac->ac_inode) >> bsbits; | |
1745 | if (size < isize) | |
1746 | size = isize; | |
1747 | ||
1748 | if (size < sbi->s_mb_stream_request && | |
1749 | (ac->ac_flags & EXT4_MB_HINT_DATA)) { | |
1750 | /* TBD: this may be a hot spot */ | |
1751 | spin_lock(&sbi->s_md_lock); | |
1752 | ac->ac_g_ex.fe_group = sbi->s_mb_last_group; | |
1753 | ac->ac_g_ex.fe_start = sbi->s_mb_last_start; | |
1754 | spin_unlock(&sbi->s_md_lock); | |
1755 | } | |
c9de560d AT |
1756 | /* Let's just scan groups to find more or less suitable blocks */ | |
1757 | cr = ac->ac_2order ? 0 : 1; | |
1758 | /* | |
1759 | * cr == 0 try to get exact allocation, | |
1760 | * cr == 3 try to get anything | |
1761 | */ | |
1762 | repeat: | |
1763 | for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { | |
1764 | ac->ac_criteria = cr; | |
ed8f9c75 AK |
1765 | /* |
1766 | * searching for the right group start | |
1767 | * from the goal value specified | |
1768 | */ | |
1769 | group = ac->ac_g_ex.fe_group; | |
1770 | ||
c9de560d AT |
1771 | for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) { |
1772 | struct ext4_group_info *grp; | |
1773 | struct ext4_group_desc *desc; | |
1774 | ||
1775 | if (group == EXT4_SB(sb)->s_groups_count) | |
1776 | group = 0; | |
1777 | ||
1778 | /* quick check to skip empty groups */ | |
1779 | grp = ext4_get_group_info(ac->ac_sb, group); | |
1780 | if (grp->bb_free == 0) | |
1781 | continue; | |
1782 | ||
1783 | /* | |
1784 | * if the group is already initialized, we check whether it is | |
1785 | * a good group; if not, we don't load the buddy | |
1786 | */ | |
1787 | if (EXT4_MB_GRP_NEED_INIT(grp)) { | |
1788 | /* | |
1789 | * we need full data about the group | |
1790 | * to make a good selection | |
1791 | */ | |
1792 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
1793 | if (err) | |
1794 | goto out; | |
1795 | ext4_mb_release_desc(&e4b); | |
1796 | } | |
1797 | ||
1798 | /* | |
1799 | * If the particular group doesn't satisfy our | |
1800 | * criteria we continue with the next group | |
1801 | */ | |
1802 | if (!ext4_mb_good_group(ac, group, cr)) | |
1803 | continue; | |
1804 | ||
1805 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
1806 | if (err) | |
1807 | goto out; | |
1808 | ||
1809 | ext4_lock_group(sb, group); | |
1810 | if (!ext4_mb_good_group(ac, group, cr)) { | |
1811 | /* someone did allocation from this group */ | |
1812 | ext4_unlock_group(sb, group); | |
1813 | ext4_mb_release_desc(&e4b); | |
1814 | continue; | |
1815 | } | |
1816 | ||
1817 | ac->ac_groups_scanned++; | |
1818 | desc = ext4_get_group_desc(sb, group, NULL); | |
1819 | if (cr == 0 || (desc->bg_flags & | |
1820 | cpu_to_le16(EXT4_BG_BLOCK_UNINIT) && | |
1821 | ac->ac_2order != 0)) | |
1822 | ext4_mb_simple_scan_group(ac, &e4b); | |
1823 | else if (cr == 1 && | |
1824 | ac->ac_g_ex.fe_len == sbi->s_stripe) | |
1825 | ext4_mb_scan_aligned(ac, &e4b); | |
1826 | else | |
1827 | ext4_mb_complex_scan_group(ac, &e4b); | |
1828 | ||
1829 | ext4_unlock_group(sb, group); | |
1830 | ext4_mb_release_desc(&e4b); | |
1831 | ||
1832 | if (ac->ac_status != AC_STATUS_CONTINUE) | |
1833 | break; | |
1834 | } | |
1835 | } | |
1836 | ||
1837 | if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && | |
1838 | !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { | |
1839 | /* | |
1840 | * We've been searching too long. Let's try to allocate | |
1841 | * the best chunk we've found so far | |
1842 | */ | |
1843 | ||
1844 | ext4_mb_try_best_found(ac, &e4b); | |
1845 | if (ac->ac_status != AC_STATUS_FOUND) { | |
1846 | /* | |
1847 | * Someone more lucky has already allocated it. | |
1848 | * The only thing we can do is just take first | |
1849 | * found block(s) | |
1850 | printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); | |
1851 | */ | |
1852 | ac->ac_b_ex.fe_group = 0; | |
1853 | ac->ac_b_ex.fe_start = 0; | |
1854 | ac->ac_b_ex.fe_len = 0; | |
1855 | ac->ac_status = AC_STATUS_CONTINUE; | |
1856 | ac->ac_flags |= EXT4_MB_HINT_FIRST; | |
1857 | cr = 3; | |
1858 | atomic_inc(&sbi->s_mb_lost_chunks); | |
1859 | goto repeat; | |
1860 | } | |
1861 | } | |
1862 | out: | |
1863 | return err; | |
1864 | } | |
1865 | ||
1866 | #ifdef EXT4_MB_HISTORY | |
1867 | struct ext4_mb_proc_session { | |
1868 | struct ext4_mb_history *history; | |
1869 | struct super_block *sb; | |
1870 | int start; | |
1871 | int max; | |
1872 | }; | |
1873 | ||
1874 | static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s, | |
1875 | struct ext4_mb_history *hs, | |
1876 | int first) | |
1877 | { | |
1878 | if (hs == s->history + s->max) | |
1879 | hs = s->history; | |
1880 | if (!first && hs == s->history + s->start) | |
1881 | return NULL; | |
1882 | while (hs->orig.fe_len == 0) { | |
1883 | hs++; | |
1884 | if (hs == s->history + s->max) | |
1885 | hs = s->history; | |
1886 | if (hs == s->history + s->start) | |
1887 | return NULL; | |
1888 | } | |
1889 | return hs; | |
1890 | } | |
1891 | ||
1892 | static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos) | |
1893 | { | |
1894 | struct ext4_mb_proc_session *s = seq->private; | |
1895 | struct ext4_mb_history *hs; | |
1896 | int l = *pos; | |
1897 | ||
1898 | if (l == 0) | |
1899 | return SEQ_START_TOKEN; | |
1900 | hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1); | |
1901 | if (!hs) | |
1902 | return NULL; | |
1903 | while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL); | |
1904 | return hs; | |
1905 | } | |
1906 | ||
1907 | static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v, | |
1908 | loff_t *pos) | |
1909 | { | |
1910 | struct ext4_mb_proc_session *s = seq->private; | |
1911 | struct ext4_mb_history *hs = v; | |
1912 | ||
1913 | ++*pos; | |
1914 | if (v == SEQ_START_TOKEN) | |
1915 | return ext4_mb_history_skip_empty(s, s->history + s->start, 1); | |
1916 | else | |
1917 | return ext4_mb_history_skip_empty(s, ++hs, 0); | |
1918 | } | |
1919 | ||
1920 | static int ext4_mb_seq_history_show(struct seq_file *seq, void *v) | |
1921 | { | |
1922 | char buf[25], buf2[25], buf3[25], *fmt; | |
1923 | struct ext4_mb_history *hs = v; | |
1924 | ||
1925 | if (v == SEQ_START_TOKEN) { | |
1926 | seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s " | |
1927 | "%-5s %-2s %-5s %-5s %-5s %-6s\n", | |
1928 | "pid", "inode", "original", "goal", "result", "found", | |
1929 | "grps", "cr", "flags", "merge", "tail", "broken"); | |
1930 | return 0; | |
1931 | } | |
1932 | ||
1933 | if (hs->op == EXT4_MB_HISTORY_ALLOC) { | |
1934 | fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u " | |
1935 | "%-5u %-5s %-5u %-6u\n"; | |
1936 | sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group, | |
1937 | hs->result.fe_start, hs->result.fe_len, | |
1938 | hs->result.fe_logical); | |
1939 | sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group, | |
1940 | hs->orig.fe_start, hs->orig.fe_len, | |
1941 | hs->orig.fe_logical); | |
1942 | sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group, | |
1943 | hs->goal.fe_start, hs->goal.fe_len, | |
1944 | hs->goal.fe_logical); | |
1945 | seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2, | |
1946 | hs->found, hs->groups, hs->cr, hs->flags, | |
1947 | hs->merged ? "M" : "", hs->tail, | |
1948 | hs->buddy ? 1 << hs->buddy : 0); | |
1949 | } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) { | |
1950 | fmt = "%-5u %-8u %-23s %-23s %-23s\n"; | |
1951 | sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group, | |
1952 | hs->result.fe_start, hs->result.fe_len, | |
1953 | hs->result.fe_logical); | |
1954 | sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group, | |
1955 | hs->orig.fe_start, hs->orig.fe_len, | |
1956 | hs->orig.fe_logical); | |
1957 | seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2); | |
1958 | } else if (hs->op == EXT4_MB_HISTORY_DISCARD) { | |
1959 | sprintf(buf2, "%lu/%d/%u", hs->result.fe_group, | |
1960 | hs->result.fe_start, hs->result.fe_len); | |
1961 | seq_printf(seq, "%-5u %-8u %-23s discard\n", | |
1962 | hs->pid, hs->ino, buf2); | |
1963 | } else if (hs->op == EXT4_MB_HISTORY_FREE) { | |
1964 | sprintf(buf2, "%lu/%d/%u", hs->result.fe_group, | |
1965 | hs->result.fe_start, hs->result.fe_len); | |
1966 | seq_printf(seq, "%-5u %-8u %-23s free\n", | |
1967 | hs->pid, hs->ino, buf2); | |
1968 | } | |
1969 | return 0; | |
1970 | } | |
1971 | ||
1972 | static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v) | |
1973 | { | |
1974 | } | |
1975 | ||
1976 | static struct seq_operations ext4_mb_seq_history_ops = { | |
1977 | .start = ext4_mb_seq_history_start, | |
1978 | .next = ext4_mb_seq_history_next, | |
1979 | .stop = ext4_mb_seq_history_stop, | |
1980 | .show = ext4_mb_seq_history_show, | |
1981 | }; | |
1982 | ||
1983 | static int ext4_mb_seq_history_open(struct inode *inode, struct file *file) | |
1984 | { | |
1985 | struct super_block *sb = PDE(inode)->data; | |
1986 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
1987 | struct ext4_mb_proc_session *s; | |
1988 | int rc; | |
1989 | int size; | |
1990 | ||
74767c5a SF |
1991 | if (unlikely(sbi->s_mb_history == NULL)) |
1992 | return -ENOMEM; | |
c9de560d AT |
1993 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1994 | if (s == NULL) | |
1995 | return -ENOMEM; | |
1996 | s->sb = sb; | |
1997 | size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max; | |
1998 | s->history = kmalloc(size, GFP_KERNEL); | |
1999 | if (s->history == NULL) { | |
2000 | kfree(s); | |
2001 | return -ENOMEM; | |
2002 | } | |
2003 | ||
2004 | spin_lock(&sbi->s_mb_history_lock); | |
2005 | memcpy(s->history, sbi->s_mb_history, size); | |
2006 | s->max = sbi->s_mb_history_max; | |
2007 | s->start = sbi->s_mb_history_cur % s->max; | |
2008 | spin_unlock(&sbi->s_mb_history_lock); | |
2009 | ||
2010 | rc = seq_open(file, &ext4_mb_seq_history_ops); | |
2011 | if (rc == 0) { | |
2012 | struct seq_file *m = (struct seq_file *)file->private_data; | |
2013 | m->private = s; | |
2014 | } else { | |
2015 | kfree(s->history); | |
2016 | kfree(s); | |
2017 | } | |
2018 | return rc; | |
2019 | ||
2020 | } | |
2021 | ||
2022 | static int ext4_mb_seq_history_release(struct inode *inode, struct file *file) | |
2023 | { | |
2024 | struct seq_file *seq = (struct seq_file *)file->private_data; | |
2025 | struct ext4_mb_proc_session *s = seq->private; | |
2026 | kfree(s->history); | |
2027 | kfree(s); | |
2028 | return seq_release(inode, file); | |
2029 | } | |
2030 | ||
2031 | static ssize_t ext4_mb_seq_history_write(struct file *file, | |
2032 | const char __user *buffer, | |
2033 | size_t count, loff_t *ppos) | |
2034 | { | |
2035 | struct seq_file *seq = (struct seq_file *)file->private_data; | |
2036 | struct ext4_mb_proc_session *s = seq->private; | |
2037 | struct super_block *sb = s->sb; | |
2038 | char str[32]; | |
2039 | int value; | |
2040 | ||
2041 | if (count >= sizeof(str)) { | |
2042 | printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n", | |
2043 | "mb_history", (int)sizeof(str)); | |
2044 | return -EOVERFLOW; | |
2045 | } | |
2046 | ||
2047 | if (copy_from_user(str, buffer, count)) | |
2048 | return -EFAULT; | |
2049 | ||
2050 | value = simple_strtol(str, NULL, 0); | |
2051 | if (value < 0) | |
2052 | return -ERANGE; | |
2053 | EXT4_SB(sb)->s_mb_history_filter = value; | |
2054 | ||
2055 | return count; | |
2056 | } | |
2057 | ||
2058 | static struct file_operations ext4_mb_seq_history_fops = { | |
2059 | .owner = THIS_MODULE, | |
2060 | .open = ext4_mb_seq_history_open, | |
2061 | .read = seq_read, | |
2062 | .write = ext4_mb_seq_history_write, | |
2063 | .llseek = seq_lseek, | |
2064 | .release = ext4_mb_seq_history_release, | |
2065 | }; | |
2066 | ||
2067 | static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) | |
2068 | { | |
2069 | struct super_block *sb = seq->private; | |
2070 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2071 | ext4_group_t group; | |
2072 | ||
2073 | if (*pos < 0 || *pos >= sbi->s_groups_count) | |
2074 | return NULL; | |
2075 | ||
2076 | group = *pos + 1; | |
2077 | return (void *) group; | |
2078 | } | |
2079 | ||
2080 | static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) | |
2081 | { | |
2082 | struct super_block *sb = seq->private; | |
2083 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2084 | ext4_group_t group; | |
2085 | ||
2086 | ++*pos; | |
2087 | if (*pos < 0 || *pos >= sbi->s_groups_count) | |
2088 | return NULL; | |
2089 | group = *pos + 1; | |
2090 | return (void *) group; | |
2091 | } | |
2092 | ||
2093 | static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) | |
2094 | { | |
2095 | struct super_block *sb = seq->private; | |
2096 | long group = (long) v; | |
2097 | int i; | |
2098 | int err; | |
2099 | struct ext4_buddy e4b; | |
2100 | struct sg { | |
2101 | struct ext4_group_info info; | |
2102 | unsigned short counters[16]; | |
2103 | } sg; | |
2104 | ||
2105 | group--; | |
2106 | if (group == 0) | |
2107 | seq_printf(seq, "#%-5s: %-5s %-5s %-5s " | |
2108 | "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s " | |
2109 | "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n", | |
2110 | "group", "free", "frags", "first", | |
2111 | "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6", | |
2112 | "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13"); | |
2113 | ||
2114 | i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + | |
2115 | sizeof(struct ext4_group_info); | |
2116 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
2117 | if (err) { | |
2118 | seq_printf(seq, "#%-5lu: I/O error\n", group); | |
2119 | return 0; | |
2120 | } | |
2121 | ext4_lock_group(sb, group); | |
2122 | memcpy(&sg, ext4_get_group_info(sb, group), i); | |
2123 | ext4_unlock_group(sb, group); | |
2124 | ext4_mb_release_desc(&e4b); | |
2125 | ||
2126 | seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free, | |
2127 | sg.info.bb_fragments, sg.info.bb_first_free); | |
2128 | for (i = 0; i <= 13; i++) | |
2129 | seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? | |
2130 | sg.info.bb_counters[i] : 0); | |
2131 | seq_printf(seq, " ]\n"); | |
2132 | ||
2133 | return 0; | |
2134 | } | |
2135 | ||
2136 | static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) | |
2137 | { | |
2138 | } | |
2139 | ||
2140 | static struct seq_operations ext4_mb_seq_groups_ops = { | |
2141 | .start = ext4_mb_seq_groups_start, | |
2142 | .next = ext4_mb_seq_groups_next, | |
2143 | .stop = ext4_mb_seq_groups_stop, | |
2144 | .show = ext4_mb_seq_groups_show, | |
2145 | }; | |
2146 | ||
2147 | static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) | |
2148 | { | |
2149 | struct super_block *sb = PDE(inode)->data; | |
2150 | int rc; | |
2151 | ||
2152 | rc = seq_open(file, &ext4_mb_seq_groups_ops); | |
2153 | if (rc == 0) { | |
2154 | struct seq_file *m = (struct seq_file *)file->private_data; | |
2155 | m->private = sb; | |
2156 | } | |
2157 | return rc; | |
2158 | ||
2159 | } | |
2160 | ||
2161 | static struct file_operations ext4_mb_seq_groups_fops = { | |
2162 | .owner = THIS_MODULE, | |
2163 | .open = ext4_mb_seq_groups_open, | |
2164 | .read = seq_read, | |
2165 | .llseek = seq_lseek, | |
2166 | .release = seq_release, | |
2167 | }; | |
2168 | ||
2169 | static void ext4_mb_history_release(struct super_block *sb) | |
2170 | { | |
2171 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2172 | ||
2173 | remove_proc_entry("mb_groups", sbi->s_mb_proc); | |
2174 | remove_proc_entry("mb_history", sbi->s_mb_proc); | |
2175 | ||
2176 | kfree(sbi->s_mb_history); | |
2177 | } | |
2178 | ||
2179 | static void ext4_mb_history_init(struct super_block *sb) | |
2180 | { | |
2181 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2182 | int i; | |
2183 | ||
2184 | if (sbi->s_mb_proc != NULL) { | |
46fe74f2 DL |
2185 | proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc, |
2186 | &ext4_mb_seq_history_fops, sb); | |
2187 | proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc, | |
2188 | &ext4_mb_seq_groups_fops, sb); | |
c9de560d AT |
2189 | } |
2190 | ||
2191 | sbi->s_mb_history_max = 1000; | |
2192 | sbi->s_mb_history_cur = 0; | |
2193 | spin_lock_init(&sbi->s_mb_history_lock); | |
2194 | i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history); | |
74767c5a | 2195 | sbi->s_mb_history = kzalloc(i, GFP_KERNEL); |
c9de560d AT |
2196 | /* if we can't allocate history, then we simply won't use it */ | |
2197 | } | |
2198 | ||
4ddfef7b ES |
2199 | static noinline_for_stack void |
2200 | ext4_mb_store_history(struct ext4_allocation_context *ac) | |
c9de560d AT |
2201 | { |
2202 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
2203 | struct ext4_mb_history h; | |
2204 | ||
2205 | if (unlikely(sbi->s_mb_history == NULL)) | |
2206 | return; | |
2207 | ||
2208 | if (!(ac->ac_op & sbi->s_mb_history_filter)) | |
2209 | return; | |
2210 | ||
2211 | h.op = ac->ac_op; | |
2212 | h.pid = current->pid; | |
2213 | h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0; | |
2214 | h.orig = ac->ac_o_ex; | |
2215 | h.result = ac->ac_b_ex; | |
2216 | h.flags = ac->ac_flags; | |
2217 | h.found = ac->ac_found; | |
2218 | h.groups = ac->ac_groups_scanned; | |
2219 | h.cr = ac->ac_criteria; | |
2220 | h.tail = ac->ac_tail; | |
2221 | h.buddy = ac->ac_buddy; | |
2222 | h.merged = 0; | |
2223 | if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) { | |
2224 | if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && | |
2225 | ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) | |
2226 | h.merged = 1; | |
2227 | h.goal = ac->ac_g_ex; | |
2228 | h.result = ac->ac_f_ex; | |
2229 | } | |
2230 | ||
2231 | spin_lock(&sbi->s_mb_history_lock); | |
2232 | memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h)); | |
2233 | if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max) | |
2234 | sbi->s_mb_history_cur = 0; | |
2235 | spin_unlock(&sbi->s_mb_history_lock); | |
2236 | } | |
2237 | ||
2238 | #else | |
2239 | #define ext4_mb_history_release(sb) | |
2240 | #define ext4_mb_history_init(sb) | |
2241 | #endif | |
2242 | ||
5f21b0e6 FB |
2243 | |
2244 | /* Create and initialize ext4_group_info data for the given group. */ | |
2245 | int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, | |
2246 | struct ext4_group_desc *desc) | |
2247 | { | |
2248 | int i, len; | |
2249 | int metalen = 0; | |
2250 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2251 | struct ext4_group_info **meta_group_info; | |
2252 | ||
2253 | /* | |
2254 | * First check whether this group is the first group of a descriptor block. | |
2255 | * If it is, we have to allocate a new table of pointers | |
2256 | * to ext4_group_info structures | |
2257 | */ | |
2258 | if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { | |
2259 | metalen = sizeof(*meta_group_info) << | |
2260 | EXT4_DESC_PER_BLOCK_BITS(sb); | |
2261 | meta_group_info = kmalloc(metalen, GFP_KERNEL); | |
2262 | if (meta_group_info == NULL) { | |
2263 | printk(KERN_ERR "EXT4-fs: can't allocate mem for a " | |
2264 | "buddy group\n"); | |
2265 | goto exit_meta_group_info; | |
2266 | } | |
2267 | sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = | |
2268 | meta_group_info; | |
2269 | } | |
2270 | ||
2271 | /* | |
2272 | * calculate the needed size. If you change the bb_counters size, | |
2273 | * don't forget about ext4_mb_generate_buddy() | |
2274 | */ | |
2275 | len = offsetof(typeof(**meta_group_info), | |
2276 | bb_counters[sb->s_blocksize_bits + 2]); | |
2277 | ||
2278 | meta_group_info = | |
2279 | sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; | |
2280 | i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); | |
2281 | ||
2282 | meta_group_info[i] = kzalloc(len, GFP_KERNEL); | |
2283 | if (meta_group_info[i] == NULL) { | |
2284 | printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n"); | |
2285 | goto exit_group_info; | |
2286 | } | |
2287 | set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, | |
2288 | &(meta_group_info[i]->bb_state)); | |
2289 | ||
2290 | /* | |
2291 | * initialize bb_free to be able to skip | |
2292 | * empty groups without initialization | |
2293 | */ | |
2294 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | |
2295 | meta_group_info[i]->bb_free = | |
2296 | ext4_free_blocks_after_init(sb, group, desc); | |
2297 | } else { | |
2298 | meta_group_info[i]->bb_free = | |
2299 | le16_to_cpu(desc->bg_free_blocks_count); | |
2300 | } | |
2301 | ||
2302 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); | |
2303 | ||
2304 | #ifdef DOUBLE_CHECK | |
2305 | { | |
2306 | struct buffer_head *bh; | |
2307 | meta_group_info[i]->bb_bitmap = | |
2308 | kmalloc(sb->s_blocksize, GFP_KERNEL); | |
2309 | BUG_ON(meta_group_info[i]->bb_bitmap == NULL); | |
2310 | bh = ext4_read_block_bitmap(sb, group); | |
2311 | BUG_ON(bh == NULL); | |
2312 | memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, | |
2313 | sb->s_blocksize); | |
2314 | put_bh(bh); | |
2315 | } | |
2316 | #endif | |
2317 | ||
2318 | return 0; | |
2319 | ||
2320 | exit_group_info: | |
2321 | /* If a meta_group_info table has been allocated, release it now */ | |
2322 | if (group % EXT4_DESC_PER_BLOCK(sb) == 0) | |
2323 | kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); | |
2324 | exit_meta_group_info: | |
2325 | return -ENOMEM; | |
2326 | } /* ext4_mb_add_groupinfo */ | |
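| /* | |
| * Illustrative example of the two-level lookup above, assuming 4KB blocks | |
| * and 32-byte descriptors (EXT4_DESC_PER_BLOCK == 128): group 300 lives in | |
| * s_group_info[300 >> 7] == s_group_info[2], at slot 300 & 127 == 44. | |
| */ | |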
2327 | ||
2328 | /* | |
2329 | * Add a group to the existing groups. | |
2330 | * This function is used for online resize | |
2331 | */ | |
2332 | int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group, | |
2333 | struct ext4_group_desc *desc) | |
2334 | { | |
2335 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2336 | struct inode *inode = sbi->s_buddy_cache; | |
2337 | int blocks_per_page; | |
2338 | int block; | |
2339 | int pnum; | |
2340 | struct page *page; | |
2341 | int err; | |
2342 | ||
2343 | /* Add group based on group descriptor*/ | |
2344 | err = ext4_mb_add_groupinfo(sb, group, desc); | |
2345 | if (err) | |
2346 | return err; | |
2347 | ||
2348 | /* | |
2349 | * Cache pages containing dynamic mb_alloc data (buddy and bitmap | |
2350 | * data) are marked not up to date so that they will be re-initialized | |
2351 | * during the next call to ext4_mb_load_buddy | |
2352 | */ | |
2353 | ||
2354 | /* Set buddy page as not up to date */ | |
2355 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | |
2356 | block = group * 2; | |
2357 | pnum = block / blocks_per_page; | |
2358 | page = find_get_page(inode->i_mapping, pnum); | |
2359 | if (page != NULL) { | |
2360 | ClearPageUptodate(page); | |
2361 | page_cache_release(page); | |
2362 | } | |
2363 | ||
2364 | /* Set bitmap page as not up to date */ | |
2365 | block++; | |
2366 | pnum = block / blocks_per_page; | |
2367 | page = find_get_page(inode->i_mapping, pnum); | |
2368 | if (page != NULL) { | |
2369 | ClearPageUptodate(page); | |
2370 | page_cache_release(page); | |
2371 | } | |
2372 | ||
2373 | return 0; | |
2374 | } | |
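| /* | |
| * Illustrative example of the page math above, assuming 4KB blocks and 4KB | |
| * pages (blocks_per_page == 1): group 5 keeps its buddy in logical block | |
| * 2 * 5 == 10 and its bitmap in block 11 of the buddy-cache inode, so | |
| * pages 10 and 11 are the ones invalidated. | |
| */ | |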
2375 | ||
2376 | /* | |
2377 | * Update an existing group. | |
2378 | * This function is used for online resize | |
2379 | */ | |
2380 | void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add) | |
2381 | { | |
2382 | grp->bb_free += add; | |
2383 | } | |
2384 | ||
c9de560d AT |
2385 | static int ext4_mb_init_backend(struct super_block *sb) |
2386 | { | |
2387 | ext4_group_t i; | |
5f21b0e6 | 2388 | int metalen; |
c9de560d | 2389 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
5f21b0e6 FB |
2390 | struct ext4_super_block *es = sbi->s_es; |
2391 | int num_meta_group_infos; | |
2392 | int num_meta_group_infos_max; | |
2393 | int array_size; | |
c9de560d | 2394 | struct ext4_group_info **meta_group_info; |
5f21b0e6 FB |
2395 | struct ext4_group_desc *desc; |
2396 | ||
2397 | /* This is the number of blocks used by GDT */ | |
2398 | num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - | |
2399 | 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); | |
2400 | ||
2401 | /* | |
2402 | * This is the total number of blocks used by GDT including | |
2403 | * the number of reserved blocks for GDT. | |
2404 | * The s_group_info array is allocated with this value | |
2405 | * to allow a clean online resize without a complex | |
2406 | * manipulation of pointers. | |
2407 | * The drawback is the unused memory when no resize | |
2408 | * occurs, but it's very low in terms of pages | |
2409 | * (see comments below) | |
2410 | * Need to handle this properly when META_BG resizing is allowed | |
2411 | */ | |
2412 | num_meta_group_infos_max = num_meta_group_infos + | |
2413 | le16_to_cpu(es->s_reserved_gdt_blocks); | |
c9de560d | 2414 | |
5f21b0e6 FB |
2415 | /* |
2416 | * array_size is the size of the s_group_info array. We round it | |
2417 | * up to the next power of two because kmalloc rounds this way | |
2418 | * internally anyway, so we can get some more memory | |
2419 | * for free here (e.g. may be used for META_BG resize). | |
2420 | */ | |
2421 | array_size = 1; | |
2422 | while (array_size < sizeof(*sbi->s_group_info) * | |
2423 | num_meta_group_infos_max) | |
2424 | array_size = array_size << 1; | |
c9de560d AT |
2425 | /* An 8TB filesystem with 64-bit pointers requires a 4096 byte |
2426 | * kmalloc. A 128KB kmalloc should suffice for a 256TB filesystem. | |
2427 | * So a two level scheme suffices for now. */ | |
5f21b0e6 | 2428 | sbi->s_group_info = kmalloc(array_size, GFP_KERNEL); |
c9de560d AT |
2429 | if (sbi->s_group_info == NULL) { |
2430 | printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n"); | |
2431 | return -ENOMEM; | |
2432 | } | |
2433 | sbi->s_buddy_cache = new_inode(sb); | |
2434 | if (sbi->s_buddy_cache == NULL) { | |
2435 | printk(KERN_ERR "EXT4-fs: can't get new inode\n"); | |
2436 | goto err_freesgi; | |
2437 | } | |
2438 | EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; | |
2439 | ||
2440 | metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb); | |
2441 | for (i = 0; i < num_meta_group_infos; i++) { | |
2442 | if ((i + 1) == num_meta_group_infos) | |
2443 | metalen = sizeof(*meta_group_info) * | |
2444 | (sbi->s_groups_count - | |
2445 | (i << EXT4_DESC_PER_BLOCK_BITS(sb))); | |
2446 | meta_group_info = kmalloc(metalen, GFP_KERNEL); | |
2447 | if (meta_group_info == NULL) { | |
2448 | printk(KERN_ERR "EXT4-fs: can't allocate mem for a " | |
2449 | "buddy group\n"); | |
2450 | goto err_freemeta; | |
2451 | } | |
2452 | sbi->s_group_info[i] = meta_group_info; | |
2453 | } | |
2454 | ||
c9de560d | 2455 | for (i = 0; i < sbi->s_groups_count; i++) { |
c9de560d AT |
2456 | desc = ext4_get_group_desc(sb, i, NULL); |
2457 | if (desc == NULL) { | |
2458 | printk(KERN_ERR | |
2459 | "EXT4-fs: can't read descriptor %lu\n", i); | |
2460 | goto err_freebuddy; | |
2461 | } | |
5f21b0e6 FB |
2462 | if (ext4_mb_add_groupinfo(sb, i, desc) != 0) |
2463 | goto err_freebuddy; | |
c9de560d AT |
2464 | } |
2465 | ||
2466 | return 0; | |
2467 | ||
2468 | err_freebuddy: | |
f1fa3342 | 2469 | while (i-- > 0) |
c9de560d | 2470 | kfree(ext4_get_group_info(sb, i)); |
c9de560d AT |
2471 | i = num_meta_group_infos; |
2472 | err_freemeta: | |
f1fa3342 | 2473 | while (i-- > 0) |
c9de560d AT |
2474 | kfree(sbi->s_group_info[i]); |
2475 | iput(sbi->s_buddy_cache); | |
2476 | err_freesgi: | |
2477 | kfree(sbi->s_group_info); | |
2478 | return -ENOMEM; | |
2479 | } | |
2480 | ||
2481 | int ext4_mb_init(struct super_block *sb, int needs_recovery) | |
2482 | { | |
2483 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
6be2ded1 | 2484 | unsigned i, j; |
c9de560d AT |
2485 | unsigned offset; |
2486 | unsigned max; | |
74767c5a | 2487 | int ret; |
c9de560d AT |
2488 | |
2489 | if (!test_opt(sb, MBALLOC)) | |
2490 | return 0; | |
2491 | ||
2492 | i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short); | |
2493 | ||
2494 | sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); | |
2495 | if (sbi->s_mb_offsets == NULL) { | |
2496 | clear_opt(sbi->s_mount_opt, MBALLOC); | |
2497 | return -ENOMEM; | |
2498 | } | |
2499 | sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); | |
2500 | if (sbi->s_mb_maxs == NULL) { | |
2501 | clear_opt(sbi->s_mount_opt, MBALLOC); | |
2502 | kfree(sbi->s_mb_maxs); | |
2503 | return -ENOMEM; | |
2504 | } | |
2505 | ||
2506 | /* order 0 is regular bitmap */ | |
2507 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; | |
2508 | sbi->s_mb_offsets[0] = 0; | |
2509 | ||
2510 | i = 1; | |
2511 | offset = 0; | |
2512 | max = sb->s_blocksize << 2; | |
2513 | do { | |
2514 | sbi->s_mb_offsets[i] = offset; | |
2515 | sbi->s_mb_maxs[i] = max; | |
2516 | offset += 1 << (sb->s_blocksize_bits - i); | |
2517 | max = max >> 1; | |
2518 | i++; | |
2519 | } while (i <= sb->s_blocksize_bits + 1); | |
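| /* | |
| * Illustrative values produced by the loop above for 4KB blocks | |
| * (s_blocksize_bits == 12): | |
| * order 1: offset 0, max 16384 | |
| * order 2: offset 2048, max 8192 | |
| * order 3: offset 3072, max 4096 | |
| * order 4: offset 3584, max 2048 | |
| * i.e. each higher order's bitmap is packed right after the previous one. | |
| */ | |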
2520 | ||
2521 | /* init file for buddy data */ | |
74767c5a SF |
2522 | ret = ext4_mb_init_backend(sb); |
2523 | if (ret != 0) { | |
c9de560d AT |
2524 | clear_opt(sbi->s_mount_opt, MBALLOC); |
2525 | kfree(sbi->s_mb_offsets); | |
2526 | kfree(sbi->s_mb_maxs); | |
74767c5a | 2527 | return ret; |
c9de560d AT |
2528 | } |
2529 | ||
2530 | spin_lock_init(&sbi->s_md_lock); | |
2531 | INIT_LIST_HEAD(&sbi->s_active_transaction); | |
2532 | INIT_LIST_HEAD(&sbi->s_closed_transaction); | |
2533 | INIT_LIST_HEAD(&sbi->s_committed_transaction); | |
2534 | spin_lock_init(&sbi->s_bal_lock); | |
2535 | ||
2536 | sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; | |
2537 | sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; | |
2538 | sbi->s_mb_stats = MB_DEFAULT_STATS; | |
2539 | sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; | |
2540 | sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; | |
2541 | sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT; | |
2542 | sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC; | |
2543 | ||
1320cbcf | 2544 | i = sizeof(struct ext4_locality_group) * nr_cpu_ids; |
c9de560d AT |
2545 | sbi->s_locality_groups = kmalloc(i, GFP_KERNEL); |
2546 | if (sbi->s_locality_groups == NULL) { | |
2547 | clear_opt(sbi->s_mount_opt, MBALLOC); | |
2548 | kfree(sbi->s_mb_offsets); | |
2549 | kfree(sbi->s_mb_maxs); | |
2550 | return -ENOMEM; | |
2551 | } | |
1320cbcf | 2552 | for (i = 0; i < nr_cpu_ids; i++) { |
c9de560d AT |
2553 | struct ext4_locality_group *lg; |
2554 | lg = &sbi->s_locality_groups[i]; | |
2555 | mutex_init(&lg->lg_mutex); | |
6be2ded1 AK |
2556 | for (j = 0; j < PREALLOC_TB_SIZE; j++) |
2557 | INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); | |
c9de560d AT |
2558 | spin_lock_init(&lg->lg_prealloc_lock); |
2559 | } | |
2560 | ||
2561 | ext4_mb_init_per_dev_proc(sb); | |
2562 | ext4_mb_history_init(sb); | |
2563 | ||
4776004f | 2564 | printk(KERN_INFO "EXT4-fs: mballoc enabled\n"); |
c9de560d AT |
2565 | return 0; |
2566 | } | |
2567 | ||
2568 | /* needs to be called with the ext4 group lock held (ext4_lock_group) */ | |
2569 | static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) | |
2570 | { | |
2571 | struct ext4_prealloc_space *pa; | |
2572 | struct list_head *cur, *tmp; | |
2573 | int count = 0; | |
2574 | ||
2575 | list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { | |
2576 | pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); | |
2577 | list_del(&pa->pa_group_list); | |
2578 | count++; | |
2579 | kfree(pa); | |
2580 | } | |
2581 | if (count) | |
2582 | mb_debug("mballoc: %u PAs left\n", count); | |
2583 | ||
2584 | } | |
2585 | ||
2586 | int ext4_mb_release(struct super_block *sb) | |
2587 | { | |
2588 | ext4_group_t i; | |
2589 | int num_meta_group_infos; | |
2590 | struct ext4_group_info *grinfo; | |
2591 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2592 | ||
2593 | if (!test_opt(sb, MBALLOC)) | |
2594 | return 0; | |
2595 | ||
2596 | /* release freed, non-committed blocks */ | |
2597 | spin_lock(&sbi->s_md_lock); | |
2598 | list_splice_init(&sbi->s_closed_transaction, | |
2599 | &sbi->s_committed_transaction); | |
2600 | list_splice_init(&sbi->s_active_transaction, | |
2601 | &sbi->s_committed_transaction); | |
2602 | spin_unlock(&sbi->s_md_lock); | |
2603 | ext4_mb_free_committed_blocks(sb); | |
2604 | ||
2605 | if (sbi->s_group_info) { | |
2606 | for (i = 0; i < sbi->s_groups_count; i++) { | |
2607 | grinfo = ext4_get_group_info(sb, i); | |
2608 | #ifdef DOUBLE_CHECK | |
2609 | kfree(grinfo->bb_bitmap); | |
2610 | #endif | |
2611 | ext4_lock_group(sb, i); | |
2612 | ext4_mb_cleanup_pa(grinfo); | |
2613 | ext4_unlock_group(sb, i); | |
2614 | kfree(grinfo); | |
2615 | } | |
2616 | num_meta_group_infos = (sbi->s_groups_count + | |
2617 | EXT4_DESC_PER_BLOCK(sb) - 1) >> | |
2618 | EXT4_DESC_PER_BLOCK_BITS(sb); | |
2619 | for (i = 0; i < num_meta_group_infos; i++) | |
2620 | kfree(sbi->s_group_info[i]); | |
2621 | kfree(sbi->s_group_info); | |
2622 | } | |
2623 | kfree(sbi->s_mb_offsets); | |
2624 | kfree(sbi->s_mb_maxs); | |
2625 | if (sbi->s_buddy_cache) | |
2626 | iput(sbi->s_buddy_cache); | |
2627 | if (sbi->s_mb_stats) { | |
2628 | printk(KERN_INFO | |
2629 | "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n", | |
2630 | atomic_read(&sbi->s_bal_allocated), | |
2631 | atomic_read(&sbi->s_bal_reqs), | |
2632 | atomic_read(&sbi->s_bal_success)); | |
2633 | printk(KERN_INFO | |
2634 | "EXT4-fs: mballoc: %u extents scanned, %u goal hits, " | |
2635 | "%u 2^N hits, %u breaks, %u lost\n", | |
2636 | atomic_read(&sbi->s_bal_ex_scanned), | |
2637 | atomic_read(&sbi->s_bal_goals), | |
2638 | atomic_read(&sbi->s_bal_2orders), | |
2639 | atomic_read(&sbi->s_bal_breaks), | |
2640 | atomic_read(&sbi->s_mb_lost_chunks)); | |
2641 | printk(KERN_INFO | |
2642 | "EXT4-fs: mballoc: %lu generated and it took %Lu\n", | |
2643 | sbi->s_mb_buddies_generated++, | |
2644 | sbi->s_mb_generation_time); | |
2645 | printk(KERN_INFO | |
2646 | "EXT4-fs: mballoc: %u preallocated, %u discarded\n", | |
2647 | atomic_read(&sbi->s_mb_preallocated), | |
2648 | atomic_read(&sbi->s_mb_discarded)); | |
2649 | } | |
2650 | ||
2651 | kfree(sbi->s_locality_groups); | |
2652 | ||
2653 | ext4_mb_history_release(sb); | |
2654 | ext4_mb_destroy_per_dev_proc(sb); | |
2655 | ||
2656 | return 0; | |
2657 | } | |
2658 | ||
4ddfef7b ES |
2659 | static noinline_for_stack void |
2660 | ext4_mb_free_committed_blocks(struct super_block *sb) | |
c9de560d AT |
2661 | { |
2662 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2663 | int err; | |
2664 | int i; | |
2665 | int count = 0; | |
2666 | int count2 = 0; | |
2667 | struct ext4_free_metadata *md; | |
2668 | struct ext4_buddy e4b; | |
2669 | ||
2670 | if (list_empty(&sbi->s_committed_transaction)) | |
2671 | return; | |
2672 | ||
2673 | /* there are committed blocks yet to be freed */ | |
2674 | do { | |
2675 | /* get next array of blocks */ | |
2676 | md = NULL; | |
2677 | spin_lock(&sbi->s_md_lock); | |
2678 | if (!list_empty(&sbi->s_committed_transaction)) { | |
2679 | md = list_entry(sbi->s_committed_transaction.next, | |
2680 | struct ext4_free_metadata, list); | |
2681 | list_del(&md->list); | |
2682 | } | |
2683 | spin_unlock(&sbi->s_md_lock); | |
2684 | ||
2685 | if (md == NULL) | |
2686 | break; | |
2687 | ||
2688 | mb_debug("gonna free %u blocks in group %lu (0x%p):", | |
2689 | md->num, md->group, md); | |
2690 | ||
2691 | err = ext4_mb_load_buddy(sb, md->group, &e4b); | |
2692 | /* we expect to find existing buddy because it's pinned */ | |
2693 | BUG_ON(err != 0); | |
2694 | ||
2695 | /* there are blocks to put in buddy to make them really free */ | |
2696 | count += md->num; | |
2697 | count2++; | |
2698 | ext4_lock_group(sb, md->group); | |
2699 | for (i = 0; i < md->num; i++) { | |
2700 | mb_debug(" %u", md->blocks[i]); | |
7e5a8cdd | 2701 | mb_free_blocks(NULL, &e4b, md->blocks[i], 1); |
c9de560d AT |
2702 | } |
2703 | mb_debug("\n"); | |
2704 | ext4_unlock_group(sb, md->group); | |
2705 | ||
2706 | /* balance refcounts from ext4_mb_free_metadata() */ | |
2707 | page_cache_release(e4b.bd_buddy_page); | |
2708 | page_cache_release(e4b.bd_bitmap_page); | |
2709 | ||
2710 | kfree(md); | |
2711 | ext4_mb_release_desc(&e4b); | |
2712 | ||
2713 | } while (md); | |
2714 | ||
2715 | mb_debug("freed %u blocks in %u structures\n", count, count2); | |
2716 | } | |
2717 | ||
c9de560d AT |
2718 | #define EXT4_MB_STATS_NAME "stats" |
2719 | #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan" | |
2720 | #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan" | |
2721 | #define EXT4_MB_ORDER2_REQ "order2_req" | |
2722 | #define EXT4_MB_STREAM_REQ "stream_req" | |
2723 | #define EXT4_MB_GROUP_PREALLOC "group_prealloc" | |
2724 | ||
2725 | ||
2726 | ||
91d99827 AD |
2727 | #define MB_PROC_FOPS(name) \ |
2728 | static int ext4_mb_##name##_proc_show(struct seq_file *m, void *v) \ | |
c9de560d | 2729 | { \ |
91d99827 AD |
2730 | struct ext4_sb_info *sbi = m->private; \ |
2731 | \ | |
2732 | seq_printf(m, "%ld\n", sbi->s_mb_##name); \ | |
2733 | return 0; \ | |
2734 | } \ | |
2735 | \ | |
2736 | static int ext4_mb_##name##_proc_open(struct inode *inode, struct file *file)\ | |
2737 | { \ | |
2738 | return single_open(file, ext4_mb_##name##_proc_show, PDE(inode)->data);\ | |
2739 | } \ | |
2740 | \ | |
2741 | static ssize_t ext4_mb_##name##_proc_write(struct file *file, \ | |
2742 | const char __user *buf, size_t cnt, loff_t *ppos) \ | |
c9de560d | 2743 | { \ |
91d99827 | 2744 | struct ext4_sb_info *sbi = PDE(file->f_path.dentry->d_inode)->data;\ |
c9de560d AT |
2745 | char str[32]; \ |
2746 | long value; \ | |
2747 | if (cnt >= sizeof(str)) \ | |
2748 | return -EINVAL; \ | |
2749 | if (copy_from_user(str, buf, cnt)) \ | |
2750 | return -EFAULT; \ | |
2751 | value = simple_strtol(str, NULL, 0); \ | |
2752 | if (value <= 0) \ | |
2753 | return -ERANGE; \ | |
2754 | sbi->s_mb_##name = value; \ | |
2755 | return cnt; \ | |
91d99827 AD |
2756 | } \ |
2757 | \ | |
2758 | static const struct file_operations ext4_mb_##name##_proc_fops = { \ | |
2759 | .owner = THIS_MODULE, \ | |
2760 | .open = ext4_mb_##name##_proc_open, \ | |
2761 | .read = seq_read, \ | |
2762 | .llseek = seq_lseek, \ | |
2763 | .release = single_release, \ | |
2764 | .write = ext4_mb_##name##_proc_write, \ | |
2765 | }; | |
c9de560d | 2766 | |
91d99827 AD |
2767 | MB_PROC_FOPS(stats); |
2768 | MB_PROC_FOPS(max_to_scan); | |
2769 | MB_PROC_FOPS(min_to_scan); | |
2770 | MB_PROC_FOPS(order2_reqs); | |
2771 | MB_PROC_FOPS(stream_request); | |
2772 | MB_PROC_FOPS(group_prealloc); | |
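| /* | |
| * The resulting proc files can be tuned from userspace, e.g. (the device | |
| * name here is illustrative): | |
| * cat /proc/fs/ext4/sda1/max_to_scan | |
| * echo 256 > /proc/fs/ext4/sda1/max_to_scan | |
| */ | |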
c9de560d AT |
2773 | |
2774 | #define MB_PROC_HANDLER(name, var) \ | |
2775 | do { \ | |
91d99827 AD |
2776 | proc = proc_create_data(name, mode, sbi->s_mb_proc, \ |
2777 | &ext4_mb_##var##_proc_fops, sbi); \ | |
c9de560d AT |
2778 | if (proc == NULL) { \ |
2779 | printk(KERN_ERR "EXT4-fs: can't create %s\n", name); \ | |
2780 | goto err_out; \ | |
2781 | } \ | |
c9de560d AT |
2782 | } while (0) |
2783 | ||
2784 | static int ext4_mb_init_per_dev_proc(struct super_block *sb) | |
2785 | { | |
2786 | mode_t mode = S_IFREG | S_IRUGO | S_IWUSR; | |
2787 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2788 | struct proc_dir_entry *proc; | |
899fc1a4 | 2789 | char devname[BDEVNAME_SIZE], *p; |
c9de560d | 2790 | |
cfbe7e4f SF |
2791 | if (proc_root_ext4 == NULL) { |
2792 | sbi->s_mb_proc = NULL; | |
2793 | return -EINVAL; | |
2794 | } | |
f36f21ec | 2795 | bdevname(sb->s_bdev, devname); |
899fc1a4 AD |
2796 | p = devname; |
2797 | while ((p = strchr(p, '/'))) | |
2798 | *p = '!'; | |
2799 | ||
c9de560d | 2800 | sbi->s_mb_proc = proc_mkdir(devname, proc_root_ext4); |
899fc1a4 AD |
2801 | if (!sbi->s_mb_proc) |
2802 | goto err_create_dir; | |
c9de560d AT |
2803 | |
2804 | MB_PROC_HANDLER(EXT4_MB_STATS_NAME, stats); | |
2805 | MB_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, max_to_scan); | |
2806 | MB_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, min_to_scan); | |
2807 | MB_PROC_HANDLER(EXT4_MB_ORDER2_REQ, order2_reqs); | |
2808 | MB_PROC_HANDLER(EXT4_MB_STREAM_REQ, stream_request); | |
2809 | MB_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, group_prealloc); | |
2810 | ||
2811 | return 0; | |
2812 | ||
2813 | err_out: | |
c9de560d AT |
2814 | remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc); |
2815 | remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc); | |
2816 | remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc); | |
2817 | remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc); | |
2818 | remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc); | |
2819 | remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc); | |
2820 | remove_proc_entry(devname, proc_root_ext4); | |
2821 | sbi->s_mb_proc = NULL; | |
899fc1a4 AD |
2822 | err_create_dir: |
2823 | printk(KERN_ERR "EXT4-fs: Unable to create %s\n", devname); | |
c9de560d AT |
2824 | |
2825 | return -ENOMEM; | |
2826 | } | |
2827 | ||
2828 | static int ext4_mb_destroy_per_dev_proc(struct super_block *sb) | |
2829 | { | |
2830 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
899fc1a4 | 2831 | char devname[BDEVNAME_SIZE], *p; |
c9de560d AT |
2832 | |
2833 | if (sbi->s_mb_proc == NULL) | |
2834 | return -EINVAL; | |
2835 | ||
f36f21ec | 2836 | bdevname(sb->s_bdev, devname); |
899fc1a4 AD |
2837 | p = devname; |
2838 | while ((p = strchr(p, '/'))) | |
2839 | *p = '!'; | |
c9de560d AT |
2840 | remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc); |
2841 | remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc); | |
2842 | remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc); | |
2843 | remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc); | |
2844 | remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc); | |
2845 | remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc); | |
2846 | remove_proc_entry(devname, proc_root_ext4); | |
2847 | ||
2848 | return 0; | |
2849 | } | |
2850 | ||
2851 | int __init init_ext4_mballoc(void) | |
2852 | { | |
2853 | ext4_pspace_cachep = | |
2854 | kmem_cache_create("ext4_prealloc_space", | |
2855 | sizeof(struct ext4_prealloc_space), | |
2856 | 0, SLAB_RECLAIM_ACCOUNT, NULL); | |
2857 | if (ext4_pspace_cachep == NULL) | |
2858 | return -ENOMEM; | |
2859 | ||
256bdb49 ES |
2860 | ext4_ac_cachep = |
2861 | kmem_cache_create("ext4_alloc_context", | |
2862 | sizeof(struct ext4_allocation_context), | |
2863 | 0, SLAB_RECLAIM_ACCOUNT, NULL); | |
2864 | if (ext4_ac_cachep == NULL) { | |
2865 | kmem_cache_destroy(ext4_pspace_cachep); | |
2866 | return -ENOMEM; | |
2867 | } | |
c9de560d | 2868 | #ifdef CONFIG_PROC_FS |
36a5aeb8 | 2869 | proc_root_ext4 = proc_mkdir("fs/ext4", NULL); |
c9de560d | 2870 | if (proc_root_ext4 == NULL) |
36a5aeb8 | 2871 | printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n"); |
c9de560d | 2872 | #endif |
c9de560d AT |
2873 | return 0; |
2874 | } | |
2875 | ||
2876 | void exit_ext4_mballoc(void) | |
2877 | { | |
2878 | /* XXX: synchronize_rcu(); */ | |
2879 | kmem_cache_destroy(ext4_pspace_cachep); | |
256bdb49 | 2880 | kmem_cache_destroy(ext4_ac_cachep); |
c9de560d | 2881 | #ifdef CONFIG_PROC_FS |
36a5aeb8 | 2882 | remove_proc_entry("fs/ext4", NULL); |
c9de560d AT |
2883 | #endif |
2884 | } | |
2885 | ||
2886 | ||
2887 | /* | |
2888 | * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps | |
2889 | * Returns 0 on success or an error code | |
2890 | */ | |
4ddfef7b ES |
2891 | static noinline_for_stack int |
2892 | ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |
6bc6e63f | 2893 | handle_t *handle, unsigned long reserv_blks) |
c9de560d AT |
2894 | { |
2895 | struct buffer_head *bitmap_bh = NULL; | |
2896 | struct ext4_super_block *es; | |
2897 | struct ext4_group_desc *gdp; | |
2898 | struct buffer_head *gdp_bh; | |
2899 | struct ext4_sb_info *sbi; | |
2900 | struct super_block *sb; | |
2901 | ext4_fsblk_t block; | |
519deca0 | 2902 | int err, len; |
c9de560d AT |
2903 | |
2904 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
2905 | BUG_ON(ac->ac_b_ex.fe_len <= 0); | |
2906 | ||
2907 | sb = ac->ac_sb; | |
2908 | sbi = EXT4_SB(sb); | |
2909 | es = sbi->s_es; | |
2910 | ||
c9de560d AT |
2911 | |
2912 | err = -EIO; | |
574ca174 | 2913 | bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); |
c9de560d AT |
2914 | if (!bitmap_bh) |
2915 | goto out_err; | |
2916 | ||
2917 | err = ext4_journal_get_write_access(handle, bitmap_bh); | |
2918 | if (err) | |
2919 | goto out_err; | |
2920 | ||
2921 | err = -EIO; | |
2922 | gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); | |
2923 | if (!gdp) | |
2924 | goto out_err; | |
2925 | ||
03cddb80 AK |
2926 | ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group, |
2927 | gdp->bg_free_blocks_count); | |
2928 | ||
c9de560d AT |
2929 | err = ext4_journal_get_write_access(handle, gdp_bh); |
2930 | if (err) | |
2931 | goto out_err; | |
2932 | ||
2933 | block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb) | |
2934 | + ac->ac_b_ex.fe_start | |
2935 | + le32_to_cpu(es->s_first_data_block); | |
2936 | ||
519deca0 AK |
2937 | len = ac->ac_b_ex.fe_len; |
2938 | if (in_range(ext4_block_bitmap(sb, gdp), block, len) || | |
2939 | in_range(ext4_inode_bitmap(sb, gdp), block, len) || | |
2940 | in_range(block, ext4_inode_table(sb, gdp), | |
2941 | EXT4_SB(sb)->s_itb_per_group) || | |
2942 | in_range(block + len - 1, ext4_inode_table(sb, gdp), | |
2943 | EXT4_SB(sb)->s_itb_per_group)) { | |
46e665e9 | 2944 | ext4_error(sb, __func__, |
c9de560d AT |
2945 | "Allocating block in system zone - block = %llu", |
2946 | block); | |
519deca0 AK |
2947 | /* The file system is mounted not to panic on error. | |
2948 | * Fix the bitmap and repeat the block allocation. | |
2949 | * We leak some of the blocks here. | |
2950 | */ | |
2951 | mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), | |
2952 | bitmap_bh->b_data, ac->ac_b_ex.fe_start, | |
2953 | ac->ac_b_ex.fe_len); | |
2954 | err = ext4_journal_dirty_metadata(handle, bitmap_bh); | |
2955 | if (!err) | |
2956 | err = -EAGAIN; | |
2957 | goto out_err; | |
c9de560d AT |
2958 | } |
2959 | #ifdef AGGRESSIVE_CHECK | |
2960 | { | |
2961 | int i; | |
2962 | for (i = 0; i < ac->ac_b_ex.fe_len; i++) { | |
2963 | BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, | |
2964 | bitmap_bh->b_data)); | |
2965 | } | |
2966 | } | |
2967 | #endif | |
2968 | mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data, | |
2969 | ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); | |
2970 | ||
2971 | spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); | |
2972 | if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | |
2973 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | |
2974 | gdp->bg_free_blocks_count = | |
2975 | cpu_to_le16(ext4_free_blocks_after_init(sb, | |
2976 | ac->ac_b_ex.fe_group, | |
2977 | gdp)); | |
2978 | } | |
e8546d06 | 2979 | le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len); |
c9de560d AT |
2980 | gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); |
2981 | spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); | |
6bc6e63f | 2982 | percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len); |
d2a17637 | 2983 | /* |
6bc6e63f | 2984 | * Now reduce the dirty block count also. Should not go negative |
d2a17637 | 2985 | */ |
6bc6e63f AK |
2986 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
2987 | /* release all the reserved blocks if non delalloc */ | |
2988 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); | |
2989 | else | |
2990 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, | |
2991 | ac->ac_b_ex.fe_len); | |
c9de560d | 2992 | |
772cb7c8 JS |
2993 | if (sbi->s_log_groups_per_flex) { |
2994 | ext4_group_t flex_group = ext4_flex_group(sbi, | |
2995 | ac->ac_b_ex.fe_group); | |
2996 | spin_lock(sb_bgl_lock(sbi, flex_group)); | |
2997 | sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len; | |
2998 | spin_unlock(sb_bgl_lock(sbi, flex_group)); | |
2999 | } | |
3000 | ||
c9de560d AT |
3001 | err = ext4_journal_dirty_metadata(handle, bitmap_bh); |
3002 | if (err) | |
3003 | goto out_err; | |
3004 | err = ext4_journal_dirty_metadata(handle, gdp_bh); | |
3005 | ||
3006 | out_err: | |
3007 | sb->s_dirt = 1; | |
42a10add | 3008 | brelse(bitmap_bh); |
c9de560d AT |
3009 | return err; |
3010 | } | |
3011 | ||
3012 | /* | |
3013 | * Here we normalize the request for a locality group. | |
3014 | * Group requests are normalized to the s_stripe size if it is set via the mount | |
3015 | * option. If not, we set it to s_mb_group_prealloc, which can be configured via | |
3016 | * /proc/fs/ext4/<partition>/group_prealloc | |
3017 | * | |
3018 | * XXX: should we try to preallocate more than the group has now? | |
3019 | */ | |
3020 | static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) | |
3021 | { | |
3022 | struct super_block *sb = ac->ac_sb; | |
3023 | struct ext4_locality_group *lg = ac->ac_lg; | |
3024 | ||
3025 | BUG_ON(lg == NULL); | |
3026 | if (EXT4_SB(sb)->s_stripe) | |
3027 | ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe; | |
3028 | else | |
3029 | ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; | |
60bd63d1 | 3030 | mb_debug("#%u: goal %u blocks for locality group\n", |
c9de560d AT |
3031 | current->pid, ac->ac_g_ex.fe_len); |
3032 | } | |
3033 | ||
3034 | /* | |
3035 | * Normalization means making the request better in terms of | |
3036 | * size and alignment | |
3037 | */ | |
4ddfef7b ES |
3038 | static noinline_for_stack void |
3039 | ext4_mb_normalize_request(struct ext4_allocation_context *ac, | |
c9de560d AT |
3040 | struct ext4_allocation_request *ar) |
3041 | { | |
3042 | int bsbits, max; | |
3043 | ext4_lblk_t end; | |
c9de560d AT |
3044 | loff_t size, orig_size, start_off; |
3045 | ext4_lblk_t start, orig_start; | |
3046 | struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); | |
9a0762c5 | 3047 | struct ext4_prealloc_space *pa; |
c9de560d AT |
3048 | |
3049 | /* only normalize data requests; metadata requests | |
3050 | do not need preallocation */ | |
3051 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
3052 | return; | |
3053 | ||
3054 | /* sometimes the caller may want exact blocks */ | |
3055 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) | |
3056 | return; | |
3057 | ||
3058 | /* caller may indicate that preallocation isn't | |
3059 | * required (it's a tail, for example) */ | |
3060 | if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) | |
3061 | return; | |
3062 | ||
3063 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { | |
3064 | ext4_mb_normalize_group_request(ac); | |
3065 | return; | |
3066 | } | |
3067 | ||
3068 | bsbits = ac->ac_sb->s_blocksize_bits; | |
3069 | ||
3070 | /* first, let's learn the actual file size, | |
3071 | * assuming the current request is allocated */ | |
3072 | size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; | |
3073 | size = size << bsbits; | |
3074 | if (size < i_size_read(ac->ac_inode)) | |
3075 | size = i_size_read(ac->ac_inode); | |
3076 | ||
1930479c VC |
3077 | /* max size of free chunks */ |
3078 | max = 2 << bsbits; | |
c9de560d | 3079 | |
1930479c VC |
3080 | #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ |
3081 | (req <= (size) || max <= (chunk_size)) | |
c9de560d AT |
3082 | |
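/* Worked example (hypothetical numbers): with 4KB blocks, bsbits == 12 and
 * max == 2 << 12 == 8192, so NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max,
 * 2 * 1024) picks the 2MB bucket below only when the predicted size fits in
 * 4MB; with 1KB blocks max == 2048, so that bucket is taken regardless. */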
3083 | /* first, try to predict filesize */ | |
3084 | /* XXX: should this table be tunable? */ | |
3085 | start_off = 0; | |
3086 | if (size <= 16 * 1024) { | |
3087 | size = 16 * 1024; | |
3088 | } else if (size <= 32 * 1024) { | |
3089 | size = 32 * 1024; | |
3090 | } else if (size <= 64 * 1024) { | |
3091 | size = 64 * 1024; | |
3092 | } else if (size <= 128 * 1024) { | |
3093 | size = 128 * 1024; | |
3094 | } else if (size <= 256 * 1024) { | |
3095 | size = 256 * 1024; | |
3096 | } else if (size <= 512 * 1024) { | |
3097 | size = 512 * 1024; | |
3098 | } else if (size <= 1024 * 1024) { | |
3099 | size = 1024 * 1024; | |
1930479c | 3100 | } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { |
c9de560d | 3101 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
1930479c VC |
3102 | (21 - bsbits)) << 21; |
3103 | size = 2 * 1024 * 1024; | |
3104 | } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { | |
c9de560d AT |
3105 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
3106 | (22 - bsbits)) << 22; | |
3107 | size = 4 * 1024 * 1024; | |
3108 | } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, | |
1930479c | 3109 | (8<<20)>>bsbits, max, 8 * 1024)) { |
c9de560d AT |
3110 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
3111 | (23 - bsbits)) << 23; | |
3112 | size = 8 * 1024 * 1024; | |
3113 | } else { | |
3114 | start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits; | |
3115 | size = ac->ac_o_ex.fe_len << bsbits; | |
3116 | } | |
3117 | orig_size = size = size >> bsbits; | |
3118 | orig_start = start = start_off >> bsbits; | |
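/* Worked example (hypothetical numbers, 4KB blocks): a predicted size of
 * 100KB rounds up to the 128KB bucket above; a 3MB prediction takes the 2MB
 * branch, aligning start_off down to a 2MB boundary. After shifting back to
 * block units, the 2MB case gives orig_size == 512 blocks. */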
3119 | ||
3120 | /* don't cover already allocated blocks in selected range */ | |
3121 | if (ar->pleft && start <= ar->lleft) { | |
3122 | size -= ar->lleft + 1 - start; | |
3123 | start = ar->lleft + 1; | |
3124 | } | |
3125 | if (ar->pright && start + size - 1 >= ar->lright) | |
3126 | size -= start + size - ar->lright; | |
3127 | ||
3128 | end = start + size; | |
3129 | ||
3130 | /* check we don't cross already preallocated blocks */ | |
3131 | rcu_read_lock(); | |
9a0762c5 | 3132 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
c9de560d AT |
3133 | unsigned long pa_end; |
3134 | ||
c9de560d AT |
3135 | if (pa->pa_deleted) |
3136 | continue; | |
3137 | spin_lock(&pa->pa_lock); | |
3138 | if (pa->pa_deleted) { | |
3139 | spin_unlock(&pa->pa_lock); | |
3140 | continue; | |
3141 | } | |
3142 | ||
3143 | pa_end = pa->pa_lstart + pa->pa_len; | |
3144 | ||
3145 | /* PA must not overlap original request */ | |
3146 | BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || | |
3147 | ac->ac_o_ex.fe_logical < pa->pa_lstart)); | |
3148 | ||
3149 | /* skip PAs that the normalized request doesn't overlap with */ | |
3150 | if (pa->pa_lstart >= end) { | |
3151 | spin_unlock(&pa->pa_lock); | |
3152 | continue; | |
3153 | } | |
3154 | if (pa_end <= start) { | |
3155 | spin_unlock(&pa->pa_lock); | |
3156 | continue; | |
3157 | } | |
3158 | BUG_ON(pa->pa_lstart <= start && pa_end >= end); | |
3159 | ||
3160 | if (pa_end <= ac->ac_o_ex.fe_logical) { | |
3161 | BUG_ON(pa_end < start); | |
3162 | start = pa_end; | |
3163 | } | |
3164 | ||
3165 | if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { | |
3166 | BUG_ON(pa->pa_lstart > end); | |
3167 | end = pa->pa_lstart; | |
3168 | } | |
3169 | spin_unlock(&pa->pa_lock); | |
3170 | } | |
3171 | rcu_read_unlock(); | |
3172 | size = end - start; | |
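/* Worked example (hypothetical numbers): for a normalized window [0, 512)
 * with the original request at logical block 300, an existing PA covering
 * [128, 256) ends at or before the request, so the loop above lifts start
 * to 256 and the trimmed window becomes [256, 512), i.e. size == 256. */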
3173 | ||
3174 | /* XXX: extra loop to check we really don't overlap preallocations */ | |
3175 | rcu_read_lock(); | |
9a0762c5 | 3176 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
c9de560d | 3177 | unsigned long pa_end; |
c9de560d AT |
3178 | spin_lock(&pa->pa_lock); |
3179 | if (pa->pa_deleted == 0) { | |
3180 | pa_end = pa->pa_lstart + pa->pa_len; | |
3181 | BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); | |
3182 | } | |
3183 | spin_unlock(&pa->pa_lock); | |
3184 | } | |
3185 | rcu_read_unlock(); | |
3186 | ||
3187 | if (start + size <= ac->ac_o_ex.fe_logical && | |
3188 | start > ac->ac_o_ex.fe_logical) { | |
3189 | printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n", | |
3190 | (unsigned long) start, (unsigned long) size, | |
3191 | (unsigned long) ac->ac_o_ex.fe_logical); | |
3192 | } | |
3193 | BUG_ON(start + size <= ac->ac_o_ex.fe_logical && | |
3194 | start > ac->ac_o_ex.fe_logical); | |
3195 | BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); | |
3196 | ||
3197 | /* now prepare goal request */ | |
3198 | ||
3199 | /* XXX: is it better to align blocks w.r.t. logical | |
3200 | * placement or to satisfy a big request as is */ | |
3201 | ac->ac_g_ex.fe_logical = start; | |
3202 | ac->ac_g_ex.fe_len = size; | |
3203 | ||
3204 | /* define goal start in order to merge */ | |
3205 | if (ar->pright && (ar->lright == (start + size))) { | |
3206 | /* merge to the right */ | |
3207 | ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, | |
3208 | &ac->ac_f_ex.fe_group, | |
3209 | &ac->ac_f_ex.fe_start); | |
3210 | ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; | |
3211 | } | |
3212 | if (ar->pleft && (ar->lleft + 1 == start)) { | |
3213 | /* merge to the left */ | |
3214 | ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, | |
3215 | &ac->ac_f_ex.fe_group, | |
3216 | &ac->ac_f_ex.fe_start); | |
3217 | ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; | |
3218 | } | |
3219 | ||
3220 | mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size, | |
3221 | (unsigned) orig_size, (unsigned) start); | |
3222 | } | |
3223 | ||
3224 | static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) | |
3225 | { | |
3226 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
3227 | ||
3228 | if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { | |
3229 | atomic_inc(&sbi->s_bal_reqs); | |
3230 | atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); | |
3231 | if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len) | |
3232 | atomic_inc(&sbi->s_bal_success); | |
3233 | atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); | |
3234 | if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && | |
3235 | ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) | |
3236 | atomic_inc(&sbi->s_bal_goals); | |
3237 | if (ac->ac_found > sbi->s_mb_max_to_scan) | |
3238 | atomic_inc(&sbi->s_bal_breaks); | |
3239 | } | |
3240 | ||
3241 | ext4_mb_store_history(ac); | |
3242 | } | |
3243 | ||
3244 | /* | |
3245 | * use blocks preallocated to inode | |
3246 | */ | |
3247 | static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, | |
3248 | struct ext4_prealloc_space *pa) | |
3249 | { | |
3250 | ext4_fsblk_t start; | |
3251 | ext4_fsblk_t end; | |
3252 | int len; | |
3253 | ||
3254 | /* found preallocated blocks, use them */ | |
3255 | start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); | |
3256 | end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len); | |
3257 | len = end - start; | |
3258 | ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, | |
3259 | &ac->ac_b_ex.fe_start); | |
3260 | ac->ac_b_ex.fe_len = len; | |
3261 | ac->ac_status = AC_STATUS_FOUND; | |
3262 | ac->ac_pa = pa; | |
3263 | ||
3264 | BUG_ON(start < pa->pa_pstart); | |
3265 | BUG_ON(start + len > pa->pa_pstart + pa->pa_len); | |
3266 | BUG_ON(pa->pa_free < len); | |
3267 | pa->pa_free -= len; | |
3268 | ||
60bd63d1 | 3269 | mb_debug("use %llu/%u from inode pa %p\n", start, len, pa); |
c9de560d AT |
3270 | } |
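/* Worked example (hypothetical numbers): for a pa with pa_lstart == 100,
 * pa_pstart == 5000 and pa_len == 50, a request for 8 blocks at logical
 * block 110 gives start == 5010 and end == min(5050, 5018) == 5018, so
 * len == 8 and physical blocks 5010..5017 are handed out. */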
3271 | ||
3272 | /* | |
3273 | * use blocks preallocated to locality group | |
3274 | */ | |
3275 | static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, | |
3276 | struct ext4_prealloc_space *pa) | |
3277 | { | |
03cddb80 | 3278 | unsigned int len = ac->ac_o_ex.fe_len; |
6be2ded1 | 3279 | |
c9de560d AT |
3280 | ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, |
3281 | &ac->ac_b_ex.fe_group, | |
3282 | &ac->ac_b_ex.fe_start); | |
3283 | ac->ac_b_ex.fe_len = len; | |
3284 | ac->ac_status = AC_STATUS_FOUND; | |
3285 | ac->ac_pa = pa; | |
3286 | ||
3287 | /* we don't correct pa_pstart or pa_len here to avoid a | |
26346ff6 | 3288 | * possible race when the group is being loaded concurrently; |
c9de560d | 3289 | * instead we correct the pa later, after blocks are marked |
26346ff6 AK |
3290 | * in the on-disk bitmap -- see ext4_mb_release_context(). | |
3291 | * Other CPUs are prevented from allocating from this pa by lg_mutex | |
c9de560d AT |
3292 | */ |
3293 | mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); | |
3294 | } | |
3295 | ||
5e745b04 AK |
3296 | /* |
3297 | * Return the prealloc space that has the minimal distance | |
3298 | * from the goal block. @cpa is the prealloc | |
3299 | * space with the currently known minimal distance | |
3300 | * from the goal block. | |
3301 | */ | |
3302 | static struct ext4_prealloc_space * | |
3303 | ext4_mb_check_group_pa(ext4_fsblk_t goal_block, | |
3304 | struct ext4_prealloc_space *pa, | |
3305 | struct ext4_prealloc_space *cpa) | |
3306 | { | |
3307 | ext4_fsblk_t cur_distance, new_distance; | |
3308 | ||
3309 | if (cpa == NULL) { | |
3310 | atomic_inc(&pa->pa_count); | |
3311 | return pa; | |
3312 | } | |
3313 | cur_distance = abs(goal_block - cpa->pa_pstart); | |
3314 | new_distance = abs(goal_block - pa->pa_pstart); | |
3315 | ||
3316 | if (cur_distance < new_distance) | |
3317 | return cpa; | |
3318 | ||
3319 | /* drop the previous reference */ | |
3320 | atomic_dec(&cpa->pa_count); | |
3321 | atomic_inc(&pa->pa_count); | |
3322 | return pa; | |
3323 | } | |
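/* Usage sketch (hypothetical numbers): with goal_block == 1000, a current
 * best cpa starting at 900 (distance 100) loses to a pa starting at 1050
 * (distance 50): the reference moves from cpa to pa and pa is returned as
 * the new closest candidate. */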
3324 | ||
c9de560d AT |
3325 | /* |
3326 | * search goal blocks in preallocated space | |
3327 | */ | |
4ddfef7b ES |
3328 | static noinline_for_stack int |
3329 | ext4_mb_use_preallocated(struct ext4_allocation_context *ac) | |
c9de560d | 3330 | { |
6be2ded1 | 3331 | int order, i; |
c9de560d AT |
3332 | struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); |
3333 | struct ext4_locality_group *lg; | |
5e745b04 AK |
3334 | struct ext4_prealloc_space *pa, *cpa = NULL; |
3335 | ext4_fsblk_t goal_block; | |
c9de560d AT |
3336 | |
3337 | /* only data can be preallocated */ | |
3338 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
3339 | return 0; | |
3340 | ||
3341 | /* first, try per-file preallocation */ | |
3342 | rcu_read_lock(); | |
9a0762c5 | 3343 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
c9de560d AT |
3344 | |
3345 | /* all fields in this condition don't change, | |
3346 | * so we can skip locking for them */ | |
3347 | if (ac->ac_o_ex.fe_logical < pa->pa_lstart || | |
3348 | ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len) | |
3349 | continue; | |
3350 | ||
3351 | /* found preallocated blocks, use them */ | |
3352 | spin_lock(&pa->pa_lock); | |
3353 | if (pa->pa_deleted == 0 && pa->pa_free) { | |
3354 | atomic_inc(&pa->pa_count); | |
3355 | ext4_mb_use_inode_pa(ac, pa); | |
3356 | spin_unlock(&pa->pa_lock); | |
3357 | ac->ac_criteria = 10; | |
3358 | rcu_read_unlock(); | |
3359 | return 1; | |
3360 | } | |
3361 | spin_unlock(&pa->pa_lock); | |
3362 | } | |
3363 | rcu_read_unlock(); | |
3364 | ||
3365 | /* can we use group allocation? */ | |
3366 | if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) | |
3367 | return 0; | |
3368 | ||
3369 | /* inode may have no locality group for some reason */ | |
3370 | lg = ac->ac_lg; | |
3371 | if (lg == NULL) | |
3372 | return 0; | |
6be2ded1 AK |
3373 | order = fls(ac->ac_o_ex.fe_len) - 1; |
3374 | if (order > PREALLOC_TB_SIZE - 1) | |
3375 | /* The max size of hash table is PREALLOC_TB_SIZE */ | |
3376 | order = PREALLOC_TB_SIZE - 1; | |
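/* Worked example (hypothetical numbers): fls() returns the position of the
 * highest set bit, so a request of 13 blocks yields
 * order == fls(13) - 1 == 3, and the scan below starts at bucket 3, i.e. at
 * preallocations sized for requests of roughly 8..15 blocks. */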
3377 | ||
5e745b04 AK |
3378 | goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) + |
3379 | ac->ac_g_ex.fe_start + | |
3380 | le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block); | |
3381 | /* | |
3382 | * search for the prealloc space that has the | |
3383 | * minimal distance from the goal block. | |
3384 | */ | |
6be2ded1 AK |
3385 | for (i = order; i < PREALLOC_TB_SIZE; i++) { |
3386 | rcu_read_lock(); | |
3387 | list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], | |
3388 | pa_inode_list) { | |
3389 | spin_lock(&pa->pa_lock); | |
3390 | if (pa->pa_deleted == 0 && | |
3391 | pa->pa_free >= ac->ac_o_ex.fe_len) { | |
5e745b04 AK |
3392 | |
3393 | cpa = ext4_mb_check_group_pa(goal_block, | |
3394 | pa, cpa); | |
6be2ded1 | 3395 | } |
c9de560d | 3396 | spin_unlock(&pa->pa_lock); |
c9de560d | 3397 | } |
6be2ded1 | 3398 | rcu_read_unlock(); |
c9de560d | 3399 | } |
5e745b04 AK |
3400 | if (cpa) { |
3401 | ext4_mb_use_group_pa(ac, cpa); | |
3402 | ac->ac_criteria = 20; | |
3403 | return 1; | |
3404 | } | |
c9de560d AT |
3405 | return 0; |
3406 | } | |
3407 | ||
3408 | /* | |
3409 | * the function goes through all preallocations in this group and marks them | |
3410 | * as used in the in-core bitmap. the buddy must be generated from this bitmap. | |
3411 | * Needs to be called with the ext4 group lock held (ext4_lock_group) | |
3412 | */ | |
3413 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, | |
3414 | ext4_group_t group) | |
3415 | { | |
3416 | struct ext4_group_info *grp = ext4_get_group_info(sb, group); | |
3417 | struct ext4_prealloc_space *pa; | |
3418 | struct list_head *cur; | |
3419 | ext4_group_t groupnr; | |
3420 | ext4_grpblk_t start; | |
3421 | int preallocated = 0; | |
3422 | int count = 0; | |
3423 | int len; | |
3424 | ||
3425 | /* every form of preallocation discards the first-loaded group, | |
3426 | * so the only competing code is preallocation use. | |
3427 | * we don't need any locking here. | |
3428 | * notice we do NOT ignore preallocations with pa_deleted set; | |
3429 | * otherwise we could leave used blocks available for | |
3430 | * allocation in the buddy when a concurrent ext4_mb_put_pa() | |
3431 | * is dropping the preallocation | |
3432 | */ | |
3433 | list_for_each(cur, &grp->bb_prealloc_list) { | |
3434 | pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); | |
3435 | spin_lock(&pa->pa_lock); | |
3436 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, | |
3437 | &groupnr, &start); | |
3438 | len = pa->pa_len; | |
3439 | spin_unlock(&pa->pa_lock); | |
3440 | if (unlikely(len == 0)) | |
3441 | continue; | |
3442 | BUG_ON(groupnr != group); | |
3443 | mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group), | |
3444 | bitmap, start, len); | |
3445 | preallocated += len; | |
3446 | count++; | |
3447 | } | |
3448 | mb_debug("prellocated %u for group %lu\n", preallocated, group); | |
3449 | } | |
3450 | ||
3451 | static void ext4_mb_pa_callback(struct rcu_head *head) | |
3452 | { | |
3453 | struct ext4_prealloc_space *pa; | |
3454 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); | |
3455 | kmem_cache_free(ext4_pspace_cachep, pa); | |
3456 | } | |
3457 | ||
3458 | /* | |
3459 | * drops a reference to preallocated space descriptor | |
3460 | * if this was the last reference and the space is consumed | |
3461 | */ | |
3462 | static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |
3463 | struct super_block *sb, struct ext4_prealloc_space *pa) | |
3464 | { | |
3465 | unsigned long grp; | |
3466 | ||
3467 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | |
3468 | return; | |
3469 | ||
3470 | /* in this short window concurrent discard can set pa_deleted */ | |
3471 | spin_lock(&pa->pa_lock); | |
3472 | if (pa->pa_deleted == 1) { | |
3473 | spin_unlock(&pa->pa_lock); | |
3474 | return; | |
3475 | } | |
3476 | ||
3477 | pa->pa_deleted = 1; | |
3478 | spin_unlock(&pa->pa_lock); | |
3479 | ||
3480 | /* -1 is to protect from crossing allocation group */ | |
3481 | ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL); | |
3482 | ||
3483 | /* | |
3484 | * possible race: | |
3485 | * | |
3486 | * P1 (buddy init) P2 (regular allocation) | |
3487 | * find block B in PA | |
3488 | * copy on-disk bitmap to buddy | |
3489 | * mark B in on-disk bitmap | |
3490 | * drop PA from group | |
3491 | * mark all PAs in buddy | |
3492 | * | |
3493 | * thus, P1 initializes buddy with B available. to prevent this | |
3494 | * we make "copy" and "mark all PAs" atomic and serialize "drop PA" | |
3495 | * against that pair | |
3496 | */ | |
3497 | ext4_lock_group(sb, grp); | |
3498 | list_del(&pa->pa_group_list); | |
3499 | ext4_unlock_group(sb, grp); | |
3500 | ||
3501 | spin_lock(pa->pa_obj_lock); | |
3502 | list_del_rcu(&pa->pa_inode_list); | |
3503 | spin_unlock(pa->pa_obj_lock); | |
3504 | ||
3505 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3506 | } | |
3507 | ||
3508 | /* | |
3509 | * creates new preallocated space for given inode | |
3510 | */ | |
4ddfef7b ES |
3511 | static noinline_for_stack int |
3512 | ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) | |
c9de560d AT |
3513 | { |
3514 | struct super_block *sb = ac->ac_sb; | |
3515 | struct ext4_prealloc_space *pa; | |
3516 | struct ext4_group_info *grp; | |
3517 | struct ext4_inode_info *ei; | |
3518 | ||
3519 | /* preallocate only when the found space is larger than requested */ | |
3520 | BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); | |
3521 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
3522 | BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); | |
3523 | ||
3524 | pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); | |
3525 | if (pa == NULL) | |
3526 | return -ENOMEM; | |
3527 | ||
3528 | if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { | |
3529 | int winl; | |
3530 | int wins; | |
3531 | int win; | |
3532 | int offs; | |
3533 | ||
3534 | /* we can't allocate as much as the normalizer wants, | |
3535 | * so the found space must get a proper lstart | |
3536 | * to cover the original request */ | |
3537 | BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); | |
3538 | BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); | |
3539 | ||
3540 | /* we're limited by the original request in that the | |
3541 | * logical block must be covered anyway; | |
3542 | * winl is the window we can move our chunk within */ | |
3543 | winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; | |
3544 | ||
3545 | /* also, we should cover whole original request */ | |
3546 | wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len; | |
3547 | ||
3548 | /* the smallest one defines real window */ | |
3549 | win = min(winl, wins); | |
3550 | ||
3551 | offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len; | |
3552 | if (offs && offs < win) | |
3553 | win = offs; | |
3554 | ||
3555 | ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win; | |
3556 | BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); | |
3557 | BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); | |
3558 | } | |
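/* Worked example (hypothetical numbers): goal of 64 blocks at logical 0,
 * original request of 8 blocks at logical 10, but only 16 blocks found:
 * winl == 10, wins == 16 - 8 == 8, offs == 10 % 16 == 10, so win == 8 and
 * fe_logical becomes 10 - 8 == 2; the chunk still covers blocks 10..17. */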
3559 | ||
3560 | /* preallocation can change ac_b_ex, thus we store actually | |
3561 | * allocated blocks for history */ | |
3562 | ac->ac_f_ex = ac->ac_b_ex; | |
3563 | ||
3564 | pa->pa_lstart = ac->ac_b_ex.fe_logical; | |
3565 | pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); | |
3566 | pa->pa_len = ac->ac_b_ex.fe_len; | |
3567 | pa->pa_free = pa->pa_len; | |
3568 | atomic_set(&pa->pa_count, 1); | |
3569 | spin_lock_init(&pa->pa_lock); | |
3570 | pa->pa_deleted = 0; | |
3571 | pa->pa_linear = 0; | |
3572 | ||
3573 | mb_debug("new inode pa %p: %llu/%u for %u\n", pa, | |
3574 | pa->pa_pstart, pa->pa_len, pa->pa_lstart); | |
3575 | ||
3576 | ext4_mb_use_inode_pa(ac, pa); | |
3577 | atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); | |
3578 | ||
3579 | ei = EXT4_I(ac->ac_inode); | |
3580 | grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); | |
3581 | ||
3582 | pa->pa_obj_lock = &ei->i_prealloc_lock; | |
3583 | pa->pa_inode = ac->ac_inode; | |
3584 | ||
3585 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); | |
3586 | list_add(&pa->pa_group_list, &grp->bb_prealloc_list); | |
3587 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); | |
3588 | ||
3589 | spin_lock(pa->pa_obj_lock); | |
3590 | list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); | |
3591 | spin_unlock(pa->pa_obj_lock); | |
3592 | ||
3593 | return 0; | |
3594 | } | |
3595 | ||
3596 | /* | |
3597 | * creates new preallocated space for the locality group the inode belongs to | |
3598 | */ | |
4ddfef7b ES |
3599 | static noinline_for_stack int |
3600 | ext4_mb_new_group_pa(struct ext4_allocation_context *ac) | |
c9de560d AT |
3601 | { |
3602 | struct super_block *sb = ac->ac_sb; | |
3603 | struct ext4_locality_group *lg; | |
3604 | struct ext4_prealloc_space *pa; | |
3605 | struct ext4_group_info *grp; | |
3606 | ||
3607 | /* preallocate only when the found space is larger than requested */ | |
3608 | BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); | |
3609 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
3610 | BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); | |
3611 | ||
3612 | BUG_ON(ext4_pspace_cachep == NULL); | |
3613 | pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); | |
3614 | if (pa == NULL) | |
3615 | return -ENOMEM; | |
3616 | ||
3617 | /* preallocation can change ac_b_ex, thus we store actually | |
3618 | * allocated blocks for history */ | |
3619 | ac->ac_f_ex = ac->ac_b_ex; | |
3620 | ||
3621 | pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); | |
3622 | pa->pa_lstart = pa->pa_pstart; | |
3623 | pa->pa_len = ac->ac_b_ex.fe_len; | |
3624 | pa->pa_free = pa->pa_len; | |
3625 | atomic_set(&pa->pa_count, 1); | |
3626 | spin_lock_init(&pa->pa_lock); | |
6be2ded1 | 3627 | INIT_LIST_HEAD(&pa->pa_inode_list); |
c9de560d AT |
3628 | pa->pa_deleted = 0; |
3629 | pa->pa_linear = 1; | |
3630 | ||
3631 | mb_debug("new group pa %p: %llu/%u for %u\n", pa, | |
3632 | pa->pa_pstart, pa->pa_len, pa->pa_lstart); | |
3633 | ||
3634 | ext4_mb_use_group_pa(ac, pa); | |
3635 | atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); | |
3636 | ||
3637 | grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); | |
3638 | lg = ac->ac_lg; | |
3639 | BUG_ON(lg == NULL); | |
3640 | ||
3641 | pa->pa_obj_lock = &lg->lg_prealloc_lock; | |
3642 | pa->pa_inode = NULL; | |
3643 | ||
3644 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); | |
3645 | list_add(&pa->pa_group_list, &grp->bb_prealloc_list); | |
3646 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); | |
3647 | ||
6be2ded1 AK |
3648 | /* |
3649 | * We will later add the new pa to the right bucket | |
3650 | * after updating the pa_free in ext4_mb_release_context | |
3651 | */ | |
c9de560d AT |
3652 | return 0; |
3653 | } | |
3654 | ||
3655 | static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) | |
3656 | { | |
3657 | int err; | |
3658 | ||
3659 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) | |
3660 | err = ext4_mb_new_group_pa(ac); | |
3661 | else | |
3662 | err = ext4_mb_new_inode_pa(ac); | |
3663 | return err; | |
3664 | } | |
3665 | ||
3666 | /* | |
3667 | * finds all unused blocks in on-disk bitmap, frees them in | |
3668 | * in-core bitmap and buddy. | |
3669 | * @pa must be unlinked from inode and group lists, so that | |
3670 | * nobody else can find/use it. | |
3671 | * the caller MUST hold group/inode locks. | |
3672 | * TODO: optimize the case when there are no in-core structures yet | |
3673 | */ | |
4ddfef7b ES |
3674 | static noinline_for_stack int |
3675 | ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, | |
c83617db AK |
3676 | struct ext4_prealloc_space *pa, |
3677 | struct ext4_allocation_context *ac) | |
c9de560d | 3678 | { |
c9de560d AT |
3679 | struct super_block *sb = e4b->bd_sb; |
3680 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
3681 | unsigned long end; | |
3682 | unsigned long next; | |
3683 | ext4_group_t group; | |
3684 | ext4_grpblk_t bit; | |
3685 | sector_t start; | |
3686 | int err = 0; | |
3687 | int free = 0; | |
3688 | ||
3689 | BUG_ON(pa->pa_deleted == 0); | |
3690 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); | |
3691 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); | |
3692 | end = bit + pa->pa_len; | |
3693 | ||
256bdb49 ES |
3694 | if (ac) { |
3695 | ac->ac_sb = sb; | |
3696 | ac->ac_inode = pa->pa_inode; | |
3697 | ac->ac_op = EXT4_MB_HISTORY_DISCARD; | |
3698 | } | |
c9de560d AT |
3699 | |
3700 | while (bit < end) { | |
ffad0a44 | 3701 | bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); |
c9de560d AT |
3702 | if (bit >= end) |
3703 | break; | |
ffad0a44 | 3704 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); |
c9de560d AT |
3705 | start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + |
3706 | le32_to_cpu(sbi->s_es->s_first_data_block); | |
3707 | mb_debug(" free preallocated %u/%u in group %u\n", | |
3708 | (unsigned) start, (unsigned) next - bit, | |
3709 | (unsigned) group); | |
3710 | free += next - bit; | |
3711 | ||
256bdb49 ES |
3712 | if (ac) { |
3713 | ac->ac_b_ex.fe_group = group; | |
3714 | ac->ac_b_ex.fe_start = bit; | |
3715 | ac->ac_b_ex.fe_len = next - bit; | |
3716 | ac->ac_b_ex.fe_logical = 0; | |
3717 | ext4_mb_store_history(ac); | |
3718 | } | |
c9de560d AT |
3719 | |
3720 | mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); | |
3721 | bit = next + 1; | |
3722 | } | |
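/* Note on the loop above: it walks the on-disk bitmap within [bit, end) and
 * frees every maximal run of zero bits; e.g. for a bitmap fragment
 * 1100111000 starting at bit 0 it would free the runs at offsets 2..3 and
 * 7..9 separately (hypothetical bit pattern). */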
3723 | if (free != pa->pa_free) { | |
26346ff6 | 3724 | printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n", |
c9de560d AT |
3725 | pa, (unsigned long) pa->pa_lstart, |
3726 | (unsigned long) pa->pa_pstart, | |
3727 | (unsigned long) pa->pa_len); | |
46e665e9 | 3728 | ext4_error(sb, __func__, "free %u, pa_free %u\n", |
26346ff6 | 3729 | free, pa->pa_free); |
e56eb659 AK |
3730 | /* |
3731 | * pa is already deleted so we use the value obtained | |
3732 | * from the bitmap and continue. | |
3733 | */ | |
c9de560d | 3734 | } |
c9de560d AT |
3735 | atomic_add(free, &sbi->s_mb_discarded); |
3736 | ||
3737 | return err; | |
3738 | } | |
3739 | ||
4ddfef7b ES |
3740 | static noinline_for_stack int |
3741 | ext4_mb_release_group_pa(struct ext4_buddy *e4b, | |
c83617db AK |
3742 | struct ext4_prealloc_space *pa, |
3743 | struct ext4_allocation_context *ac) | |
c9de560d | 3744 | { |
c9de560d AT |
3745 | struct super_block *sb = e4b->bd_sb; |
3746 | ext4_group_t group; | |
3747 | ext4_grpblk_t bit; | |
3748 | ||
256bdb49 ES |
3749 | if (ac) |
3750 | ac->ac_op = EXT4_MB_HISTORY_DISCARD; | |
c9de560d AT |
3751 | |
3752 | BUG_ON(pa->pa_deleted == 0); | |
3753 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); | |
3754 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); | |
3755 | mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); | |
3756 | atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); | |
3757 | ||
256bdb49 ES |
3758 | if (ac) { |
3759 | ac->ac_sb = sb; | |
3760 | ac->ac_inode = NULL; | |
3761 | ac->ac_b_ex.fe_group = group; | |
3762 | ac->ac_b_ex.fe_start = bit; | |
3763 | ac->ac_b_ex.fe_len = pa->pa_len; | |
3764 | ac->ac_b_ex.fe_logical = 0; | |
3765 | ext4_mb_store_history(ac); | |
256bdb49 | 3766 | } |
c9de560d AT |
3767 | |
3768 | return 0; | |
3769 | } | |
3770 | ||
3771 | /* | |
3772 | * releases all preallocations in given group | |
3773 | * | |
3774 | * first, we need to decide discard policy: | |
3775 | * - when do we discard | |
3776 | * 1) ENOSPC | |
3777 | * - how many do we discard | |
3778 | * 1) how many requested | |
3779 | */ | |
4ddfef7b ES |
3780 | static noinline_for_stack int |
3781 | ext4_mb_discard_group_preallocations(struct super_block *sb, | |
c9de560d AT |
3782 | ext4_group_t group, int needed) |
3783 | { | |
3784 | struct ext4_group_info *grp = ext4_get_group_info(sb, group); | |
3785 | struct buffer_head *bitmap_bh = NULL; | |
3786 | struct ext4_prealloc_space *pa, *tmp; | |
c83617db | 3787 | struct ext4_allocation_context *ac; |
c9de560d AT |
3788 | struct list_head list; |
3789 | struct ext4_buddy e4b; | |
3790 | int err; | |
3791 | int busy = 0; | |
3792 | int free = 0; | |
3793 | ||
3794 | mb_debug("discard preallocation for group %lu\n", group); | |
3795 | ||
3796 | if (list_empty(&grp->bb_prealloc_list)) | |
3797 | return 0; | |
3798 | ||
574ca174 | 3799 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
c9de560d | 3800 | if (bitmap_bh == NULL) { |
ce89f46c AK |
3801 | ext4_error(sb, __func__, "Error in reading block " |
3802 | "bitmap for %lu\n", group); | |
3803 | return 0; | |
c9de560d AT |
3804 | } |
3805 | ||
3806 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
ce89f46c AK |
3807 | if (err) { |
3808 | ext4_error(sb, __func__, "Error in loading buddy " | |
3809 | "information for %lu\n", group); | |
3810 | put_bh(bitmap_bh); | |
3811 | return 0; | |
3812 | } | |
c9de560d AT |
3813 | |
3814 | if (needed == 0) | |
3815 | needed = EXT4_BLOCKS_PER_GROUP(sb) + 1; | |
3816 | ||
c9de560d | 3817 | INIT_LIST_HEAD(&list); |
c83617db | 3818 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
c9de560d AT |
3819 | repeat: |
3820 | ext4_lock_group(sb, group); | |
3821 | list_for_each_entry_safe(pa, tmp, | |
3822 | &grp->bb_prealloc_list, pa_group_list) { | |
3823 | spin_lock(&pa->pa_lock); | |
3824 | if (atomic_read(&pa->pa_count)) { | |
3825 | spin_unlock(&pa->pa_lock); | |
3826 | busy = 1; | |
3827 | continue; | |
3828 | } | |
3829 | if (pa->pa_deleted) { | |
3830 | spin_unlock(&pa->pa_lock); | |
3831 | continue; | |
3832 | } | |
3833 | ||
3834 | /* seems this one can be freed ... */ | |
3835 | pa->pa_deleted = 1; | |
3836 | ||
3837 | /* we can trust pa_free ... */ | |
3838 | free += pa->pa_free; | |
3839 | ||
3840 | spin_unlock(&pa->pa_lock); | |
3841 | ||
3842 | list_del(&pa->pa_group_list); | |
3843 | list_add(&pa->u.pa_tmp_list, &list); | |
3844 | } | |
3845 | ||
3846 | /* if we still need more blocks and some PAs were used, try again */ | |
3847 | if (free < needed && busy) { | |
3848 | busy = 0; | |
3849 | ext4_unlock_group(sb, group); | |
3850 | /* | |
3851 | * Yield the CPU here so that we don't get soft lockup | |
3852 | * in non preempt case. | |
3853 | */ | |
3854 | yield(); | |
3855 | goto repeat; | |
3856 | } | |
3857 | ||
3858 | /* found anything to free? */ | |
3859 | if (list_empty(&list)) { | |
3860 | BUG_ON(free != 0); | |
3861 | goto out; | |
3862 | } | |
3863 | ||
3864 | /* now free all selected PAs */ | |
3865 | list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { | |
3866 | ||
3867 | /* remove from object (inode or locality group) */ | |
3868 | spin_lock(pa->pa_obj_lock); | |
3869 | list_del_rcu(&pa->pa_inode_list); | |
3870 | spin_unlock(pa->pa_obj_lock); | |
3871 | ||
3872 | if (pa->pa_linear) | |
c83617db | 3873 | ext4_mb_release_group_pa(&e4b, pa, ac); |
c9de560d | 3874 | else |
c83617db | 3875 | ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); |
c9de560d AT |
3876 | |
3877 | list_del(&pa->u.pa_tmp_list); | |
3878 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3879 | } | |
3880 | ||
3881 | out: | |
3882 | ext4_unlock_group(sb, group); | |
c83617db AK |
3883 | if (ac) |
3884 | kmem_cache_free(ext4_ac_cachep, ac); | |
c9de560d AT |
3885 | ext4_mb_release_desc(&e4b); |
3886 | put_bh(bitmap_bh); | |
3887 | return free; | |
3888 | } | |
3889 | ||
3890 | /* | |
3891 | * releases all unused preallocated blocks for a given inode | |
3892 | * | |
3893 | * It's important to discard preallocations under i_data_sem. | |
3894 | * We don't want another block to be served from the prealloc | |
3895 | * space when we are discarding the inode prealloc space. | |
3896 | * | |
3897 | * FIXME!! Make sure it is valid at all the call sites | |
3898 | */ | |
3899 | void ext4_mb_discard_inode_preallocations(struct inode *inode) | |
3900 | { | |
3901 | struct ext4_inode_info *ei = EXT4_I(inode); | |
3902 | struct super_block *sb = inode->i_sb; | |
3903 | struct buffer_head *bitmap_bh = NULL; | |
3904 | struct ext4_prealloc_space *pa, *tmp; | |
c83617db | 3905 | struct ext4_allocation_context *ac; |
c9de560d AT |
3906 | ext4_group_t group = 0; |
3907 | struct list_head list; | |
3908 | struct ext4_buddy e4b; | |
3909 | int err; | |
3910 | ||
3911 | if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) { | |
3912 | /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ | |
3913 | return; | |
3914 | } | |
3915 | ||
3916 | mb_debug("discard preallocation for inode %lu\n", inode->i_ino); | |
3917 | ||
3918 | INIT_LIST_HEAD(&list); | |
3919 | ||
c83617db | 3920 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
c9de560d AT |
3921 | repeat: |
3922 | /* first, collect all pa's in the inode */ | |
3923 | spin_lock(&ei->i_prealloc_lock); | |
3924 | while (!list_empty(&ei->i_prealloc_list)) { | |
3925 | pa = list_entry(ei->i_prealloc_list.next, | |
3926 | struct ext4_prealloc_space, pa_inode_list); | |
3927 | BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); | |
3928 | spin_lock(&pa->pa_lock); | |
3929 | if (atomic_read(&pa->pa_count)) { | |
3930 | /* this shouldn't happen often - nobody should | |
3931 | * use preallocation while we're discarding it */ | |
3932 | spin_unlock(&pa->pa_lock); | |
3933 | spin_unlock(&ei->i_prealloc_lock); | |
3934 | printk(KERN_ERR "uh-oh! used pa while discarding\n"); | |
3935 | WARN_ON(1); | |
3936 | schedule_timeout_uninterruptible(HZ); | |
3937 | goto repeat; | |
3938 | ||
3939 | } | |
3940 | if (pa->pa_deleted == 0) { | |
3941 | pa->pa_deleted = 1; | |
3942 | spin_unlock(&pa->pa_lock); | |
3943 | list_del_rcu(&pa->pa_inode_list); | |
3944 | list_add(&pa->u.pa_tmp_list, &list); | |
3945 | continue; | |
3946 | } | |
3947 | ||
3948 | /* someone is deleting pa right now */ | |
3949 | spin_unlock(&pa->pa_lock); | |
3950 | spin_unlock(&ei->i_prealloc_lock); | |
3951 | ||
3952 | /* we have to wait here because pa_deleted | |
3953 | * doesn't mean pa is already unlinked from | |
3954 | * the list. as we might be called from | |
3955 | * ->clear_inode() the inode will get freed | |
3956 | * and concurrent thread which is unlinking | |
3957 | * pa from inode's list may access already | |
3958 | * freed memory, bad-bad-bad */ | |
3959 | ||
3960 | /* XXX: if this happens too often, we can | |
3961 | * add a flag to force wait only in case | |
3962 | * of ->clear_inode(), but not in case of | |
3963 | * regular truncate */ | |
3964 | schedule_timeout_uninterruptible(HZ); | |
3965 | goto repeat; | |
3966 | } | |
3967 | spin_unlock(&ei->i_prealloc_lock); | |
3968 | ||
3969 | list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { | |
3970 | BUG_ON(pa->pa_linear != 0); | |
3971 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); | |
3972 | ||
3973 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
ce89f46c AK |
3974 | if (err) { |
3975 | ext4_error(sb, __func__, "Error in loading buddy " | |
3976 | "information for %lu\n", group); | |
3977 | continue; | |
3978 | } | |
c9de560d | 3979 | |
574ca174 | 3980 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
c9de560d | 3981 | if (bitmap_bh == NULL) { |
ce89f46c AK |
3982 | ext4_error(sb, __func__, "Error in reading block " |
3983 | "bitmap for %lu\n", group); | |
c9de560d | 3984 | ext4_mb_release_desc(&e4b); |
ce89f46c | 3985 | continue; |
c9de560d AT |
3986 | } |
3987 | ||
3988 | ext4_lock_group(sb, group); | |
3989 | list_del(&pa->pa_group_list); | |
c83617db | 3990 | ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); |
c9de560d AT |
3991 | ext4_unlock_group(sb, group); |
3992 | ||
3993 | ext4_mb_release_desc(&e4b); | |
3994 | put_bh(bitmap_bh); | |
3995 | ||
3996 | list_del(&pa->u.pa_tmp_list); | |
3997 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3998 | } | |
c83617db AK |
3999 | if (ac) |
4000 | kmem_cache_free(ext4_ac_cachep, ac); | |
c9de560d AT |
4001 | } |
4002 | ||
4003 | /* | |
4004 | * finds all preallocated spaces and returns blocks being freed to them; | |
4005 | * if a preallocated space becomes full (no block is used from the space) | |
4006 | * then the function frees the space in the buddy | |
4007 | * XXX: at the moment, truncate (which is the only way to free blocks) | |
4008 | * discards all preallocations | |
4009 | */ | |
4010 | static void ext4_mb_return_to_preallocation(struct inode *inode, | |
4011 | struct ext4_buddy *e4b, | |
4012 | sector_t block, int count) | |
4013 | { | |
4014 | BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list)); | |
4015 | } | |
4016 | #ifdef MB_DEBUG | |
4017 | static void ext4_mb_show_ac(struct ext4_allocation_context *ac) | |
4018 | { | |
4019 | struct super_block *sb = ac->ac_sb; | |
4020 | ext4_group_t i; | |
4021 | ||
4022 | printk(KERN_ERR "EXT4-fs: Can't allocate:" | |
4023 | " Allocation context details:\n"); | |
4024 | printk(KERN_ERR "EXT4-fs: status %d flags %d\n", | |
4025 | ac->ac_status, ac->ac_flags); | |
4026 | printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, " | |
4027 | "best %lu/%lu/%lu@%lu cr %d\n", | |
4028 | (unsigned long)ac->ac_o_ex.fe_group, | |
4029 | (unsigned long)ac->ac_o_ex.fe_start, | |
4030 | (unsigned long)ac->ac_o_ex.fe_len, | |
4031 | (unsigned long)ac->ac_o_ex.fe_logical, | |
4032 | (unsigned long)ac->ac_g_ex.fe_group, | |
4033 | (unsigned long)ac->ac_g_ex.fe_start, | |
4034 | (unsigned long)ac->ac_g_ex.fe_len, | |
4035 | (unsigned long)ac->ac_g_ex.fe_logical, | |
4036 | (unsigned long)ac->ac_b_ex.fe_group, | |
4037 | (unsigned long)ac->ac_b_ex.fe_start, | |
4038 | (unsigned long)ac->ac_b_ex.fe_len, | |
4039 | (unsigned long)ac->ac_b_ex.fe_logical, | |
4040 | (int)ac->ac_criteria); | |
4041 | printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned, | |
4042 | ac->ac_found); | |
4043 | printk(KERN_ERR "EXT4-fs: groups: \n"); | |
4044 | for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) { | |
4045 | struct ext4_group_info *grp = ext4_get_group_info(sb, i); | |
4046 | struct ext4_prealloc_space *pa; | |
4047 | ext4_grpblk_t start; | |
4048 | struct list_head *cur; | |
4049 | ext4_lock_group(sb, i); | |
4050 | list_for_each(cur, &grp->bb_prealloc_list) { | |
4051 | pa = list_entry(cur, struct ext4_prealloc_space, | |
4052 | pa_group_list); | |
4053 | spin_lock(&pa->pa_lock); | |
4054 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, | |
4055 | NULL, &start); | |
4056 | spin_unlock(&pa->pa_lock); | |
4057 | printk(KERN_ERR "PA:%lu:%d:%u \n", i, | |
4058 | start, pa->pa_len); | |
4059 | } | |
60bd63d1 | 4060 | ext4_unlock_group(sb, i); |
c9de560d AT |
4061 | |
4062 | if (grp->bb_free == 0) | |
4063 | continue; | |
4064 | printk(KERN_ERR "%lu: %d/%d \n", | |
4065 | i, grp->bb_free, grp->bb_fragments); | |
4066 | } | |
4067 | printk(KERN_ERR "\n"); | |
4068 | } | |
4069 | #else | |
4070 | static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) | |
4071 | { | |
4072 | return; | |
4073 | } | |
4074 | #endif | |
4075 | ||
4076 | /* | |
4077 | * We use locality group preallocation for small files. The size of the | |
4078 | * file is determined by the current size or the resulting size after | |
4079 | * allocation, whichever is larger | |
4080 | * | |
4081 | * One can tune this size via /proc/fs/ext4/<partition>/stream_req | |
4082 | */ | |
4083 | static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |
4084 | { | |
4085 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
4086 | int bsbits = ac->ac_sb->s_blocksize_bits; | |
4087 | loff_t size, isize; | |
4088 | ||
4089 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
4090 | return; | |
4091 | ||
4092 | size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; | |
4093 | isize = i_size_read(ac->ac_inode) >> bsbits; | |
4094 | size = max(size, isize); | |
4095 | ||
4096 | /* don't use group allocation for large files */ | |
4097 | if (size >= sbi->s_mb_stream_request) | |
4098 | return; | |
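/* Worked example (hypothetical numbers): with the default
 * s_mb_stream_request of 16 blocks (64KB with 4KB blocks), a file whose
 * larger of current and resulting size stays below 16 blocks is routed to
 * the per-CPU locality group below; anything bigger keeps per-inode
 * preallocation. */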
4099 | ||
4100 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) | |
4101 | return; | |
4102 | ||
4103 | BUG_ON(ac->ac_lg != NULL); | |
4104 | /* | |
4105 | * locality group prealloc space is per-CPU. The reason for having a | |
4106 | * per-CPU locality group is to reduce the contention between block | |
4107 | * requests from multiple CPUs. | |
4108 | */ | |
4109 | ac->ac_lg = &sbi->s_locality_groups[get_cpu()]; | |
4110 | put_cpu(); | |
4111 | ||
4112 | /* we're going to use group allocation */ | |
4113 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; | |
4114 | ||
4115 | /* serialize all allocations in the group */ | |
4116 | mutex_lock(&ac->ac_lg->lg_mutex); | |
4117 | } | |
4118 | ||
4ddfef7b ES |
4119 | static noinline_for_stack int |
4120 | ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |
c9de560d AT |
4121 | struct ext4_allocation_request *ar) |
4122 | { | |
4123 | struct super_block *sb = ar->inode->i_sb; | |
4124 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
4125 | struct ext4_super_block *es = sbi->s_es; | |
4126 | ext4_group_t group; | |
4127 | unsigned long len; | |
4128 | unsigned long goal; | |
4129 | ext4_grpblk_t block; | |
4130 | ||
4131 | /* we can't allocate > group size */ | |
4132 | len = ar->len; | |
4133 | ||
4134 | /* just a dirty hack to filter too big requests */ | |
4135 | if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10) | |
4136 | len = EXT4_BLOCKS_PER_GROUP(sb) - 10; | |
4137 | ||
4138 | /* start searching from the goal */ | |
4139 | goal = ar->goal; | |
4140 | if (goal < le32_to_cpu(es->s_first_data_block) || | |
4141 | goal >= ext4_blocks_count(es)) | |
4142 | goal = le32_to_cpu(es->s_first_data_block); | |
4143 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | |
4144 | ||
4145 | /* set up allocation goals */ | |
4146 | ac->ac_b_ex.fe_logical = ar->logical; | |
4147 | ac->ac_b_ex.fe_group = 0; | |
4148 | ac->ac_b_ex.fe_start = 0; | |
4149 | ac->ac_b_ex.fe_len = 0; | |
4150 | ac->ac_status = AC_STATUS_CONTINUE; | |
4151 | ac->ac_groups_scanned = 0; | |
4152 | ac->ac_ex_scanned = 0; | |
4153 | ac->ac_found = 0; | |
4154 | ac->ac_sb = sb; | |
4155 | ac->ac_inode = ar->inode; | |
4156 | ac->ac_o_ex.fe_logical = ar->logical; | |
4157 | ac->ac_o_ex.fe_group = group; | |
4158 | ac->ac_o_ex.fe_start = block; | |
4159 | ac->ac_o_ex.fe_len = len; | |
4160 | ac->ac_g_ex.fe_logical = ar->logical; | |
4161 | ac->ac_g_ex.fe_group = group; | |
4162 | ac->ac_g_ex.fe_start = block; | |
4163 | ac->ac_g_ex.fe_len = len; | |
4164 | ac->ac_f_ex.fe_len = 0; | |
4165 | ac->ac_flags = ar->flags; | |
4166 | ac->ac_2order = 0; | |
4167 | ac->ac_criteria = 0; | |
4168 | ac->ac_pa = NULL; | |
4169 | ac->ac_bitmap_page = NULL; | |
4170 | ac->ac_buddy_page = NULL; | |
4171 | ac->ac_lg = NULL; | |
4172 | ||
4173 | /* we have to define the context: will we work with a file or a | |
4174 | * locality group. this is a policy, actually */ | |
4175 | ext4_mb_group_or_file(ac); | |
4176 | ||
4177 | mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " | |
4178 | "left: %u/%u, right %u/%u to %swritable\n", | |
4179 | (unsigned) ar->len, (unsigned) ar->logical, | |
4180 | (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, | |
4181 | (unsigned) ar->lleft, (unsigned) ar->pleft, | |
4182 | (unsigned) ar->lright, (unsigned) ar->pright, | |
4183 | atomic_read(&ar->inode->i_writecount) ? "" : "non-"); | |
4184 | return 0; | |
4185 | ||
4186 | } | |
4187 | ||
6be2ded1 AK |
4188 | static noinline_for_stack void |
4189 | ext4_mb_discard_lg_preallocations(struct super_block *sb, | |
4190 | struct ext4_locality_group *lg, | |
4191 | int order, int total_entries) | |
4192 | { | |
4193 | ext4_group_t group = 0; | |
4194 | struct ext4_buddy e4b; | |
4195 | struct list_head discard_list; | |
4196 | struct ext4_prealloc_space *pa, *tmp; | |
4197 | struct ext4_allocation_context *ac; | |
4198 | ||
4199 | mb_debug("discard locality group preallocation\n"); | |
4200 | ||
4201 | INIT_LIST_HEAD(&discard_list); | |
4202 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); | |
4203 | ||
4204 | spin_lock(&lg->lg_prealloc_lock); | |
4205 | list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], | |
4206 | pa_inode_list) { | |
4207 | spin_lock(&pa->pa_lock); | |
4208 | if (atomic_read(&pa->pa_count)) { | |
4209 | /* | |
4210 | * This is the pa that we just used | |
4211 | * for block allocation. So don't | |
4212 | * free that | |
4213 | */ | |
4214 | spin_unlock(&pa->pa_lock); | |
4215 | continue; | |
4216 | } | |
4217 | if (pa->pa_deleted) { | |
4218 | spin_unlock(&pa->pa_lock); | |
4219 | continue; | |
4220 | } | |
4221 | /* only lg prealloc space */ | |
4222 | BUG_ON(!pa->pa_linear); | |
4223 | ||
4224 | /* seems this one can be freed ... */ | |
4225 | pa->pa_deleted = 1; | |
4226 | spin_unlock(&pa->pa_lock); | |
4227 | ||
4228 | list_del_rcu(&pa->pa_inode_list); | |
4229 | list_add(&pa->u.pa_tmp_list, &discard_list); | |
4230 | ||
4231 | total_entries--; | |
4232 | if (total_entries <= 5) { | |
4233 | /* | |
4234 | * we want to keep only 5 entries, | |
4235 | * allowing the list to grow to 8. This | |
4236 | * makes sure we don't call discard | |
4237 | * again soon for this list. | |
4238 | */ | |
4239 | break; | |
4240 | } | |
4241 | } | |
4242 | spin_unlock(&lg->lg_prealloc_lock); | |
4243 | ||
4244 | list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { | |
4245 | ||
4246 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); | |
4247 | if (ext4_mb_load_buddy(sb, group, &e4b)) { | |
4248 | ext4_error(sb, __func__, "Error in loading buddy " | |
4249 | "information for %lu\n", group); | |
4250 | continue; | |
4251 | } | |
4252 | ext4_lock_group(sb, group); | |
4253 | list_del(&pa->pa_group_list); | |
4254 | ext4_mb_release_group_pa(&e4b, pa, ac); | |
4255 | ext4_unlock_group(sb, group); | |
4256 | ||
4257 | ext4_mb_release_desc(&e4b); | |
4258 | list_del(&pa->u.pa_tmp_list); | |
4259 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
4260 | } | |
4261 | if (ac) | |
4262 | kmem_cache_free(ext4_ac_cachep, ac); | |
4263 | } | |
4264 | ||
4265 | /* | |
4266 | * We have incremented pa_count. So it cannot be freed at this | |
4267 | * point. Also we hold lg_mutex. So no parallel allocation is | |
4268 | * possible from this lg. That means pa_free cannot be updated. | |
4269 | * | |
4270 | * A parallel ext4_mb_discard_group_preallocations is possible, | |
4271 | * which can cause the lg_prealloc_list to be updated. | |
4272 | */ | |
4273 | ||
4274 | static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) | |
4275 | { | |
4276 | int order, added = 0, lg_prealloc_count = 1; | |
4277 | struct super_block *sb = ac->ac_sb; | |
4278 | struct ext4_locality_group *lg = ac->ac_lg; | |
4279 | struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; | |
4280 | ||
4281 | order = fls(pa->pa_free) - 1; | |
4282 | if (order > PREALLOC_TB_SIZE - 1) | |
4283 | /* The max size of hash table is PREALLOC_TB_SIZE */ | |
4284 | order = PREALLOC_TB_SIZE - 1; | |
4285 | /* Add the prealloc space to lg */ | |
4286 | rcu_read_lock(); | |
4287 | list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], | |
4288 | pa_inode_list) { | |
4289 | spin_lock(&tmp_pa->pa_lock); | |
4290 | if (tmp_pa->pa_deleted) { | |
4291 | spin_unlock(&tmp_pa->pa_lock); /* must match the lock taken above */ | |
4292 | continue; | |
4293 | } | |
4294 | if (!added && pa->pa_free < tmp_pa->pa_free) { | |
4295 | /* Add to the tail of the previous entry */ | |
4296 | list_add_tail_rcu(&pa->pa_inode_list, | |
4297 | &tmp_pa->pa_inode_list); | |
4298 | added = 1; | |
4299 | /* | |
4300 | * we want to count the total | |
4301 | * number of entries in the list | |
4302 | */ | |
4303 | } | |
4304 | spin_unlock(&tmp_pa->pa_lock); | |
4305 | lg_prealloc_count++; | |
4306 | } | |
4307 | if (!added) | |
4308 | list_add_tail_rcu(&pa->pa_inode_list, | |
4309 | &lg->lg_prealloc_list[order]); | |
4310 | rcu_read_unlock(); | |
4311 | ||
4312 | /* Now trim the list to be not more than 8 elements */ | |
4313 | if (lg_prealloc_count > 8) { | |
4314 | ext4_mb_discard_lg_preallocations(sb, lg, | |
4315 | order, lg_prealloc_count); | |
4316 | return; | |
4317 | } | |
4318 | return; | |
4319 | } | |
4320 | ||
c9de560d AT |
4321 | /* |
4322 | * release all resource we used in allocation | |
4323 | */ | |
4324 | static int ext4_mb_release_context(struct ext4_allocation_context *ac) | |
4325 | { | |
6be2ded1 AK |
4326 | struct ext4_prealloc_space *pa = ac->ac_pa; |
4327 | if (pa) { | |
4328 | if (pa->pa_linear) { | |
c9de560d | 4329 | /* see comment in ext4_mb_use_group_pa() */ |
6be2ded1 AK |
4330 | spin_lock(&pa->pa_lock); |
4331 | pa->pa_pstart += ac->ac_b_ex.fe_len; | |
4332 | pa->pa_lstart += ac->ac_b_ex.fe_len; | |
4333 | pa->pa_free -= ac->ac_b_ex.fe_len; | |
4334 | pa->pa_len -= ac->ac_b_ex.fe_len; | |
4335 | spin_unlock(&pa->pa_lock); | |
4336 | /* | |
4337 | * We want to add the pa to the right bucket. | |
4338 | * Remove it from the list and while adding | |
4339 | * make sure the list to which we are adding | |
4340 | * doesn't grow big. | |
4341 | */ | |
4342 | if (likely(pa->pa_free)) { | |
4343 | spin_lock(pa->pa_obj_lock); | |
4344 | list_del_rcu(&pa->pa_inode_list); | |
4345 | spin_unlock(pa->pa_obj_lock); | |
4346 | ext4_mb_add_n_trim(ac); | |
4347 | } | |
c9de560d | 4348 | } |
6be2ded1 | 4349 | ext4_mb_put_pa(ac, ac->ac_sb, pa); |
c9de560d AT |
4350 | } |
4351 | if (ac->ac_bitmap_page) | |
4352 | page_cache_release(ac->ac_bitmap_page); | |
4353 | if (ac->ac_buddy_page) | |
4354 | page_cache_release(ac->ac_buddy_page); | |
4355 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) | |
4356 | mutex_unlock(&ac->ac_lg->lg_mutex); | |
4357 | ext4_mb_collect_stats(ac); | |
4358 | return 0; | |
4359 | } | |
4360 | ||
4361 | static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) | |
4362 | { | |
4363 | ext4_group_t i; | |
4364 | int ret; | |
4365 | int freed = 0; | |
4366 | ||
4367 | for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) { | |
4368 | ret = ext4_mb_discard_group_preallocations(sb, i, needed); | |
4369 | freed += ret; | |
4370 | needed -= ret; | |
4371 | } | |
4372 | ||
4373 | return freed; | |
4374 | } | |
4375 | ||
4376 | /* | |
4377 | * Main entry point into mballoc to allocate blocks | |
4378 | * it tries to use preallocation first, then falls back | |
4379 | * to usual allocation | |
4380 | */ | |
4381 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |
4382 | struct ext4_allocation_request *ar, int *errp) | |
4383 | { | |
6bc6e63f | 4384 | int freed; |
256bdb49 | 4385 | struct ext4_allocation_context *ac = NULL; |
c9de560d AT |
4386 | struct ext4_sb_info *sbi; |
4387 | struct super_block *sb; | |
4388 | ext4_fsblk_t block = 0; | |
6bc6e63f AK |
4389 | unsigned long inquota; |
4390 | unsigned long reserv_blks = 0; | |
c9de560d AT |
4391 | |
4392 | sb = ar->inode->i_sb; | |
4393 | sbi = EXT4_SB(sb); | |
4394 | ||
4395 | if (!test_opt(sb, MBALLOC)) { | |
654b4908 | 4396 | block = ext4_old_new_blocks(handle, ar->inode, ar->goal, |
c9de560d AT |
4397 | &(ar->len), errp); |
4398 | return block; | |
4399 | } | |
d2a17637 MC |
4400 | if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { |
4401 | /* | |
4402 | * With delalloc we already reserved the blocks | |
4403 | */ | |
030ba6bc AK |
4404 | while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { |
4405 | /* let others to free the space */ | |
4406 | yield(); | |
4407 | ar->len = ar->len >> 1; | |
4408 | } | |
4409 | if (!ar->len) { | |
a30d542a AK |
4410 | *errp = -ENOSPC; |
4411 | return 0; | |
4412 | } | |
6bc6e63f | 4413 | reserv_blks = ar->len; |
07031431 | 4414 | } |
c9de560d AT |
4415 | while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { |
4416 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | |
4417 | ar->len--; | |
4418 | } | |
4419 | if (ar->len == 0) { | |
4420 | *errp = -EDQUOT; | |
4421 | return 0; | |
4422 | } | |
4423 | inquota = ar->len; | |
4424 | ||
d2a17637 MC |
4425 | if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) |
4426 | ar->flags |= EXT4_MB_DELALLOC_RESERVED; | |
4427 | ||
256bdb49 ES |
4428 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
4429 | if (!ac) { | |
363d4251 | 4430 | ar->len = 0; |
256bdb49 | 4431 | *errp = -ENOMEM; |
363d4251 | 4432 | goto out1; |
256bdb49 ES |
4433 | } |
4434 | ||
c9de560d AT |
4435 | ext4_mb_poll_new_transaction(sb, handle); |
4436 | ||
256bdb49 | 4437 | *errp = ext4_mb_initialize_context(ac, ar); |
c9de560d AT |
4438 | if (*errp) { |
4439 | ar->len = 0; | |
363d4251 | 4440 | goto out2; |
c9de560d AT |
4441 | } |
4442 | ||
256bdb49 ES |
4443 | ac->ac_op = EXT4_MB_HISTORY_PREALLOC; |
4444 | if (!ext4_mb_use_preallocated(ac)) { | |
256bdb49 ES |
4445 | ac->ac_op = EXT4_MB_HISTORY_ALLOC; |
4446 | ext4_mb_normalize_request(ac, ar); | |
c9de560d AT |
4447 | repeat: |
4448 | /* allocate space in core */ | |
256bdb49 | 4449 | ext4_mb_regular_allocator(ac); |
c9de560d AT |
4450 | |
4451 | /* as we've just preallocated more space than | |
4452 | * the user requested originally, we store allocated | |
4453 | * space in a special descriptor */ | |
256bdb49 ES |
4454 | if (ac->ac_status == AC_STATUS_FOUND && |
4455 | ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) | |
4456 | ext4_mb_new_preallocation(ac); | |
c9de560d AT |
4457 | } |
4458 | ||
256bdb49 | 4459 | if (likely(ac->ac_status == AC_STATUS_FOUND)) { |
6bc6e63f | 4460 | *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks); |
519deca0 AK |
4461 | if (*errp == -EAGAIN) { |
4462 | ac->ac_b_ex.fe_group = 0; | |
4463 | ac->ac_b_ex.fe_start = 0; | |
4464 | ac->ac_b_ex.fe_len = 0; | |
4465 | ac->ac_status = AC_STATUS_CONTINUE; | |
4466 | goto repeat; | |
4467 | } else if (*errp) { | |
4468 | ac->ac_b_ex.fe_len = 0; | |
4469 | ar->len = 0; | |
4470 | ext4_mb_show_ac(ac); | |
4471 | } else { | |
4472 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); | |
4473 | ar->len = ac->ac_b_ex.fe_len; | |
4474 | } | |
c9de560d | 4475 | } else { |
256bdb49 | 4476 | freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); |
c9de560d AT |
4477 | if (freed) |
4478 | goto repeat; | |
4479 | *errp = -ENOSPC; | |
256bdb49 | 4480 | ac->ac_b_ex.fe_len = 0; |
c9de560d | 4481 | ar->len = 0; |
256bdb49 | 4482 | ext4_mb_show_ac(ac); |
c9de560d AT |
4483 | } |
4484 | ||
256bdb49 | 4485 | ext4_mb_release_context(ac); |
c9de560d | 4486 | |
363d4251 SF |
4487 | out2: |
4488 | kmem_cache_free(ext4_ac_cachep, ac); | |
4489 | out1: | |
c9de560d AT |
4490 | if (ar->len < inquota) |
4491 | DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); | |
4492 | ||
4493 | return block; | |
4494 | } | |
4495 | static void ext4_mb_poll_new_transaction(struct super_block *sb, | |
4496 | handle_t *handle) | |
4497 | { | |
4498 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
4499 | ||
4500 | if (sbi->s_last_transaction == handle->h_transaction->t_tid) | |
4501 | return; | |
4502 | ||
4503 | /* new transaction! time to close the last one and free blocks for | |
4504 | * the committed transaction. we know that only one transaction can | |
4505 | * be active, so the previous transaction may still be being logged | |
4506 | * and the transaction before that is known to be already logged. | |
4507 | * this means that now we may free blocks freed in all transactions | |
4508 | * before the previous one. hope I'm clear enough ... */ | |
4509 | ||
4510 | spin_lock(&sbi->s_md_lock); | |
4511 | if (sbi->s_last_transaction != handle->h_transaction->t_tid) { | |
4512 | mb_debug("new transaction %lu, old %lu\n", | |
4513 | (unsigned long) handle->h_transaction->t_tid, | |
4514 | (unsigned long) sbi->s_last_transaction); | |
4515 | list_splice_init(&sbi->s_closed_transaction, | |
4516 | &sbi->s_committed_transaction); | |
4517 | list_splice_init(&sbi->s_active_transaction, | |
4518 | &sbi->s_closed_transaction); | |
4519 | sbi->s_last_transaction = handle->h_transaction->t_tid; | |
4520 | } | |
4521 | spin_unlock(&sbi->s_md_lock); | |
4522 | ||
4523 | ext4_mb_free_committed_blocks(sb); | |
4524 | } | |
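/* Illustrative timeline: blocks freed under transaction T(n) sit on
 * s_active_transaction; when T(n+1) is first seen they are spliced onto
 * s_closed_transaction, and when T(n+2) arrives onto
 * s_committed_transaction, at which point ext4_mb_free_committed_blocks()
 * may finally return them to the buddy. */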
4525 | ||
4ddfef7b ES |
4526 | static noinline_for_stack int |
4527 | ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, | |
c9de560d AT |
4528 | ext4_group_t group, ext4_grpblk_t block, int count) |
4529 | { | |
4530 | struct ext4_group_info *db = e4b->bd_info; | |
4531 | struct super_block *sb = e4b->bd_sb; | |
4532 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
4533 | struct ext4_free_metadata *md; | |
4534 | int i; | |
4535 | ||
4536 | BUG_ON(e4b->bd_bitmap_page == NULL); | |
4537 | BUG_ON(e4b->bd_buddy_page == NULL); | |
4538 | ||
4539 | ext4_lock_group(sb, group); | |
4540 | for (i = 0; i < count; i++) { | |
4541 | md = db->bb_md_cur; | |
4542 | if (md && db->bb_tid != handle->h_transaction->t_tid) { | |
4543 | db->bb_md_cur = NULL; | |
4544 | md = NULL; | |
4545 | } | |
4546 | ||
4547 | if (md == NULL) { | |
4548 | ext4_unlock_group(sb, group); | |
4549 | md = kmalloc(sizeof(*md), GFP_NOFS); | |
4550 | if (md == NULL) | |
4551 | return -ENOMEM; | |
4552 | md->num = 0; | |
4553 | md->group = group; | |
4554 | ||
4555 | ext4_lock_group(sb, group); | |
4556 | if (db->bb_md_cur == NULL) { | |
4557 | spin_lock(&sbi->s_md_lock); | |
4558 | list_add(&md->list, &sbi->s_active_transaction); | |
4559 | spin_unlock(&sbi->s_md_lock); | |
4560 | /* protect the buddy cache from being freed; | |
4561 | * otherwise we would refresh it from the | |
4562 | * on-disk bitmap and lose blocks that are | |
4563 | * not yet available */ | |
4564 | page_cache_get(e4b->bd_buddy_page); | |
4565 | page_cache_get(e4b->bd_bitmap_page); | |
4566 | db->bb_md_cur = md; | |
4567 | db->bb_tid = handle->h_transaction->t_tid; | |
4568 | mb_debug("new md 0x%p for group %lu\n", | |
4569 | md, md->group); | |
4570 | } else { | |
4571 | kfree(md); | |
4572 | md = db->bb_md_cur; | |
4573 | } | |
4574 | } | |
4575 | ||
4576 | BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS); | |
4577 | md->blocks[md->num] = block + i; | |
4578 | md->num++; | |
4579 | if (md->num == EXT4_BB_MAX_BLOCKS) { | |
4580 | /* no more space, put the full container on the sb's list */ | |
4581 | db->bb_md_cur = NULL; | |
4582 | } | |
4583 | } | |
4584 | ext4_unlock_group(sb, group); | |
4585 | return 0; | |
4586 | } | |
4587 | ||
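/*
 * Illustrative user-space model of the container logic in
 * ext4_mb_free_metadata() above (not kernel code): freed block numbers
 * accumulate in a per-group container; a new container is started when
 * none exists, when the transaction id changes, or when the current
 * one fills up. MAX_BLOCKS stands in for EXT4_BB_MAX_BLOCKS; full
 * containers are simply dropped here, whereas the kernel keeps them
 * queued on the superblock's active-transaction list.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_BLOCKS 4

struct free_md {
	unsigned long tid;
	int num;
	unsigned long blocks[MAX_BLOCKS];
};

static struct free_md *cur;	/* models db->bb_md_cur */

static int free_metadata(unsigned long tid, unsigned long block, int count)
{
	for (int i = 0; i < count; i++) {
		if (cur && cur->tid != tid)
			cur = NULL;		/* stale: start a new container */
		if (cur == NULL) {
			cur = calloc(1, sizeof(*cur));
			if (cur == NULL)
				return -1;
			cur->tid = tid;
		}
		cur->blocks[cur->num++] = block + i;
		if (cur->num == MAX_BLOCKS)
			cur = NULL;		/* full: hand off, start fresh */
	}
	return 0;
}

int main(void)
{
	free_metadata(1, 100, 6);	/* fills one container, opens another */
	printf("pending in current container: %d\n", cur ? cur->num : 0);
	return 0;
}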
4588 | /* | |
4589 | * Main entry point into mballoc to free blocks | |
4590 | */ | |
4591 | void ext4_mb_free_blocks(handle_t *handle, struct inode *inode, | |
4592 | unsigned long block, unsigned long count, | |
4593 | int metadata, unsigned long *freed) | |
4594 | { | |
26346ff6 | 4595 | struct buffer_head *bitmap_bh = NULL; |
c9de560d | 4596 | struct super_block *sb = inode->i_sb; |
256bdb49 | 4597 | struct ext4_allocation_context *ac = NULL; |
c9de560d AT |
4598 | struct ext4_group_desc *gdp; |
4599 | struct ext4_super_block *es; | |
4600 | unsigned long overflow; | |
4601 | ext4_grpblk_t bit; | |
4602 | struct buffer_head *gd_bh; | |
4603 | ext4_group_t block_group; | |
4604 | struct ext4_sb_info *sbi; | |
4605 | struct ext4_buddy e4b; | |
4606 | int err = 0; | |
4607 | int ret; | |
4608 | ||
4609 | *freed = 0; | |
4610 | ||
4611 | ext4_mb_poll_new_transaction(sb, handle); | |
4612 | ||
4613 | sbi = EXT4_SB(sb); | |
4614 | es = EXT4_SB(sb)->s_es; | |
4615 | if (block < le32_to_cpu(es->s_first_data_block) || | |
4616 | block + count < block || | |
4617 | block + count > ext4_blocks_count(es)) { | |
46e665e9 | 4618 | ext4_error(sb, __func__, |
c9de560d AT |
4619 | "Freeing blocks not in datazone - " |
4620 | "block = %lu, count = %lu", block, count); | |
4621 | goto error_return; | |
4622 | } | |
4623 | ||
4624 | ext4_debug("freeing block %lu\n", block); | |
4625 | ||
256bdb49 ES |
4626 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
4627 | if (ac) { | |
4628 | ac->ac_op = EXT4_MB_HISTORY_FREE; | |
4629 | ac->ac_inode = inode; | |
4630 | ac->ac_sb = sb; | |
4631 | } | |
c9de560d AT |
4632 | |
4633 | do_more: | |
4634 | overflow = 0; | |
4635 | ext4_get_group_no_and_offset(sb, block, &block_group, &bit); | |
4636 | ||
4637 | /* | |
4638 | * Check to see if we are freeing blocks across a group | |
4639 | * boundary. | |
4640 | */ | |
4641 | if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { | |
4642 | overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); | |
4643 | count -= overflow; | |
4644 | } | |
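/*
 * Worked example of the split above (illustrative numbers): with
 * EXT4_BLOCKS_PER_GROUP(sb) == 32768, bit == 32760 and count == 16,
 * bit + count == 32776 spills 8 blocks into the next group, so
 * overflow = 8 and count becomes 8; those 8 blocks are freed in this
 * group, and the do_more pass restarts at block + 8 for the rest.
 */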
574ca174 | 4645 | bitmap_bh = ext4_read_block_bitmap(sb, block_group); |
ce89f46c AK |
4646 | if (!bitmap_bh) { |
4647 | err = -EIO; | |
c9de560d | 4648 | goto error_return; |
ce89f46c | 4649 | } |
c9de560d | 4650 | gdp = ext4_get_group_desc(sb, block_group, &gd_bh); |
ce89f46c AK |
4651 | if (!gdp) { |
4652 | err = -EIO; | |
c9de560d | 4653 | goto error_return; |
ce89f46c | 4654 | } |
c9de560d AT |
4655 | |
4656 | if (in_range(ext4_block_bitmap(sb, gdp), block, count) || | |
4657 | in_range(ext4_inode_bitmap(sb, gdp), block, count) || | |
4658 | in_range(block, ext4_inode_table(sb, gdp), | |
4659 | EXT4_SB(sb)->s_itb_per_group) || | |
4660 | in_range(block + count - 1, ext4_inode_table(sb, gdp), | |
4661 | EXT4_SB(sb)->s_itb_per_group)) { | |
4662 | ||
46e665e9 | 4663 | ext4_error(sb, __func__, |
c9de560d AT |
4664 | "Freeing blocks in system zone - " |
4665 | "Block = %lu, count = %lu", block, count); | |
519deca0 AK |
4666 | /* err = 0. ext4_std_error should be a no op */ |
4667 | goto error_return; | |
c9de560d AT |
4668 | } |
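/*
 * Worked example of the check above (illustrative numbers): if the
 * group's block bitmap lives at block 1000 and we are asked to free
 * block = 998, count = 5, then in_range(1000, 998, 5) is true and the
 * request is rejected, since the range 998..1002 would clobber the
 * bitmap.
 */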
4669 | ||
4670 | BUFFER_TRACE(bitmap_bh, "getting write access"); | |
4671 | err = ext4_journal_get_write_access(handle, bitmap_bh); | |
4672 | if (err) | |
4673 | goto error_return; | |
4674 | ||
4675 | /* | |
4676 | * We are about to modify some metadata. Call the journal APIs | |
4677 | * to unshare ->b_data if a currently-committing transaction is | |
4678 | * using it | |
4679 | */ | |
4680 | BUFFER_TRACE(gd_bh, "get_write_access"); | |
4681 | err = ext4_journal_get_write_access(handle, gd_bh); | |
4682 | if (err) | |
4683 | goto error_return; | |
4684 | ||
4685 | err = ext4_mb_load_buddy(sb, block_group, &e4b); | |
4686 | if (err) | |
4687 | goto error_return; | |
4688 | ||
4689 | #ifdef AGGRESSIVE_CHECK | |
4690 | { | |
4691 | int i; | |
4692 | for (i = 0; i < count; i++) | |
4693 | BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); | |
4694 | } | |
4695 | #endif | |
4696 | mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, | |
4697 | bit, count); | |
4698 | ||
4699 | /* We dirtied the bitmap block */ | |
4700 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | |
4701 | err = ext4_journal_dirty_metadata(handle, bitmap_bh); | |
4702 | ||
256bdb49 ES |
4703 | if (ac) { |
4704 | ac->ac_b_ex.fe_group = block_group; | |
4705 | ac->ac_b_ex.fe_start = bit; | |
4706 | ac->ac_b_ex.fe_len = count; | |
4707 | ext4_mb_store_history(ac); | |
4708 | } | |
c9de560d AT |
4709 | |
4710 | if (metadata) { | |
4711 | /* blocks being freed are metadata. these blocks shouldn't | |
4712 | * be used until this transaction is committed */ | |
4713 | ext4_mb_free_metadata(handle, &e4b, block_group, bit, count); | |
4714 | } else { | |
4715 | ext4_lock_group(sb, block_group); | |
7e5a8cdd | 4716 | mb_free_blocks(inode, &e4b, bit, count); |
c9de560d AT |
4717 | ext4_mb_return_to_preallocation(inode, &e4b, block, count); |
4718 | ext4_unlock_group(sb, block_group); | |
c9de560d AT |
4719 | } |
4720 | ||
4721 | spin_lock(sb_bgl_lock(sbi, block_group)); | |
e8546d06 | 4722 | le16_add_cpu(&gdp->bg_free_blocks_count, count); |
c9de560d AT |
4723 | gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); |
4724 | spin_unlock(sb_bgl_lock(sbi, block_group)); | |
4725 | percpu_counter_add(&sbi->s_freeblocks_counter, count); | |
4726 | ||
772cb7c8 JS |
4727 | if (sbi->s_log_groups_per_flex) { |
4728 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | |
4729 | spin_lock(sb_bgl_lock(sbi, flex_group)); | |
4730 | sbi->s_flex_groups[flex_group].free_blocks += count; | |
4731 | spin_unlock(sb_bgl_lock(sbi, flex_group)); | |
4732 | } | |
4733 | ||
c9de560d AT |
4734 | ext4_mb_release_desc(&e4b); |
4735 | ||
4736 | *freed += count; | |
4737 | ||
4738 | /* And the group descriptor block */ | |
4739 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | |
4740 | ret = ext4_journal_dirty_metadata(handle, gd_bh); | |
4741 | if (!err) | |
4742 | err = ret; | |
4743 | ||
4744 | if (overflow && !err) { | |
4745 | block += count; | |
4746 | count = overflow; | |
4747 | put_bh(bitmap_bh); | |
4748 | goto do_more; | |
4749 | } | |
4750 | sb->s_dirt = 1; | |
4751 | error_return: | |
4752 | brelse(bitmap_bh); | |
4753 | ext4_std_error(sb, err); | |
256bdb49 ES |
4754 | if (ac) |
4755 | kmem_cache_free(ext4_ac_cachep, ac); | |
c9de560d AT |
4756 | return; |
4757 | } |
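/*
 * Usage sketch (illustrative; assumes a kernel context with an open
 * journal handle, as in a caller like ext4_free_blocks()): the caller
 * passes the physical block range plus the metadata flag and learns
 * via "freed" how many blocks were actually released.
 */
static inline void example_free_range(handle_t *handle, struct inode *inode,
				      unsigned long block, unsigned long count)
{
	unsigned long freed;

	/* metadata == 1 defers reuse until this transaction commits */
	ext4_mb_free_blocks(handle, inode, block, count, 1, &freed);
}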