Commit | Line | Data |
---|---|---|
c9de560d AT |
1 | /* |
2 | * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com | |
3 | * Written by Alex Tomas <alex@clusterfs.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA | |
17 | */ | |
18 | ||
19 | ||
20 | /* | |
21 | * mballoc.c contains the multiblocks allocation routines | |
22 | */ | |
23 | ||
18aadd47 | 24 | #include "ext4_jbd2.h" |
8f6e39a7 | 25 | #include "mballoc.h" |
6ba495e9 | 26 | #include <linux/debugfs.h> |
28623c2f | 27 | #include <linux/log2.h> |
5a0e3ad6 | 28 | #include <linux/slab.h> |
9bffad1e TT |
29 | #include <trace/events/ext4.h> |
30 | ||
c9de560d AT |
31 | /* |
32 | * MUSTDO: | |
33 | * - test ext4_ext_search_left() and ext4_ext_search_right() | |
34 | * - search for metadata in few groups | |
35 | * | |
36 | * TODO v4: | |
37 | * - normalization should take into account whether file is still open | |
38 | * - discard preallocations if no free space left (policy?) | |
39 | * - don't normalize tails | |
40 | * - quota | |
41 | * - reservation for superuser | |
42 | * | |
43 | * TODO v3: | |
44 | * - bitmap read-ahead (proposed by Oleg Drokin aka green) | |
45 | * - track min/max extents in each group for better group selection | |
46 | * - mb_mark_used() may allocate chunk right after splitting buddy | |
47 | * - tree of groups sorted by number of free blocks | |
48 | * - error handling | |
49 | */ | |
50 | ||
51 | /* | |
52 | * An allocation request involves a request for multiple blocks |
53 | * near the specified goal (block) value. | |
54 | * | |
b713a5ec TT |
55 | * During the initialization phase of the allocator we decide to use |
56 | * group preallocation or inode preallocation depending on the size of | |
57 | * the file. The size of the file could be the resulting file size we | |
58 | * would have after allocation, or the current file size, whichever | |
59 | * is larger. If the size is less than sbi->s_mb_stream_request we | |
60 | * use group preallocation. The default value of | |
61 | * s_mb_stream_request is 16 blocks. This can also be tuned via | |
62 | * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in | |
63 | * terms of number of blocks. | |
c9de560d AT |
64 | * |
65 | * The main motivation for having small files use group preallocation is to | |
b713a5ec | 66 | * ensure that we keep small files close together on the disk. |
c9de560d | 67 | * |
b713a5ec TT |
68 | * In the first stage the allocator looks at the inode prealloc list, |
69 | * ext4_inode_info->i_prealloc_list, which contains a list of prealloc | |
70 | * spaces for this particular inode. The inode prealloc space is | |
71 | * represented as: | |
c9de560d AT |
72 | * |
73 | * pa_lstart -> the logical start block for this prealloc space | |
74 | * pa_pstart -> the physical start block for this prealloc space | |
53accfa9 TT |
75 | * pa_len -> length for this prealloc space (in clusters) |
76 | * pa_free -> free space available in this prealloc space (in clusters) | |
c9de560d AT |
77 | * |
78 | * The inode preallocation space is used based on the _logical_ start | |
79 | * block. Only if the logical file block falls within the range of a prealloc | |
caaf7a29 TM |
80 | * space do we consume that particular prealloc space. This makes sure that | |
81 | * we have contiguous physical blocks representing the file blocks. | |
c9de560d AT |
82 | * |
83 | * The important thing to note about inode prealloc space is that | |
84 | * we don't modify the values associated with inode prealloc space except | |
85 | * pa_free. | |
86 | * | |
87 | * If we are not able to find blocks in the inode prealloc space and if we | |
88 | * have the group allocation flag set then we look at the locality group | |
caaf7a29 | 89 | * prealloc space. This is a per-CPU prealloc list, represented as |
c9de560d AT |
90 | * |
91 | * ext4_sb_info.s_locality_groups[smp_processor_id()] | |
92 | * | |
93 | * The reason for having a per cpu locality group is to reduce the contention | |
94 | * between CPUs. It is possible to get scheduled at this point. | |
95 | * | |
96 | * The locality group prealloc space is used by checking whether we have | |
25985edc | 97 | * enough free space (pa_free) within the prealloc space. |
c9de560d AT |
98 | * |
99 | * If we can't allocate blocks via inode prealloc or/and locality group | |
100 | * prealloc then we look at the buddy cache. The buddy cache is represented | |
101 | * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets | |
102 | * mapped to the buddy and bitmap information regarding different | |
103 | * groups. The buddy information is attached to buddy cache inode so that | |
104 | * we can access them through the page cache. The information regarding | |
105 | * each group is loaded via ext4_mb_load_buddy. The information involves | |
106 | * the block bitmap and buddy information. The information is stored in the | |
107 | * inode as: | |
108 | * | |
109 | * { page } | |
c3a326a6 | 110 | * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... |
c9de560d AT |
111 | * |
112 | * | |
113 | * one block each for bitmap and buddy information. So for each group we | |
114 | * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / | |
115 | * blocksize) blocks. So it can hold information for groups_per_page groups, | |
116 | * which is blocks_per_page/2. | |
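 * (A worked example, assuming a 4 KiB PAGE_CACHE_SIZE: with a 1 KiB
 * blocksize, blocks_per_page = 4 and one page holds the bitmap and
 * buddy blocks of groups_per_page = 2 groups; with a 4 KiB blocksize,
 * blocks_per_page = 1, groups_per_page is forced up to 1, and the
 * bitmap and buddy of a group land on two consecutive pages.)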
117 | * | |
118 | * The buddy cache inode is not stored on disk. The inode is thrown | |
119 | * away when the filesystem is unmounted. | |
120 | * | |
121 | * We look for count number of blocks in the buddy cache. If we were able | |
122 | * to locate that many free blocks we return with additional information | |
123 | * regarding the rest of the contiguous physical blocks available | |
124 | * | |
125 | * Before allocating blocks via buddy cache we normalize the request | |
126 | * blocks. This ensures we ask for more blocks than we need. The extra | |
127 | * blocks that we get after allocation are added to the respective prealloc | |
128 | * list. In case of inode preallocation we follow a list of heuristics | |
129 | * based on file size. This can be found in ext4_mb_normalize_request. If | |
130 | * we are doing a group prealloc we try to normalize the request to | |
27baebb8 TT |
131 | * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is |
132 | * dependent on the cluster size; for non-bigalloc file systems, it is | |
c9de560d | 133 | * 512 blocks. This can be tuned via |
d7a1fee1 | 134 | * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in |
c9de560d AT |
135 | * terms of number of blocks. If we have mounted the file system with -O |
136 | * stripe=<value> option the group prealloc request is normalized to | |
d7a1fee1 DE |
137 | * the smallest multiple of the stripe value (sbi->s_stripe) which is | |
138 | * greater than the default mb_group_prealloc. | |
c9de560d | 139 | * |
d7a1fee1 | 140 | * The regular allocator (using the buddy cache) supports a few tunables. |
c9de560d | 141 | * |
b713a5ec TT |
142 | * /sys/fs/ext4/<partition>/mb_min_to_scan |
143 | * /sys/fs/ext4/<partition>/mb_max_to_scan | |
144 | * /sys/fs/ext4/<partition>/mb_order2_req | |
c9de560d | 145 | * |
b713a5ec | 146 | * The regular allocator uses buddy scan only if the request len is a power of |
c9de560d AT |
147 | * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The |
148 | * value of s_mb_order2_reqs can be tuned via | |
b713a5ec | 149 | * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to |
af901ca1 | 150 | * stripe size (sbi->s_stripe), we try to search for contiguous blocks of |
b713a5ec TT |
151 | * stripe size. This should result in better allocation on RAID setups. If |
152 | * not, we search in the specific group using bitmap for best extents. The | |
153 | * tunables min_to_scan and max_to_scan control the behaviour here. | |
c9de560d | 154 | * min_to_scan indicates how long mballoc __must__ look for a best |
b713a5ec | 155 | * extent and max_to_scan indicates how long mballoc __can__ look for a |
c9de560d AT |
156 | * best extent in the found extents. Searching for the blocks starts with |
157 | * the group specified as the goal value in allocation context via | |
158 | * ac_g_ex. Each group is first checked based on the criteria whether it | |
caaf7a29 | 159 | * can be used for allocation. ext4_mb_good_group explains how the groups are |
c9de560d AT |
160 | * checked. |
161 | * | |
162 | * Both prealloc spaces are populated as described above. So for the first | |
163 | * request we will hit the buddy cache, which will result in this prealloc | |
164 | * space getting filled. The prealloc space is then used for | |
165 | * subsequent requests. | |
166 | */ | |
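
/*
 * A minimal sketch (not part of mballoc.c) of the size-based choice
 * described above: take the larger of the current and resulting file
 * size and compare it against the s_mb_stream_request threshold.
 * Small files get the per-CPU locality-group preallocation, larger
 * files get per-inode preallocation.  The function and parameter names
 * are hypothetical; mballoc itself makes this decision on the
 * allocation context.
 */
static int demo_use_group_prealloc(unsigned long cur_size_blocks,
				   unsigned long alloc_size_blocks,
				   unsigned long mb_stream_request)
{
	unsigned long size = cur_size_blocks > alloc_size_blocks ?
			     cur_size_blocks : alloc_size_blocks;

	/* below the stream threshold: use group (locality) preallocation */
	return size < mb_stream_request;
}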
167 | ||
168 | /* | |
169 | * mballoc operates on the following data: | |
170 | * - on-disk bitmap | |
171 | * - in-core buddy (actually includes buddy and bitmap) | |
172 | * - preallocation descriptors (PAs) | |
173 | * | |
174 | * there are two types of preallocations: | |
175 | * - inode | |
176 | assigned to a specific inode and can be used for this inode only. | |
177 | it describes part of inode's space preallocated to specific | |
178 | physical blocks. any block from that preallocation can be used | |
179 | independently. the descriptor just tracks the number of blocks left | |
180 | unused. so, before taking some block from the descriptor, one must | |
181 | make sure the corresponding logical block isn't allocated yet. this | |
182 | * also means that freeing any block within descriptor's range | |
183 | * must discard all preallocated blocks. | |
184 | * - locality group | |
185 | assigned to a specific locality group, which does not translate to a | |
186 | permanent set of inodes: an inode can join and leave the group. space | |
187 | * from this type of preallocation can be used for any inode. thus | |
188 | * it's consumed from the beginning to the end. | |
189 | * | |
190 | * relation between them can be expressed as: | |
191 | * in-core buddy = on-disk bitmap + preallocation descriptors | |
192 | * | |
193 | * this means the blocks mballoc considers used are: | |
194 | * - allocated blocks (persistent) | |
195 | * - preallocated blocks (non-persistent) | |
196 | * | |
197 | * consistency in mballoc world means that at any time a block is either | |
198 | * free or used in ALL structures. notice: "any time" should not be read | |
199 | * literally -- time is discrete and delimited by locks. | |
200 | * | |
201 | * to keep it simple, we don't use block numbers, instead we count number of | |
202 | * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA. | |
203 | * | |
204 | * all operations can be expressed as: | |
205 | * - init buddy: buddy = on-disk + PAs | |
206 | * - new PA: buddy += N; PA = N | |
207 | * - use inode PA: on-disk += N; PA -= N | |
208 | * - discard inode PA buddy -= on-disk - PA; PA = 0 | |
209 | * - use locality group PA on-disk += N; PA -= N | |
210 | * - discard locality group PA buddy -= PA; PA = 0 | |
211 | * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap | |
212 | * is used in real operation because we can't know actual used | |
213 | * bits from PA, only from on-disk bitmap | |
214 | * | |
215 | * if we follow this strict logic, then all operations above should be atomic. | |
216 | * given some of them can block, we'd have to use something like semaphores | |
217 | * killing performance on high-end SMP hardware. let's try to relax it using | |
218 | * the following knowledge: | |
219 | * 1) if buddy is referenced, it's already initialized | |
220 | * 2) while block is used in buddy and the buddy is referenced, | |
221 | * nobody can re-allocate that block | |
222 | * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has | |
223 | * bit set and PA claims same block, it's OK. IOW, one can set bit in | |
224 | * on-disk bitmap if buddy has same bit set and/or PA covers the corresponding | |
225 | * block | |
226 | * | |
227 | * so, now we're building a concurrency table: | |
228 | * - init buddy vs. | |
229 | * - new PA | |
230 | * blocks for PA are allocated in the buddy, buddy must be referenced | |
231 | * until PA is linked to allocation group to avoid concurrent buddy init | |
232 | * - use inode PA | |
233 | * we need to make sure that either on-disk bitmap or PA has uptodate data | |
234 | * given (3) we care that PA-=N operation doesn't interfere with init | |
235 | * - discard inode PA | |
236 | * the simplest way would be to have buddy initialized by the discard | |
237 | * - use locality group PA | |
238 | * again PA-=N must be serialized with init | |
239 | * - discard locality group PA | |
240 | * the simplest way would be to have buddy initialized by the discard | |
241 | * - new PA vs. | |
242 | * - use inode PA | |
243 | * i_data_sem serializes them | |
244 | * - discard inode PA | |
245 | * discard process must wait until PA isn't used by another process | |
246 | * - use locality group PA | |
247 | * some mutex should serialize them | |
248 | * - discard locality group PA | |
249 | * discard process must wait until PA isn't used by another process | |
250 | * - use inode PA | |
251 | * - use inode PA | |
252 | * i_data_sem or another mutex should serializes them | |
253 | * - discard inode PA | |
254 | * discard process must wait until PA isn't used by another process | |
255 | * - use locality group PA | |
256 | * nothing wrong here -- they're different PAs covering different blocks | |
257 | * - discard locality group PA | |
258 | * discard process must wait until PA isn't used by another process | |
259 | * | |
260 | * now we're ready to draw a few conclusions: | |
261 | * - while a PA is referenced, no discard is possible | |
262 | * - a PA is referenced until its blocks are marked in the on-disk bitmap | |
263 | * - a PA changes only after the on-disk bitmap changes | |
264 | * - discard must not compete with init. either init is done before | |
265 | * any discard or they're serialized somehow | |
266 | * - buddy init as sum of on-disk bitmap and PAs is done atomically | |
267 | * | |
268 | * a special case is when we've used a PA to emptiness. no need to modify buddy | |
269 | * in this case, but we should care about concurrent init | |
270 | * | |
271 | */ | |
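
/*
 * Illustrative-only sketch of the accounting identity above,
 * "in-core buddy = on-disk bitmap + PAs", written as plain counters of
 * used blocks.  This is not mballoc code; it only mirrors the
 * "new PA / use PA / discard PA" arithmetic listed in the comment
 * (the discard shown is the locality-group variant, buddy -= PA).
 */
struct demo_counters {
	unsigned int ondisk;	/* blocks marked used in the on-disk bitmap */
	unsigned int pa;	/* blocks still held by preallocations */
	unsigned int buddy;	/* blocks marked used in the in-core buddy */
};

static void demo_new_pa(struct demo_counters *c, unsigned int n)
{
	c->buddy += n;			/* new PA: buddy += N; PA = N */
	c->pa += n;
}

static void demo_use_pa(struct demo_counters *c, unsigned int n)
{
	c->ondisk += n;			/* use PA: on-disk += N; PA -= N */
	c->pa -= n;			/* buddy already counts these blocks */
}

static void demo_discard_group_pa(struct demo_counters *c)
{
	c->buddy -= c->pa;		/* discard group PA: buddy -= PA */
	c->pa = 0;			/* invariant: buddy == ondisk + pa */
}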
272 | ||
273 | /* | |
274 | * Logic in a few words: | |
275 | * | |
276 | * - allocation: | |
277 | * load group | |
278 | * find blocks | |
279 | * mark bits in on-disk bitmap | |
280 | * release group | |
281 | * | |
282 | * - use preallocation: | |
283 | * find proper PA (per-inode or group) | |
284 | * load group | |
285 | * mark bits in on-disk bitmap | |
286 | * release group | |
287 | * release PA | |
288 | * | |
289 | * - free: | |
290 | * load group | |
291 | * mark bits in on-disk bitmap | |
292 | * release group | |
293 | * | |
294 | * - discard preallocations in group: | |
295 | * mark PAs deleted | |
296 | * move them onto local list | |
297 | * load on-disk bitmap | |
298 | * load group | |
299 | * remove PA from object (inode or locality group) | |
300 | * mark free blocks in-core | |
301 | * | |
302 | * - discard inode's preallocations: | |
303 | */ | |
304 | ||
305 | /* | |
306 | * Locking rules | |
307 | * | |
308 | * Locks: | |
309 | * - bitlock on a group (group) | |
310 | * - object (inode/locality) (object) | |
311 | * - per-pa lock (pa) | |
312 | * | |
313 | * Paths: | |
314 | * - new pa | |
315 | * object | |
316 | * group | |
317 | * | |
318 | * - find and use pa: | |
319 | * pa | |
320 | * | |
321 | * - release consumed pa: | |
322 | * pa | |
323 | * group | |
324 | * object | |
325 | * | |
326 | * - generate in-core bitmap: | |
327 | * group | |
328 | * pa | |
329 | * | |
330 | * - discard all for given object (inode, locality group): | |
331 | * object | |
332 | * pa | |
333 | * group | |
334 | * | |
335 | * - discard all for given group: | |
336 | * group | |
337 | * pa | |
338 | * group | |
339 | * object | |
340 | * | |
341 | */ | |
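
/*
 * Illustrative-only sketch of the lock ordering above for the
 * "release consumed pa" path (pa lock, then group bitlock, then the
 * owning object).  The pthread mutexes here are hypothetical userspace
 * stand-ins for the three lock classes, not the mballoc primitives.
 */
#include <pthread.h>

static pthread_mutex_t demo_pa_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_group_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_object_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_release_consumed_pa(void)
{
	pthread_mutex_lock(&demo_pa_lock);	/* per-PA lock first */
	pthread_mutex_lock(&demo_group_lock);	/* then the group bitlock */
	pthread_mutex_lock(&demo_object_lock);	/* then the inode/lg list lock */

	/* ... unlink the PA and return its unused blocks ... */

	pthread_mutex_unlock(&demo_object_lock);
	pthread_mutex_unlock(&demo_group_lock);
	pthread_mutex_unlock(&demo_pa_lock);
}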
c3a326a6 AK |
342 | static struct kmem_cache *ext4_pspace_cachep; |
343 | static struct kmem_cache *ext4_ac_cachep; | |
18aadd47 | 344 | static struct kmem_cache *ext4_free_data_cachep; |
fb1813f4 CW |
345 | |
346 | /* We create slab caches for groupinfo data structures based on the | |
347 | * superblock block size. There will be one per mounted filesystem for | |
348 | * each unique s_blocksize_bits */ | |
2892c15d | 349 | #define NR_GRPINFO_CACHES 8 |
fb1813f4 CW |
350 | static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; |
351 | ||
2892c15d ES |
352 | static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { |
353 | "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", | |
354 | "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", | |
355 | "ext4_groupinfo_64k", "ext4_groupinfo_128k" | |
356 | }; | |
357 | ||
c3a326a6 AK |
358 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, |
359 | ext4_group_t group); | |
7a2fcbf7 AK |
360 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, |
361 | ext4_group_t group); | |
18aadd47 BJ |
362 | static void ext4_free_data_callback(struct super_block *sb, |
363 | struct ext4_journal_cb_entry *jce, int rc); | |
c3a326a6 | 364 | |
ffad0a44 AK |
365 | static inline void *mb_correct_addr_and_bit(int *bit, void *addr) |
366 | { | |
c9de560d | 367 | #if BITS_PER_LONG == 64 |
ffad0a44 AK |
368 | *bit += ((unsigned long) addr & 7UL) << 3; |
369 | addr = (void *) ((unsigned long) addr & ~7UL); | |
c9de560d | 370 | #elif BITS_PER_LONG == 32 |
ffad0a44 AK |
371 | *bit += ((unsigned long) addr & 3UL) << 3; |
372 | addr = (void *) ((unsigned long) addr & ~3UL); | |
c9de560d AT |
373 | #else |
374 | #error "how many bits you are?!" | |
375 | #endif | |
ffad0a44 AK |
376 | return addr; |
377 | } | |
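
/*
 * Worked example (64-bit, illustrative): with addr = 0x...1005 and
 * bit = 3, the three low address bits (5) are folded into the bit
 * index, so bit becomes 3 + 5 * 8 = 43 and addr is rounded down to
 * 0x...1000.  The helpers below therefore always hand ext4_*_bit()
 * a long-aligned address.
 */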
c9de560d AT |
378 | |
379 | static inline int mb_test_bit(int bit, void *addr) | |
380 | { | |
381 | /* | |
382 | * ext4_test_bit on architecture like powerpc | |
383 | * needs unsigned long aligned address | |
384 | */ | |
ffad0a44 | 385 | addr = mb_correct_addr_and_bit(&bit, addr); |
c9de560d AT |
386 | return ext4_test_bit(bit, addr); |
387 | } | |
388 | ||
389 | static inline void mb_set_bit(int bit, void *addr) | |
390 | { | |
ffad0a44 | 391 | addr = mb_correct_addr_and_bit(&bit, addr); |
c9de560d AT |
392 | ext4_set_bit(bit, addr); |
393 | } | |
394 | ||
c9de560d AT |
395 | static inline void mb_clear_bit(int bit, void *addr) |
396 | { | |
ffad0a44 | 397 | addr = mb_correct_addr_and_bit(&bit, addr); |
c9de560d AT |
398 | ext4_clear_bit(bit, addr); |
399 | } | |
400 | ||
ffad0a44 AK |
401 | static inline int mb_find_next_zero_bit(void *addr, int max, int start) |
402 | { | |
e7dfb246 | 403 | int fix = 0, ret, tmpmax; |
ffad0a44 | 404 | addr = mb_correct_addr_and_bit(&fix, addr); |
e7dfb246 | 405 | tmpmax = max + fix; |
ffad0a44 AK |
406 | start += fix; |
407 | ||
e7dfb246 AK |
408 | ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; |
409 | if (ret > max) | |
410 | return max; | |
411 | return ret; | |
ffad0a44 AK |
412 | } |
413 | ||
414 | static inline int mb_find_next_bit(void *addr, int max, int start) | |
415 | { | |
e7dfb246 | 416 | int fix = 0, ret, tmpmax; |
ffad0a44 | 417 | addr = mb_correct_addr_and_bit(&fix, addr); |
e7dfb246 | 418 | tmpmax = max + fix; |
ffad0a44 AK |
419 | start += fix; |
420 | ||
e7dfb246 AK |
421 | ret = ext4_find_next_bit(addr, tmpmax, start) - fix; |
422 | if (ret > max) | |
423 | return max; | |
424 | return ret; | |
ffad0a44 AK |
425 | } |
426 | ||
c9de560d AT |
427 | static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) |
428 | { | |
429 | char *bb; | |
430 | ||
c5e8f3f3 | 431 | BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); |
c9de560d AT |
432 | BUG_ON(max == NULL); |
433 | ||
434 | if (order > e4b->bd_blkbits + 1) { | |
435 | *max = 0; | |
436 | return NULL; | |
437 | } | |
438 | ||
439 | /* at order 0 we see each particular block */ | |
84b775a3 CL |
440 | if (order == 0) { |
441 | *max = 1 << (e4b->bd_blkbits + 3); | |
c5e8f3f3 | 442 | return e4b->bd_bitmap; |
84b775a3 | 443 | } |
c9de560d | 444 | |
c5e8f3f3 | 445 | bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; |
c9de560d AT |
446 | *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; |
447 | ||
448 | return bb; | |
449 | } | |
450 | ||
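/*
 * Worked example for mb_find_buddy() (illustrative, 4 KiB blocksize):
 * order 0 returns the block bitmap itself with *max = 1 << (12 + 3) =
 * 32768 bits, one bit per block in the group.  Higher orders index
 * into bd_buddy through s_mb_offsets[]/s_mb_maxs[], each order's
 * bitmap being half the size of the one below it (16384 bits at
 * order 1, 8192 at order 2, and so on).
 */
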
451 | #ifdef DOUBLE_CHECK | |
452 | static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, | |
453 | int first, int count) | |
454 | { | |
455 | int i; | |
456 | struct super_block *sb = e4b->bd_sb; | |
457 | ||
458 | if (unlikely(e4b->bd_info->bb_bitmap == NULL)) | |
459 | return; | |
bc8e6740 | 460 | assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); |
c9de560d AT |
461 | for (i = 0; i < count; i++) { |
462 | if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { | |
463 | ext4_fsblk_t blocknr; | |
5661bd68 AM |
464 | |
465 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | |
53accfa9 | 466 | blocknr += EXT4_C2B(EXT4_SB(sb), first + i); |
5d1b1b3f | 467 | ext4_grp_locked_error(sb, e4b->bd_group, |
e29136f8 TT |
468 | inode ? inode->i_ino : 0, |
469 | blocknr, | |
470 | "freeing block already freed " | |
471 | "(bit %u)", | |
472 | first + i); | |
c9de560d AT |
473 | } |
474 | mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); | |
475 | } | |
476 | } | |
477 | ||
478 | static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) | |
479 | { | |
480 | int i; | |
481 | ||
482 | if (unlikely(e4b->bd_info->bb_bitmap == NULL)) | |
483 | return; | |
bc8e6740 | 484 | assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); |
c9de560d AT |
485 | for (i = 0; i < count; i++) { |
486 | BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); | |
487 | mb_set_bit(first + i, e4b->bd_info->bb_bitmap); | |
488 | } | |
489 | } | |
490 | ||
491 | static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) | |
492 | { | |
493 | if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { | |
494 | unsigned char *b1, *b2; | |
495 | int i; | |
496 | b1 = (unsigned char *) e4b->bd_info->bb_bitmap; | |
497 | b2 = (unsigned char *) bitmap; | |
498 | for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { | |
499 | if (b1[i] != b2[i]) { | |
9d8b9ec4 TT |
500 | ext4_msg(e4b->bd_sb, KERN_ERR, |
501 | "corruption in group %u " | |
502 | "at byte %u(%u): %x in copy != %x " | |
503 | "on disk/prealloc", | |
504 | e4b->bd_group, i, i * 8, b1[i], b2[i]); | |
c9de560d AT |
505 | BUG(); |
506 | } | |
507 | } | |
508 | } | |
509 | } | |
510 | ||
511 | #else | |
512 | static inline void mb_free_blocks_double(struct inode *inode, | |
513 | struct ext4_buddy *e4b, int first, int count) | |
514 | { | |
515 | return; | |
516 | } | |
517 | static inline void mb_mark_used_double(struct ext4_buddy *e4b, | |
518 | int first, int count) | |
519 | { | |
520 | return; | |
521 | } | |
522 | static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) | |
523 | { | |
524 | return; | |
525 | } | |
526 | #endif | |
527 | ||
528 | #ifdef AGGRESSIVE_CHECK | |
529 | ||
530 | #define MB_CHECK_ASSERT(assert) \ | |
531 | do { \ | |
532 | if (!(assert)) { \ | |
533 | printk(KERN_EMERG \ | |
534 | "Assertion failure in %s() at %s:%d: \"%s\"\n", \ | |
535 | function, file, line, # assert); \ | |
536 | BUG(); \ | |
537 | } \ | |
538 | } while (0) | |
539 | ||
540 | static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, | |
541 | const char *function, int line) | |
542 | { | |
543 | struct super_block *sb = e4b->bd_sb; | |
544 | int order = e4b->bd_blkbits + 1; | |
545 | int max; | |
546 | int max2; | |
547 | int i; | |
548 | int j; | |
549 | int k; | |
550 | int count; | |
551 | struct ext4_group_info *grp; | |
552 | int fragments = 0; | |
553 | int fstart; | |
554 | struct list_head *cur; | |
555 | void *buddy; | |
556 | void *buddy2; | |
557 | ||
c9de560d AT |
558 | { |
559 | static int mb_check_counter; | |
560 | if (mb_check_counter++ % 100 != 0) | |
561 | return 0; | |
562 | } | |
563 | ||
564 | while (order > 1) { | |
565 | buddy = mb_find_buddy(e4b, order, &max); | |
566 | MB_CHECK_ASSERT(buddy); | |
567 | buddy2 = mb_find_buddy(e4b, order - 1, &max2); | |
568 | MB_CHECK_ASSERT(buddy2); | |
569 | MB_CHECK_ASSERT(buddy != buddy2); | |
570 | MB_CHECK_ASSERT(max * 2 == max2); | |
571 | ||
572 | count = 0; | |
573 | for (i = 0; i < max; i++) { | |
574 | ||
575 | if (mb_test_bit(i, buddy)) { | |
576 | /* only single bit in buddy2 may be 1 */ | |
577 | if (!mb_test_bit(i << 1, buddy2)) { | |
578 | MB_CHECK_ASSERT( | |
579 | mb_test_bit((i<<1)+1, buddy2)); | |
580 | } else if (!mb_test_bit((i << 1) + 1, buddy2)) { | |
581 | MB_CHECK_ASSERT( | |
582 | mb_test_bit(i << 1, buddy2)); | |
583 | } | |
584 | continue; | |
585 | } | |
586 | ||
0a10da73 | 587 | /* both bits in buddy2 must be 1 */ |
c9de560d AT |
588 | MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); |
589 | MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); | |
590 | ||
591 | for (j = 0; j < (1 << order); j++) { | |
592 | k = (i * (1 << order)) + j; | |
593 | MB_CHECK_ASSERT( | |
c5e8f3f3 | 594 | !mb_test_bit(k, e4b->bd_bitmap)); |
c9de560d AT |
595 | } |
596 | count++; | |
597 | } | |
598 | MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); | |
599 | order--; | |
600 | } | |
601 | ||
602 | fstart = -1; | |
603 | buddy = mb_find_buddy(e4b, 0, &max); | |
604 | for (i = 0; i < max; i++) { | |
605 | if (!mb_test_bit(i, buddy)) { | |
606 | MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); | |
607 | if (fstart == -1) { | |
608 | fragments++; | |
609 | fstart = i; | |
610 | } | |
611 | continue; | |
612 | } | |
613 | fstart = -1; | |
614 | /* check used bits only */ | |
615 | for (j = 0; j < e4b->bd_blkbits + 1; j++) { | |
616 | buddy2 = mb_find_buddy(e4b, j, &max2); | |
617 | k = i >> j; | |
618 | MB_CHECK_ASSERT(k < max2); | |
619 | MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); | |
620 | } | |
621 | } | |
622 | MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); | |
623 | MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); | |
624 | ||
625 | grp = ext4_get_group_info(sb, e4b->bd_group); | |
c9de560d AT |
626 | list_for_each(cur, &grp->bb_prealloc_list) { |
627 | ext4_group_t groupnr; | |
628 | struct ext4_prealloc_space *pa; | |
60bd63d1 SR |
629 | pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); |
630 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); | |
c9de560d | 631 | MB_CHECK_ASSERT(groupnr == e4b->bd_group); |
60bd63d1 | 632 | for (i = 0; i < pa->pa_len; i++) |
c9de560d AT |
633 | MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); |
634 | } | |
635 | return 0; | |
636 | } | |
637 | #undef MB_CHECK_ASSERT | |
638 | #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ | |
46e665e9 | 639 | __FILE__, __func__, __LINE__) |
c9de560d AT |
640 | #else |
641 | #define mb_check_buddy(e4b) | |
642 | #endif | |
643 | ||
7c786059 CL |
644 | /* |
645 | * Divide blocks started from @first with length @len into | |
646 | * smaller chunks with power of 2 blocks. | |
647 | * Clear the bits in bitmap which the blocks of the chunk(s) covered, | |
648 | * then increase bb_counters[] for corresponded chunk size. | |
649 | */ | |
c9de560d | 650 | static void ext4_mb_mark_free_simple(struct super_block *sb, |
a36b4498 | 651 | void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, |
c9de560d AT |
652 | struct ext4_group_info *grp) |
653 | { | |
654 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
a36b4498 ES |
655 | ext4_grpblk_t min; |
656 | ext4_grpblk_t max; | |
657 | ext4_grpblk_t chunk; | |
c9de560d AT |
658 | unsigned short border; |
659 | ||
7137d7a4 | 660 | BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); |
c9de560d AT |
661 | |
662 | border = 2 << sb->s_blocksize_bits; | |
663 | ||
664 | while (len > 0) { | |
665 | /* find how many blocks can be covered since this position */ | |
666 | max = ffs(first | border) - 1; | |
667 | ||
668 | /* find how many blocks of power 2 we need to mark */ | |
669 | min = fls(len) - 1; | |
670 | ||
671 | if (max < min) | |
672 | min = max; | |
673 | chunk = 1 << min; | |
674 | ||
675 | /* mark multiblock chunks only */ | |
676 | grp->bb_counters[min]++; | |
677 | if (min > 0) | |
678 | mb_clear_bit(first >> min, | |
679 | buddy + sbi->s_mb_offsets[min]); | |
680 | ||
681 | len -= chunk; | |
682 | first += chunk; | |
683 | } | |
684 | } | |
685 | ||
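/*
 * Worked example (illustrative): a free extent of length 7 starting at
 * block 5 is decomposed into power-of-2, naturally aligned chunks: one
 * block at 5 (order 0), two blocks at 6 (order 1) and four blocks at 8
 * (order 2).  bb_counters[0], [1] and [2] each gain one, and the
 * order-1 and order-2 buddy bitmaps get the matching bits cleared.
 */
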
8a57d9d6 CW |
686 | /* |
687 | * Cache the order of the largest free extent we have available in this block | |
688 | * group. | |
689 | */ | |
690 | static void | |
691 | mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) | |
692 | { | |
693 | int i; | |
694 | int bits; | |
695 | ||
696 | grp->bb_largest_free_order = -1; /* uninit */ | |
697 | ||
698 | bits = sb->s_blocksize_bits + 1; | |
699 | for (i = bits; i >= 0; i--) { | |
700 | if (grp->bb_counters[i] > 0) { | |
701 | grp->bb_largest_free_order = i; | |
702 | break; | |
703 | } | |
704 | } | |
705 | } | |
706 | ||
089ceecc ES |
707 | static noinline_for_stack |
708 | void ext4_mb_generate_buddy(struct super_block *sb, | |
c9de560d AT |
709 | void *buddy, void *bitmap, ext4_group_t group) |
710 | { | |
711 | struct ext4_group_info *grp = ext4_get_group_info(sb, group); | |
7137d7a4 | 712 | ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); |
a36b4498 ES |
713 | ext4_grpblk_t i = 0; |
714 | ext4_grpblk_t first; | |
715 | ext4_grpblk_t len; | |
c9de560d AT |
716 | unsigned free = 0; |
717 | unsigned fragments = 0; | |
718 | unsigned long long period = get_cycles(); | |
719 | ||
720 | /* initialize buddy from bitmap which is aggregation | |
721 | * of on-disk bitmap and preallocations */ | |
ffad0a44 | 722 | i = mb_find_next_zero_bit(bitmap, max, 0); |
c9de560d AT |
723 | grp->bb_first_free = i; |
724 | while (i < max) { | |
725 | fragments++; | |
726 | first = i; | |
ffad0a44 | 727 | i = mb_find_next_bit(bitmap, max, i); |
c9de560d AT |
728 | len = i - first; |
729 | free += len; | |
730 | if (len > 1) | |
731 | ext4_mb_mark_free_simple(sb, buddy, first, len, grp); | |
732 | else | |
733 | grp->bb_counters[0]++; | |
734 | if (i < max) | |
ffad0a44 | 735 | i = mb_find_next_zero_bit(bitmap, max, i); |
c9de560d AT |
736 | } |
737 | grp->bb_fragments = fragments; | |
738 | ||
739 | if (free != grp->bb_free) { | |
e29136f8 | 740 | ext4_grp_locked_error(sb, group, 0, 0, |
53accfa9 | 741 | "%u clusters in bitmap, %u in gd", |
e29136f8 | 742 | free, grp->bb_free); |
e56eb659 AK |
743 | /* |
744 | * If we intend to continue, we consider the group descriptor | |
745 | * corrupt and update bb_free using the bitmap value | |
746 | */ | |
c9de560d AT |
747 | grp->bb_free = free; |
748 | } | |
8a57d9d6 | 749 | mb_set_largest_free_order(sb, grp); |
c9de560d AT |
750 | |
751 | clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); | |
752 | ||
753 | period = get_cycles() - period; | |
754 | spin_lock(&EXT4_SB(sb)->s_bal_lock); | |
755 | EXT4_SB(sb)->s_mb_buddies_generated++; | |
756 | EXT4_SB(sb)->s_mb_generation_time += period; | |
757 | spin_unlock(&EXT4_SB(sb)->s_bal_lock); | |
758 | } | |
759 | ||
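/*
 * Worked example (illustrative): if the aggregated bitmap passed in as
 * @bitmap has free runs at clusters 2..4 and 9..10, the scan above
 * sets bb_first_free = 2, bb_fragments = 2 and free = 5; each
 * multi-block run is handed to ext4_mb_mark_free_simple() to fill the
 * per-order counters and buddy bitmaps, while a single free block
 * would only bump bb_counters[0].
 */
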
760 | /* The buddy information is attached to the buddy cache inode | |
761 | * for convenience. The information regarding each group | |
762 | * is loaded via ext4_mb_load_buddy. The information involves | |
763 | * the block bitmap and buddy information. The information is | |
764 | * stored in the inode as | |
765 | * | |
766 | * { page } | |
c3a326a6 | 767 | * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... |
c9de560d AT |
768 | * |
769 | * | |
770 | * one block each for bitmap and buddy information. | |
771 | * So for each group we take up 2 blocks. A page can | |
772 | * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. | |
773 | * So it can have information regarding groups_per_page which | |
774 | * is blocks_per_page/2 | |
8a57d9d6 CW |
775 | * |
776 | * Locking note: This routine takes the block group lock of all groups | |
777 | * for this page; do not hold this lock when calling this routine! | |
c9de560d AT |
778 | */ |
779 | ||
780 | static int ext4_mb_init_cache(struct page *page, char *incore) | |
781 | { | |
8df9675f | 782 | ext4_group_t ngroups; |
c9de560d AT |
783 | int blocksize; |
784 | int blocks_per_page; | |
785 | int groups_per_page; | |
786 | int err = 0; | |
787 | int i; | |
813e5727 | 788 | ext4_group_t first_group, group; |
c9de560d AT |
789 | int first_block; |
790 | struct super_block *sb; | |
791 | struct buffer_head *bhs; | |
fa77dcfa | 792 | struct buffer_head **bh = NULL; |
c9de560d AT |
793 | struct inode *inode; |
794 | char *data; | |
795 | char *bitmap; | |
9b8b7d35 | 796 | struct ext4_group_info *grinfo; |
c9de560d | 797 | |
6ba495e9 | 798 | mb_debug(1, "init page %lu\n", page->index); |
c9de560d AT |
799 | |
800 | inode = page->mapping->host; | |
801 | sb = inode->i_sb; | |
8df9675f | 802 | ngroups = ext4_get_groups_count(sb); |
c9de560d AT |
803 | blocksize = 1 << inode->i_blkbits; |
804 | blocks_per_page = PAGE_CACHE_SIZE / blocksize; | |
805 | ||
806 | groups_per_page = blocks_per_page >> 1; | |
807 | if (groups_per_page == 0) | |
808 | groups_per_page = 1; | |
809 | ||
810 | /* allocate buffer_heads to read bitmaps */ | |
811 | if (groups_per_page > 1) { | |
c9de560d AT |
812 | i = sizeof(struct buffer_head *) * groups_per_page; |
813 | bh = kzalloc(i, GFP_NOFS); | |
813e5727 TT |
814 | if (bh == NULL) { |
815 | err = -ENOMEM; | |
c9de560d | 816 | goto out; |
813e5727 | 817 | } |
c9de560d AT |
818 | } else |
819 | bh = &bhs; | |
820 | ||
821 | first_group = page->index * blocks_per_page / 2; | |
822 | ||
823 | /* read all groups the page covers into the cache */ | |
813e5727 TT |
824 | for (i = 0, group = first_group; i < groups_per_page; i++, group++) { |
825 | if (group >= ngroups) | |
c9de560d AT |
826 | break; |
827 | ||
813e5727 | 828 | grinfo = ext4_get_group_info(sb, group); |
9b8b7d35 AG |
829 | /* |
830 | * If page is uptodate then we came here after online resize | |
831 | * which added some new uninitialized group info structs, so | |
832 | * we must skip all initialized uptodate buddies on the page, | |
833 | * which may be currently in use by an allocating task. | |
834 | */ | |
835 | if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { | |
836 | bh[i] = NULL; | |
837 | continue; | |
838 | } | |
813e5727 TT |
839 | if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) { |
840 | err = -ENOMEM; | |
c9de560d | 841 | goto out; |
2ccb5fb9 | 842 | } |
813e5727 | 843 | mb_debug(1, "read bitmap for group %u\n", group); |
c9de560d AT |
844 | } |
845 | ||
846 | /* wait for I/O completion */ | |
813e5727 TT |
847 | for (i = 0, group = first_group; i < groups_per_page; i++, group++) { |
848 | if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) { | |
849 | err = -EIO; | |
c9de560d | 850 | goto out; |
813e5727 TT |
851 | } |
852 | } | |
c9de560d AT |
853 | |
854 | first_block = page->index * blocks_per_page; | |
855 | for (i = 0; i < blocks_per_page; i++) { | |
856 | int group; | |
c9de560d AT |
857 | |
858 | group = (first_block + i) >> 1; | |
8df9675f | 859 | if (group >= ngroups) |
c9de560d AT |
860 | break; |
861 | ||
9b8b7d35 AG |
862 | if (!bh[group - first_group]) |
863 | /* skip initialized uptodate buddy */ | |
864 | continue; | |
865 | ||
c9de560d AT |
866 | /* |
867 | * data carries information regarding this | |
868 | * particular group in the format specified | |
869 | * above | |
870 | * | |
871 | */ | |
872 | data = page_address(page) + (i * blocksize); | |
873 | bitmap = bh[group - first_group]->b_data; | |
874 | ||
875 | /* | |
876 | * We place the buddy block and bitmap block | |
877 | * close together | |
878 | */ | |
879 | if ((first_block + i) & 1) { | |
880 | /* this is block of buddy */ | |
881 | BUG_ON(incore == NULL); | |
6ba495e9 | 882 | mb_debug(1, "put buddy for group %u in page %lu/%x\n", |
c9de560d | 883 | group, page->index, i * blocksize); |
f307333e | 884 | trace_ext4_mb_buddy_bitmap_load(sb, group); |
c9de560d AT |
885 | grinfo = ext4_get_group_info(sb, group); |
886 | grinfo->bb_fragments = 0; | |
887 | memset(grinfo->bb_counters, 0, | |
1927805e ES |
888 | sizeof(*grinfo->bb_counters) * |
889 | (sb->s_blocksize_bits+2)); | |
c9de560d AT |
890 | /* |
891 | * incore got set to the group block bitmap below | |
892 | */ | |
7a2fcbf7 | 893 | ext4_lock_group(sb, group); |
9b8b7d35 AG |
894 | /* init the buddy */ |
895 | memset(data, 0xff, blocksize); | |
c9de560d | 896 | ext4_mb_generate_buddy(sb, data, incore, group); |
7a2fcbf7 | 897 | ext4_unlock_group(sb, group); |
c9de560d AT |
898 | incore = NULL; |
899 | } else { | |
900 | /* this is block of bitmap */ | |
901 | BUG_ON(incore != NULL); | |
6ba495e9 | 902 | mb_debug(1, "put bitmap for group %u in page %lu/%x\n", |
c9de560d | 903 | group, page->index, i * blocksize); |
f307333e | 904 | trace_ext4_mb_bitmap_load(sb, group); |
c9de560d AT |
905 | |
906 | /* see comments in ext4_mb_put_pa() */ | |
907 | ext4_lock_group(sb, group); | |
908 | memcpy(data, bitmap, blocksize); | |
909 | ||
910 | /* mark all preallocated blks used in in-core bitmap */ | |
911 | ext4_mb_generate_from_pa(sb, data, group); | |
7a2fcbf7 | 912 | ext4_mb_generate_from_freelist(sb, data, group); |
c9de560d AT |
913 | ext4_unlock_group(sb, group); |
914 | ||
915 | /* set incore so that the buddy information can be | |
916 | * generated using this | |
917 | */ | |
918 | incore = data; | |
919 | } | |
920 | } | |
921 | SetPageUptodate(page); | |
922 | ||
923 | out: | |
924 | if (bh) { | |
9b8b7d35 | 925 | for (i = 0; i < groups_per_page; i++) |
c9de560d AT |
926 | brelse(bh[i]); |
927 | if (bh != &bhs) | |
928 | kfree(bh); | |
929 | } | |
930 | return err; | |
931 | } | |
932 | ||
eee4adc7 | 933 | /* |
2de8807b AG |
934 | * Lock the buddy and bitmap pages. This makes sure another parallel init_group | |
935 | * on the same buddy page doesn't happen while holding the buddy page lock. | |
936 | * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap | |
937 | * are on the same page e4b->bd_buddy_page is NULL and return value is 0. | |
eee4adc7 | 938 | */ |
2de8807b AG |
939 | static int ext4_mb_get_buddy_page_lock(struct super_block *sb, |
940 | ext4_group_t group, struct ext4_buddy *e4b) | |
eee4adc7 | 941 | { |
2de8807b AG |
942 | struct inode *inode = EXT4_SB(sb)->s_buddy_cache; |
943 | int block, pnum, poff; | |
eee4adc7 | 944 | int blocks_per_page; |
2de8807b AG |
945 | struct page *page; |
946 | ||
947 | e4b->bd_buddy_page = NULL; | |
948 | e4b->bd_bitmap_page = NULL; | |
eee4adc7 ES |
949 | |
950 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | |
951 | /* | |
952 | * the buddy cache inode stores the block bitmap | |
953 | * and buddy information in consecutive blocks. | |
954 | * So for each group we need two blocks. | |
955 | */ | |
956 | block = group * 2; | |
957 | pnum = block / blocks_per_page; | |
2de8807b AG |
958 | poff = block % blocks_per_page; |
959 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | |
960 | if (!page) | |
961 | return -EIO; | |
962 | BUG_ON(page->mapping != inode->i_mapping); | |
963 | e4b->bd_bitmap_page = page; | |
964 | e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); | |
965 | ||
966 | if (blocks_per_page >= 2) { | |
967 | /* buddy and bitmap are on the same page */ | |
968 | return 0; | |
eee4adc7 | 969 | } |
2de8807b AG |
970 | |
971 | block++; | |
972 | pnum = block / blocks_per_page; | |
2de8807b AG |
973 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); |
974 | if (!page) | |
975 | return -EIO; | |
976 | BUG_ON(page->mapping != inode->i_mapping); | |
977 | e4b->bd_buddy_page = page; | |
978 | return 0; | |
eee4adc7 ES |
979 | } |
980 | ||
2de8807b | 981 | static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) |
eee4adc7 | 982 | { |
2de8807b AG |
983 | if (e4b->bd_bitmap_page) { |
984 | unlock_page(e4b->bd_bitmap_page); | |
985 | page_cache_release(e4b->bd_bitmap_page); | |
986 | } | |
987 | if (e4b->bd_buddy_page) { | |
988 | unlock_page(e4b->bd_buddy_page); | |
989 | page_cache_release(e4b->bd_buddy_page); | |
eee4adc7 | 990 | } |
eee4adc7 ES |
991 | } |
992 | ||
8a57d9d6 CW |
993 | /* |
994 | * Locking note: This routine calls ext4_mb_init_cache(), which takes the | |
995 | * block group lock of all groups for this page; do not hold the BG lock when | |
996 | * calling this routine! | |
997 | */ | |
b6a758ec AK |
998 | static noinline_for_stack |
999 | int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) | |
1000 | { | |
1001 | ||
b6a758ec | 1002 | struct ext4_group_info *this_grp; |
2de8807b AG |
1003 | struct ext4_buddy e4b; |
1004 | struct page *page; | |
1005 | int ret = 0; | |
b6a758ec AK |
1006 | |
1007 | mb_debug(1, "init group %u\n", group); | |
b6a758ec AK |
1008 | this_grp = ext4_get_group_info(sb, group); |
1009 | /* | |
08c3a813 AK |
1010 | * This ensures that we don't reinit the buddy cache |
1011 | * page which maps to the group from which we are already | |
1012 | * allocating. If we are looking at the buddy cache we would | |
1013 | * have taken a reference using ext4_mb_load_buddy and that | |
2de8807b | 1014 | * would have pinned buddy page to page cache. |
b6a758ec | 1015 | */ |
2de8807b AG |
1016 | ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); |
1017 | if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { | |
b6a758ec AK |
1018 | /* |
1019 | * somebody initialized the group | |
1020 | * return without doing anything | |
1021 | */ | |
b6a758ec AK |
1022 | goto err; |
1023 | } | |
2de8807b AG |
1024 | |
1025 | page = e4b.bd_bitmap_page; | |
1026 | ret = ext4_mb_init_cache(page, NULL); | |
1027 | if (ret) | |
1028 | goto err; | |
1029 | if (!PageUptodate(page)) { | |
b6a758ec AK |
1030 | ret = -EIO; |
1031 | goto err; | |
1032 | } | |
1033 | mark_page_accessed(page); | |
b6a758ec | 1034 | |
2de8807b | 1035 | if (e4b.bd_buddy_page == NULL) { |
b6a758ec AK |
1036 | /* |
1037 | * If both the bitmap and buddy are in | |
1038 | * the same page we don't need to force | |
1039 | * init the buddy | |
1040 | */ | |
2de8807b AG |
1041 | ret = 0; |
1042 | goto err; | |
b6a758ec | 1043 | } |
2de8807b AG |
1044 | /* init buddy cache */ |
1045 | page = e4b.bd_buddy_page; | |
1046 | ret = ext4_mb_init_cache(page, e4b.bd_bitmap); | |
1047 | if (ret) | |
1048 | goto err; | |
1049 | if (!PageUptodate(page)) { | |
b6a758ec AK |
1050 | ret = -EIO; |
1051 | goto err; | |
1052 | } | |
1053 | mark_page_accessed(page); | |
1054 | err: | |
2de8807b | 1055 | ext4_mb_put_buddy_page_lock(&e4b); |
b6a758ec AK |
1056 | return ret; |
1057 | } | |
1058 | ||
8a57d9d6 CW |
1059 | /* |
1060 | * Locking note: This routine calls ext4_mb_init_cache(), which takes the | |
1061 | * block group lock of all groups for this page; do not hold the BG lock when | |
1062 | * calling this routine! | |
1063 | */ | |
4ddfef7b ES |
1064 | static noinline_for_stack int |
1065 | ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, | |
1066 | struct ext4_buddy *e4b) | |
c9de560d | 1067 | { |
c9de560d AT |
1068 | int blocks_per_page; |
1069 | int block; | |
1070 | int pnum; | |
1071 | int poff; | |
1072 | struct page *page; | |
fdf6c7a7 | 1073 | int ret; |
920313a7 AK |
1074 | struct ext4_group_info *grp; |
1075 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
1076 | struct inode *inode = sbi->s_buddy_cache; | |
c9de560d | 1077 | |
6ba495e9 | 1078 | mb_debug(1, "load group %u\n", group); |
c9de560d AT |
1079 | |
1080 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | |
920313a7 | 1081 | grp = ext4_get_group_info(sb, group); |
c9de560d AT |
1082 | |
1083 | e4b->bd_blkbits = sb->s_blocksize_bits; | |
529da704 | 1084 | e4b->bd_info = grp; |
c9de560d AT |
1085 | e4b->bd_sb = sb; |
1086 | e4b->bd_group = group; | |
1087 | e4b->bd_buddy_page = NULL; | |
1088 | e4b->bd_bitmap_page = NULL; | |
1089 | ||
f41c0750 | 1090 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { |
f41c0750 AK |
1091 | /* |
1092 | * we need full data about the group | |
1093 | * to make a good selection | |
1094 | */ | |
1095 | ret = ext4_mb_init_group(sb, group); | |
1096 | if (ret) | |
1097 | return ret; | |
f41c0750 AK |
1098 | } |
1099 | ||
c9de560d AT |
1100 | /* |
1101 | * the buddy cache inode stores the block bitmap | |
1102 | * and buddy information in consecutive blocks. | |
1103 | * So for each group we need two blocks. | |
1104 | */ | |
1105 | block = group * 2; | |
1106 | pnum = block / blocks_per_page; | |
1107 | poff = block % blocks_per_page; | |
1108 | ||
1109 | /* we could use find_or_create_page(), but it locks the page, | |
1110 | * which we'd like to avoid in the fast path ... */ | |
1111 | page = find_get_page(inode->i_mapping, pnum); | |
1112 | if (page == NULL || !PageUptodate(page)) { | |
1113 | if (page) | |
920313a7 AK |
1114 | /* |
1115 | * drop the page reference and try | |
1116 | * to get the page with lock. If we | |
1117 | * are not uptodate that implies | |
1118 | * somebody just created the page but | |
1119 | * is yet to initialize the same. So | |
1120 | * wait for it to initialize. | |
1121 | */ | |
c9de560d AT |
1122 | page_cache_release(page); |
1123 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | |
1124 | if (page) { | |
1125 | BUG_ON(page->mapping != inode->i_mapping); | |
1126 | if (!PageUptodate(page)) { | |
fdf6c7a7 SF |
1127 | ret = ext4_mb_init_cache(page, NULL); |
1128 | if (ret) { | |
1129 | unlock_page(page); | |
1130 | goto err; | |
1131 | } | |
c9de560d AT |
1132 | mb_cmp_bitmaps(e4b, page_address(page) + |
1133 | (poff * sb->s_blocksize)); | |
1134 | } | |
1135 | unlock_page(page); | |
1136 | } | |
1137 | } | |
fdf6c7a7 SF |
1138 | if (page == NULL || !PageUptodate(page)) { |
1139 | ret = -EIO; | |
c9de560d | 1140 | goto err; |
fdf6c7a7 | 1141 | } |
c9de560d AT |
1142 | e4b->bd_bitmap_page = page; |
1143 | e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); | |
1144 | mark_page_accessed(page); | |
1145 | ||
1146 | block++; | |
1147 | pnum = block / blocks_per_page; | |
1148 | poff = block % blocks_per_page; | |
1149 | ||
1150 | page = find_get_page(inode->i_mapping, pnum); | |
1151 | if (page == NULL || !PageUptodate(page)) { | |
1152 | if (page) | |
1153 | page_cache_release(page); | |
1154 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | |
1155 | if (page) { | |
1156 | BUG_ON(page->mapping != inode->i_mapping); | |
fdf6c7a7 SF |
1157 | if (!PageUptodate(page)) { |
1158 | ret = ext4_mb_init_cache(page, e4b->bd_bitmap); | |
1159 | if (ret) { | |
1160 | unlock_page(page); | |
1161 | goto err; | |
1162 | } | |
1163 | } | |
c9de560d AT |
1164 | unlock_page(page); |
1165 | } | |
1166 | } | |
fdf6c7a7 SF |
1167 | if (page == NULL || !PageUptodate(page)) { |
1168 | ret = -EIO; | |
c9de560d | 1169 | goto err; |
fdf6c7a7 | 1170 | } |
c9de560d AT |
1171 | e4b->bd_buddy_page = page; |
1172 | e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); | |
1173 | mark_page_accessed(page); | |
1174 | ||
1175 | BUG_ON(e4b->bd_bitmap_page == NULL); | |
1176 | BUG_ON(e4b->bd_buddy_page == NULL); | |
1177 | ||
1178 | return 0; | |
1179 | ||
1180 | err: | |
26626f11 YR |
1181 | if (page) |
1182 | page_cache_release(page); | |
c9de560d AT |
1183 | if (e4b->bd_bitmap_page) |
1184 | page_cache_release(e4b->bd_bitmap_page); | |
1185 | if (e4b->bd_buddy_page) | |
1186 | page_cache_release(e4b->bd_buddy_page); | |
1187 | e4b->bd_buddy = NULL; | |
1188 | e4b->bd_bitmap = NULL; | |
fdf6c7a7 | 1189 | return ret; |
c9de560d AT |
1190 | } |
1191 | ||
e39e07fd | 1192 | static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) |
c9de560d AT |
1193 | { |
1194 | if (e4b->bd_bitmap_page) | |
1195 | page_cache_release(e4b->bd_bitmap_page); | |
1196 | if (e4b->bd_buddy_page) | |
1197 | page_cache_release(e4b->bd_buddy_page); | |
1198 | } | |
1199 | ||
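/*
 * Typical usage pattern of the load/unload pair above (sketch only,
 * with a hypothetical function name and the real work elided): load
 * the buddy, take the group's bitlock while touching the in-core
 * bitmap and buddy, then drop the lock and the page references.
 */
static int demo_with_group_buddy(struct super_block *sb, ext4_group_t group)
{
	struct ext4_buddy e4b;
	int err;

	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err)
		return err;

	ext4_lock_group(sb, group);
	/* ... inspect or modify e4b.bd_bitmap / the buddy bitmaps ... */
	ext4_unlock_group(sb, group);

	ext4_mb_unload_buddy(&e4b);
	return 0;
}
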
1200 | ||
1201 | static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) | |
1202 | { | |
1203 | int order = 1; | |
1204 | void *bb; | |
1205 | ||
c5e8f3f3 | 1206 | BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); |
c9de560d AT |
1207 | BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); |
1208 | ||
c5e8f3f3 | 1209 | bb = e4b->bd_buddy; |
c9de560d AT |
1210 | while (order <= e4b->bd_blkbits + 1) { |
1211 | block = block >> 1; | |
1212 | if (!mb_test_bit(block, bb)) { | |
1213 | /* this block is part of buddy of order 'order' */ | |
1214 | return order; | |
1215 | } | |
1216 | bb += 1 << (e4b->bd_blkbits - order); | |
1217 | order++; | |
1218 | } | |
1219 | return 0; | |
1220 | } | |
1221 | ||
955ce5f5 | 1222 | static void mb_clear_bits(void *bm, int cur, int len) |
c9de560d AT |
1223 | { |
1224 | __u32 *addr; | |
1225 | ||
1226 | len = cur + len; | |
1227 | while (cur < len) { | |
1228 | if ((cur & 31) == 0 && (len - cur) >= 32) { | |
1229 | /* fast path: clear whole word at once */ | |
1230 | addr = bm + (cur >> 3); | |
1231 | *addr = 0; | |
1232 | cur += 32; | |
1233 | continue; | |
1234 | } | |
955ce5f5 | 1235 | mb_clear_bit(cur, bm); |
c9de560d AT |
1236 | cur++; |
1237 | } | |
1238 | } | |
1239 | ||
c3e94d1d | 1240 | void ext4_set_bits(void *bm, int cur, int len) |
c9de560d AT |
1241 | { |
1242 | __u32 *addr; | |
1243 | ||
1244 | len = cur + len; | |
1245 | while (cur < len) { | |
1246 | if ((cur & 31) == 0 && (len - cur) >= 32) { | |
1247 | /* fast path: set whole word at once */ | |
1248 | addr = bm + (cur >> 3); | |
1249 | *addr = 0xffffffff; | |
1250 | cur += 32; | |
1251 | continue; | |
1252 | } | |
955ce5f5 | 1253 | mb_set_bit(cur, bm); |
c9de560d AT |
1254 | cur++; |
1255 | } | |
1256 | } | |
1257 | ||
7e5a8cdd | 1258 | static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, |
c9de560d AT |
1259 | int first, int count) |
1260 | { | |
1261 | int block = 0; | |
1262 | int max = 0; | |
1263 | int order; | |
1264 | void *buddy; | |
1265 | void *buddy2; | |
1266 | struct super_block *sb = e4b->bd_sb; | |
1267 | ||
1268 | BUG_ON(first + count > (sb->s_blocksize << 3)); | |
bc8e6740 | 1269 | assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); |
c9de560d AT |
1270 | mb_check_buddy(e4b); |
1271 | mb_free_blocks_double(inode, e4b, first, count); | |
1272 | ||
1273 | e4b->bd_info->bb_free += count; | |
1274 | if (first < e4b->bd_info->bb_first_free) | |
1275 | e4b->bd_info->bb_first_free = first; | |
1276 | ||
1277 | /* let's maintain fragments counter */ | |
1278 | if (first != 0) | |
c5e8f3f3 | 1279 | block = !mb_test_bit(first - 1, e4b->bd_bitmap); |
c9de560d | 1280 | if (first + count < EXT4_SB(sb)->s_mb_maxs[0]) |
c5e8f3f3 | 1281 | max = !mb_test_bit(first + count, e4b->bd_bitmap); |
c9de560d AT |
1282 | if (block && max) |
1283 | e4b->bd_info->bb_fragments--; | |
1284 | else if (!block && !max) | |
1285 | e4b->bd_info->bb_fragments++; | |
1286 | ||
1287 | /* let's maintain buddy itself */ | |
1288 | while (count-- > 0) { | |
1289 | block = first++; | |
1290 | order = 0; | |
1291 | ||
c5e8f3f3 | 1292 | if (!mb_test_bit(block, e4b->bd_bitmap)) { |
c9de560d | 1293 | ext4_fsblk_t blocknr; |
5661bd68 AM |
1294 | |
1295 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | |
53accfa9 | 1296 | blocknr += EXT4_C2B(EXT4_SB(sb), block); |
5d1b1b3f | 1297 | ext4_grp_locked_error(sb, e4b->bd_group, |
e29136f8 TT |
1298 | inode ? inode->i_ino : 0, |
1299 | blocknr, | |
1300 | "freeing already freed block " | |
1301 | "(bit %u)", block); | |
c9de560d | 1302 | } |
c5e8f3f3 | 1303 | mb_clear_bit(block, e4b->bd_bitmap); |
c9de560d AT |
1304 | e4b->bd_info->bb_counters[order]++; |
1305 | ||
1306 | /* start of the buddy */ | |
1307 | buddy = mb_find_buddy(e4b, order, &max); | |
1308 | ||
1309 | do { | |
1310 | block &= ~1UL; | |
1311 | if (mb_test_bit(block, buddy) || | |
1312 | mb_test_bit(block + 1, buddy)) | |
1313 | break; | |
1314 | ||
1315 | /* both the buddies are free, try to coalesce them */ | |
1316 | buddy2 = mb_find_buddy(e4b, order + 1, &max); | |
1317 | ||
1318 | if (!buddy2) | |
1319 | break; | |
1320 | ||
1321 | if (order > 0) { | |
1322 | /* for special purposes, we don't set | |
1323 | * free bits in bitmap */ | |
1324 | mb_set_bit(block, buddy); | |
1325 | mb_set_bit(block + 1, buddy); | |
1326 | } | |
1327 | e4b->bd_info->bb_counters[order]--; | |
1328 | e4b->bd_info->bb_counters[order]--; | |
1329 | ||
1330 | block = block >> 1; | |
1331 | order++; | |
1332 | e4b->bd_info->bb_counters[order]++; | |
1333 | ||
1334 | mb_clear_bit(block, buddy2); | |
1335 | buddy = buddy2; | |
1336 | } while (1); | |
1337 | } | |
8a57d9d6 | 1338 | mb_set_largest_free_order(sb, e4b->bd_info); |
c9de560d | 1339 | mb_check_buddy(e4b); |
c9de560d AT |
1340 | } |
1341 | ||
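/*
 * Worked example for the coalescing loop above (illustrative): freeing
 * block 6 while block 7 is already free and blocks 4-5 are in use
 * merges the pair (6,7) into one order-1 buddy: the two order-0
 * entries are replaced by a single order-1 entry (bb_counters[0] -= 2,
 * bb_counters[1] += 1), and the walk stops at order 1 because the
 * neighbouring order-1 buddy covering blocks 4-5 is still in use.
 */
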
15c006a2 | 1342 | static int mb_find_extent(struct ext4_buddy *e4b, int block, |
c9de560d AT |
1343 | int needed, struct ext4_free_extent *ex) |
1344 | { | |
1345 | int next = block; | |
15c006a2 | 1346 | int max, order; |
c9de560d AT |
1347 | void *buddy; |
1348 | ||
bc8e6740 | 1349 | assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); |
c9de560d AT |
1350 | BUG_ON(ex == NULL); |
1351 | ||
15c006a2 | 1352 | buddy = mb_find_buddy(e4b, 0, &max); |
c9de560d AT |
1353 | BUG_ON(buddy == NULL); |
1354 | BUG_ON(block >= max); | |
1355 | if (mb_test_bit(block, buddy)) { | |
1356 | ex->fe_len = 0; | |
1357 | ex->fe_start = 0; | |
1358 | ex->fe_group = 0; | |
1359 | return 0; | |
1360 | } | |
1361 | ||
15c006a2 RD |
1362 | /* find actual order */ |
1363 | order = mb_find_order_for_block(e4b, block); | |
1364 | block = block >> order; | |
c9de560d AT |
1365 | |
1366 | ex->fe_len = 1 << order; | |
1367 | ex->fe_start = block << order; | |
1368 | ex->fe_group = e4b->bd_group; | |
1369 | ||
1370 | /* calc difference from given start */ | |
1371 | next = next - ex->fe_start; | |
1372 | ex->fe_len -= next; | |
1373 | ex->fe_start += next; | |
1374 | ||
1375 | while (needed > ex->fe_len && | |
1376 | (buddy = mb_find_buddy(e4b, order, &max))) { | |
1377 | ||
1378 | if (block + 1 >= max) | |
1379 | break; | |
1380 | ||
1381 | next = (block + 1) * (1 << order); | |
c5e8f3f3 | 1382 | if (mb_test_bit(next, e4b->bd_bitmap)) |
c9de560d AT |
1383 | break; |
1384 | ||
b051d8dc | 1385 | order = mb_find_order_for_block(e4b, next); |
c9de560d | 1386 | |
c9de560d AT |
1387 | block = next >> order; |
1388 | ex->fe_len += 1 << order; | |
1389 | } | |
1390 | ||
1391 | BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))); | |
1392 | return ex->fe_len; | |
1393 | } | |
1394 | ||
1395 | static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) | |
1396 | { | |
1397 | int ord; | |
1398 | int mlen = 0; | |
1399 | int max = 0; | |
1400 | int cur; | |
1401 | int start = ex->fe_start; | |
1402 | int len = ex->fe_len; | |
1403 | unsigned ret = 0; | |
1404 | int len0 = len; | |
1405 | void *buddy; | |
1406 | ||
1407 | BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); | |
1408 | BUG_ON(e4b->bd_group != ex->fe_group); | |
bc8e6740 | 1409 | assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); |
c9de560d AT |
1410 | mb_check_buddy(e4b); |
1411 | mb_mark_used_double(e4b, start, len); | |
1412 | ||
1413 | e4b->bd_info->bb_free -= len; | |
1414 | if (e4b->bd_info->bb_first_free == start) | |
1415 | e4b->bd_info->bb_first_free += len; | |
1416 | ||
1417 | /* let's maintain fragments counter */ | |
1418 | if (start != 0) | |
c5e8f3f3 | 1419 | mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); |
c9de560d | 1420 | if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) |
c5e8f3f3 | 1421 | max = !mb_test_bit(start + len, e4b->bd_bitmap); |
c9de560d AT |
1422 | if (mlen && max) |
1423 | e4b->bd_info->bb_fragments++; | |
1424 | else if (!mlen && !max) | |
1425 | e4b->bd_info->bb_fragments--; | |
1426 | ||
1427 | /* let's maintain buddy itself */ | |
1428 | while (len) { | |
1429 | ord = mb_find_order_for_block(e4b, start); | |
1430 | ||
1431 | if (((start >> ord) << ord) == start && len >= (1 << ord)) { | |
1432 | /* the whole chunk may be allocated at once! */ | |
1433 | mlen = 1 << ord; | |
1434 | buddy = mb_find_buddy(e4b, ord, &max); | |
1435 | BUG_ON((start >> ord) >= max); | |
1436 | mb_set_bit(start >> ord, buddy); | |
1437 | e4b->bd_info->bb_counters[ord]--; | |
1438 | start += mlen; | |
1439 | len -= mlen; | |
1440 | BUG_ON(len < 0); | |
1441 | continue; | |
1442 | } | |
1443 | ||
1444 | /* store for history */ | |
1445 | if (ret == 0) | |
1446 | ret = len | (ord << 16); | |
1447 | ||
1448 | /* we have to split large buddy */ | |
1449 | BUG_ON(ord <= 0); | |
1450 | buddy = mb_find_buddy(e4b, ord, &max); | |
1451 | mb_set_bit(start >> ord, buddy); | |
1452 | e4b->bd_info->bb_counters[ord]--; | |
1453 | ||
1454 | ord--; | |
1455 | cur = (start >> ord) & ~1U; | |
1456 | buddy = mb_find_buddy(e4b, ord, &max); | |
1457 | mb_clear_bit(cur, buddy); | |
1458 | mb_clear_bit(cur + 1, buddy); | |
1459 | e4b->bd_info->bb_counters[ord]++; | |
1460 | e4b->bd_info->bb_counters[ord]++; | |
1461 | } | |
8a57d9d6 | 1462 | mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); |
c9de560d | 1463 | |
c5e8f3f3 | 1464 | ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0); |
c9de560d AT |
1465 | mb_check_buddy(e4b); |
1466 | ||
1467 | return ret; | |
1468 | } | |
1469 | ||
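/*
 * A brief sketch of the value mb_mark_used() returns: it stays 0 as long
 * as the request is covered by whole, aligned buddy chunks; on the first
 * split of a larger buddy it is set to (len | (ord << 16)), i.e. the low
 * 16 bits hold the length still unallocated at that point and the high
 * bits hold the order of the buddy being split.  The caller unpacks this
 * into ac_tail and ac_buddy for history/statistics only.
 */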
1470 | /* | |
1471 | * Must be called under group lock! | |
1472 | */ | |
1473 | static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, | |
1474 | struct ext4_buddy *e4b) | |
1475 | { | |
1476 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
1477 | int ret; | |
1478 | ||
1479 | BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); | |
1480 | BUG_ON(ac->ac_status == AC_STATUS_FOUND); | |
1481 | ||
1482 | ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); | |
1483 | ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; | |
1484 | ret = mb_mark_used(e4b, &ac->ac_b_ex); | |
1485 | ||
1486 | /* preallocation can change ac_b_ex, thus we store the actually |
1487 | * allocated blocks for history */ | |
1488 | ac->ac_f_ex = ac->ac_b_ex; | |
1489 | ||
1490 | ac->ac_status = AC_STATUS_FOUND; | |
1491 | ac->ac_tail = ret & 0xffff; | |
1492 | ac->ac_buddy = ret >> 16; | |
1493 | ||
c3a326a6 AK |
1494 | /* |
1495 | * take the page reference. We want the page to be pinned | |
1496 | * so that we don't get an ext4_mb_init_cache() call for this |
1497 | * group until we update the bitmap. That would mean we |
1498 | * could double-allocate blocks. The reference is dropped |
1499 | * in ext4_mb_release_context | |
1500 | */ | |
c9de560d AT |
1501 | ac->ac_bitmap_page = e4b->bd_bitmap_page; |
1502 | get_page(ac->ac_bitmap_page); | |
1503 | ac->ac_buddy_page = e4b->bd_buddy_page; | |
1504 | get_page(ac->ac_buddy_page); | |
c9de560d | 1505 | /* store last allocated for subsequent stream allocation */ |
4ba74d00 | 1506 | if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { |
c9de560d AT |
1507 | spin_lock(&sbi->s_md_lock); |
1508 | sbi->s_mb_last_group = ac->ac_f_ex.fe_group; | |
1509 | sbi->s_mb_last_start = ac->ac_f_ex.fe_start; | |
1510 | spin_unlock(&sbi->s_md_lock); | |
1511 | } | |
1512 | } | |
1513 | ||
1514 | /* | |
1515 | * regular allocator, for general purposes allocation | |
1516 | */ | |
1517 | ||
1518 | static void ext4_mb_check_limits(struct ext4_allocation_context *ac, | |
1519 | struct ext4_buddy *e4b, | |
1520 | int finish_group) | |
1521 | { | |
1522 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
1523 | struct ext4_free_extent *bex = &ac->ac_b_ex; | |
1524 | struct ext4_free_extent *gex = &ac->ac_g_ex; | |
1525 | struct ext4_free_extent ex; | |
1526 | int max; | |
1527 | ||
032115fc AK |
1528 | if (ac->ac_status == AC_STATUS_FOUND) |
1529 | return; | |
c9de560d AT |
1530 | /* |
1531 | * We don't want to scan for a whole year | |
1532 | */ | |
1533 | if (ac->ac_found > sbi->s_mb_max_to_scan && | |
1534 | !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { | |
1535 | ac->ac_status = AC_STATUS_BREAK; | |
1536 | return; | |
1537 | } | |
1538 | ||
1539 | /* | |
1540 | * Haven't found a good chunk so far, let's continue |
1541 | */ | |
1542 | if (bex->fe_len < gex->fe_len) | |
1543 | return; | |
1544 | ||
1545 | if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) | |
1546 | && bex->fe_group == e4b->bd_group) { | |
1547 | /* recheck chunk's availability - we don't know | |
1548 | * when it was found (within this lock-unlock | |
1549 | * period or not) */ | |
15c006a2 | 1550 | max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); |
c9de560d AT |
1551 | if (max >= gex->fe_len) { |
1552 | ext4_mb_use_best_found(ac, e4b); | |
1553 | return; | |
1554 | } | |
1555 | } | |
1556 | } | |
1557 | ||
1558 | /* | |
1559 | * The routine checks whether the found extent is good enough. If it is, |
1560 | * then the extent gets marked used and a flag is set in the context |
1561 | * to stop scanning. Otherwise, the extent is compared with the |
1562 | * previously found extent and if the new one is better, then it's stored |
1563 | * in the context. Later, the best found extent will be used, if |
1564 | * mballoc can't find a good enough extent. |
1565 | * | |
1566 | * FIXME: real allocation policy is to be designed yet! | |
1567 | */ | |
1568 | static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, | |
1569 | struct ext4_free_extent *ex, | |
1570 | struct ext4_buddy *e4b) | |
1571 | { | |
1572 | struct ext4_free_extent *bex = &ac->ac_b_ex; | |
1573 | struct ext4_free_extent *gex = &ac->ac_g_ex; | |
1574 | ||
1575 | BUG_ON(ex->fe_len <= 0); | |
7137d7a4 TT |
1576 | BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); |
1577 | BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); | |
c9de560d AT |
1578 | BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); |
1579 | ||
1580 | ac->ac_found++; | |
1581 | ||
1582 | /* | |
1583 | * The special case - take what you catch first | |
1584 | */ | |
1585 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { | |
1586 | *bex = *ex; | |
1587 | ext4_mb_use_best_found(ac, e4b); | |
1588 | return; | |
1589 | } | |
1590 | ||
1591 | /* | |
1592 | * Let's check whether the chunk is good enough |
1593 | */ | |
1594 | if (ex->fe_len == gex->fe_len) { | |
1595 | *bex = *ex; | |
1596 | ext4_mb_use_best_found(ac, e4b); | |
1597 | return; | |
1598 | } | |
1599 | ||
1600 | /* | |
1601 | * If this is the first found extent, just store it in the context |
1602 | */ | |
1603 | if (bex->fe_len == 0) { | |
1604 | *bex = *ex; | |
1605 | return; | |
1606 | } | |
1607 | ||
1608 | /* | |
1609 | * If the newly found extent is better, store it in the context |
1610 | */ | |
1611 | if (bex->fe_len < gex->fe_len) { | |
1612 | /* if the request isn't satisfied, any found extent | |
1613 | * larger than the previous best one is better */ |
1614 | if (ex->fe_len > bex->fe_len) | |
1615 | *bex = *ex; | |
1616 | } else if (ex->fe_len > gex->fe_len) { | |
1617 | /* if the request is satisfied, then we try to find | |
1618 | * an extent that still satisfies the request, but is |
1619 | * smaller than the previous one */ |
1620 | if (ex->fe_len < bex->fe_len) | |
1621 | *bex = *ex; | |
1622 | } | |
1623 | ||
1624 | ext4_mb_check_limits(ac, e4b, 0); | |
1625 | } | |
1626 | ||
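/*
 * Summary of the selection policy above: with EXT4_MB_HINT_FIRST the first
 * extent found wins; an extent exactly matching the goal length is used
 * immediately; otherwise, while the best extent so far is still shorter
 * than the goal, a longer extent replaces it, and once the goal is met a
 * smaller extent that still satisfies the goal is preferred (less wasted
 * space).  ext4_mb_check_limits() may then stop the scan after
 * s_mb_max_to_scan extents, or, once the best extent reaches the goal
 * length, accept it after s_mb_min_to_scan candidates.
 */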
089ceecc ES |
1627 | static noinline_for_stack |
1628 | int ext4_mb_try_best_found(struct ext4_allocation_context *ac, | |
c9de560d AT |
1629 | struct ext4_buddy *e4b) |
1630 | { | |
1631 | struct ext4_free_extent ex = ac->ac_b_ex; | |
1632 | ext4_group_t group = ex.fe_group; | |
1633 | int max; | |
1634 | int err; | |
1635 | ||
1636 | BUG_ON(ex.fe_len <= 0); | |
1637 | err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); | |
1638 | if (err) | |
1639 | return err; | |
1640 | ||
1641 | ext4_lock_group(ac->ac_sb, group); | |
15c006a2 | 1642 | max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); |
c9de560d AT |
1643 | |
1644 | if (max > 0) { | |
1645 | ac->ac_b_ex = ex; | |
1646 | ext4_mb_use_best_found(ac, e4b); | |
1647 | } | |
1648 | ||
1649 | ext4_unlock_group(ac->ac_sb, group); | |
e39e07fd | 1650 | ext4_mb_unload_buddy(e4b); |
c9de560d AT |
1651 | |
1652 | return 0; | |
1653 | } | |
1654 | ||
089ceecc ES |
1655 | static noinline_for_stack |
1656 | int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, | |
c9de560d AT |
1657 | struct ext4_buddy *e4b) |
1658 | { | |
1659 | ext4_group_t group = ac->ac_g_ex.fe_group; | |
1660 | int max; | |
1661 | int err; | |
1662 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
838cd0cf | 1663 | struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); |
c9de560d AT |
1664 | struct ext4_free_extent ex; |
1665 | ||
1666 | if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) | |
1667 | return 0; | |
838cd0cf YY |
1668 | if (grp->bb_free == 0) |
1669 | return 0; | |
c9de560d AT |
1670 | |
1671 | err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); | |
1672 | if (err) | |
1673 | return err; | |
1674 | ||
1675 | ext4_lock_group(ac->ac_sb, group); | |
15c006a2 | 1676 | max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, |
c9de560d AT |
1677 | ac->ac_g_ex.fe_len, &ex); |
1678 | ||
1679 | if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { | |
1680 | ext4_fsblk_t start; | |
1681 | ||
5661bd68 AM |
1682 | start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + |
1683 | ex.fe_start; | |
c9de560d AT |
1684 | /* use do_div to get remainder (would be 64-bit modulo) */ |
1685 | if (do_div(start, sbi->s_stripe) == 0) { | |
1686 | ac->ac_found++; | |
1687 | ac->ac_b_ex = ex; | |
1688 | ext4_mb_use_best_found(ac, e4b); | |
1689 | } | |
1690 | } else if (max >= ac->ac_g_ex.fe_len) { | |
1691 | BUG_ON(ex.fe_len <= 0); | |
1692 | BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); | |
1693 | BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); | |
1694 | ac->ac_found++; | |
1695 | ac->ac_b_ex = ex; | |
1696 | ext4_mb_use_best_found(ac, e4b); | |
1697 | } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { | |
1698 | /* Sometimes, the caller may want to merge even a small |
1699 | * number of blocks into an existing extent */ |
1700 | BUG_ON(ex.fe_len <= 0); | |
1701 | BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); | |
1702 | BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); | |
1703 | ac->ac_found++; | |
1704 | ac->ac_b_ex = ex; | |
1705 | ext4_mb_use_best_found(ac, e4b); | |
1706 | } | |
1707 | ext4_unlock_group(ac->ac_sb, group); | |
e39e07fd | 1708 | ext4_mb_unload_buddy(e4b); |
c9de560d AT |
1709 | |
1710 | return 0; | |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * The routine scans buddy structures (not the bitmap!) from the given order |
1715 | * to the max order and tries to find a big enough chunk to satisfy the request |
1716 | */ | |
089ceecc ES |
1717 | static noinline_for_stack |
1718 | void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, | |
c9de560d AT |
1719 | struct ext4_buddy *e4b) |
1720 | { | |
1721 | struct super_block *sb = ac->ac_sb; | |
1722 | struct ext4_group_info *grp = e4b->bd_info; | |
1723 | void *buddy; | |
1724 | int i; | |
1725 | int k; | |
1726 | int max; | |
1727 | ||
1728 | BUG_ON(ac->ac_2order <= 0); | |
1729 | for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { | |
1730 | if (grp->bb_counters[i] == 0) | |
1731 | continue; | |
1732 | ||
1733 | buddy = mb_find_buddy(e4b, i, &max); | |
1734 | BUG_ON(buddy == NULL); | |
1735 | ||
ffad0a44 | 1736 | k = mb_find_next_zero_bit(buddy, max, 0); |
c9de560d AT |
1737 | BUG_ON(k >= max); |
1738 | ||
1739 | ac->ac_found++; | |
1740 | ||
1741 | ac->ac_b_ex.fe_len = 1 << i; | |
1742 | ac->ac_b_ex.fe_start = k << i; | |
1743 | ac->ac_b_ex.fe_group = e4b->bd_group; | |
1744 | ||
1745 | ext4_mb_use_best_found(ac, e4b); | |
1746 | ||
1747 | BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); | |
1748 | ||
1749 | if (EXT4_SB(sb)->s_mb_stats) | |
1750 | atomic_inc(&EXT4_SB(sb)->s_bal_2orders); | |
1751 | ||
1752 | break; | |
1753 | } | |
1754 | } | |
1755 | ||
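/*
 * Note on the scan above: bb_counters[i] > 0 means the group has at least
 * one completely free order-i buddy, so mb_find_next_zero_bit() on the
 * order-i buddy bitmap is guaranteed to find one, and the allocation is an
 * exact, aligned 2^i chunk (fe_start = k << i) without scanning the block
 * bitmap.  E.g. with ac_2order == 3 and bb_counters[3] == 2, an aligned
 * 8-cluster chunk is taken directly.
 */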
1756 | /* | |
1757 | * The routine scans the group and measures all found extents. | |
1758 | * In order to optimize scanning, the caller must pass the number of |
1759 | * free blocks in the group, so the routine knows the upper limit. |
1760 | */ | |
089ceecc ES |
1761 | static noinline_for_stack |
1762 | void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, | |
c9de560d AT |
1763 | struct ext4_buddy *e4b) |
1764 | { | |
1765 | struct super_block *sb = ac->ac_sb; | |
c5e8f3f3 | 1766 | void *bitmap = e4b->bd_bitmap; |
c9de560d AT |
1767 | struct ext4_free_extent ex; |
1768 | int i; | |
1769 | int free; | |
1770 | ||
1771 | free = e4b->bd_info->bb_free; | |
1772 | BUG_ON(free <= 0); | |
1773 | ||
1774 | i = e4b->bd_info->bb_first_free; | |
1775 | ||
1776 | while (free && ac->ac_status == AC_STATUS_CONTINUE) { | |
ffad0a44 | 1777 | i = mb_find_next_zero_bit(bitmap, |
7137d7a4 TT |
1778 | EXT4_CLUSTERS_PER_GROUP(sb), i); |
1779 | if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { | |
26346ff6 | 1780 | /* |
e56eb659 | 1781 | * If we have a corrupt bitmap, we won't find any |
26346ff6 AK |
1782 | * free blocks even though the group info says |
1783 | * we have free blocks | |
1784 | */ | |
e29136f8 | 1785 | ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, |
53accfa9 | 1786 | "%d free clusters as per " |
fde4d95a | 1787 | "group info. But bitmap says 0", |
26346ff6 | 1788 | free); |
c9de560d AT |
1789 | break; |
1790 | } | |
1791 | ||
15c006a2 | 1792 | mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); |
c9de560d | 1793 | BUG_ON(ex.fe_len <= 0); |
26346ff6 | 1794 | if (free < ex.fe_len) { |
e29136f8 | 1795 | ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, |
53accfa9 | 1796 | "%d free clusters as per " |
fde4d95a | 1797 | "group info. But got %d blocks", |
26346ff6 | 1798 | free, ex.fe_len); |
e56eb659 AK |
1799 | /* |
1800 | * The number of free blocks differs. This mostly | |
1801 | * indicate that the bitmap is corrupt. So exit | |
1802 | * without claiming the space. | |
1803 | */ | |
1804 | break; | |
26346ff6 | 1805 | } |
c9de560d AT |
1806 | |
1807 | ext4_mb_measure_extent(ac, &ex, e4b); | |
1808 | ||
1809 | i += ex.fe_len; | |
1810 | free -= ex.fe_len; | |
1811 | } | |
1812 | ||
1813 | ext4_mb_check_limits(ac, e4b, 1); | |
1814 | } | |
1815 | ||
1816 | /* | |
1817 | * This is a special case for storage like raid5; |
506bf2d8 | 1818 | * we try to find stripe-aligned chunks for stripe-size-multiple requests |
c9de560d | 1819 | */ |
089ceecc ES |
1820 | static noinline_for_stack |
1821 | void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, | |
c9de560d AT |
1822 | struct ext4_buddy *e4b) |
1823 | { | |
1824 | struct super_block *sb = ac->ac_sb; | |
1825 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
c5e8f3f3 | 1826 | void *bitmap = e4b->bd_bitmap; |
c9de560d AT |
1827 | struct ext4_free_extent ex; |
1828 | ext4_fsblk_t first_group_block; | |
1829 | ext4_fsblk_t a; | |
1830 | ext4_grpblk_t i; | |
1831 | int max; | |
1832 | ||
1833 | BUG_ON(sbi->s_stripe == 0); | |
1834 | ||
1835 | /* find first stripe-aligned block in group */ | |
5661bd68 AM |
1836 | first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); |
1837 | ||
c9de560d AT |
1838 | a = first_group_block + sbi->s_stripe - 1; |
1839 | do_div(a, sbi->s_stripe); | |
1840 | i = (a * sbi->s_stripe) - first_group_block; | |
1841 | ||
7137d7a4 | 1842 | while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { |
c9de560d | 1843 | if (!mb_test_bit(i, bitmap)) { |
15c006a2 | 1844 | max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); |
c9de560d AT |
1845 | if (max >= sbi->s_stripe) { |
1846 | ac->ac_found++; | |
1847 | ac->ac_b_ex = ex; | |
1848 | ext4_mb_use_best_found(ac, e4b); | |
1849 | break; | |
1850 | } | |
1851 | } | |
1852 | i += sbi->s_stripe; | |
1853 | } | |
1854 | } | |
1855 | ||
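/*
 * Worked example for the alignment arithmetic above: with s_stripe == 16
 * and first_group_block == 32770, a = (32770 + 15) / 16 = 2049, so
 * i = 2049 * 16 - 32770 = 14; scanning starts at group-relative cluster 14,
 * whose global block number (32784) is a multiple of the stripe size.
 * These numbers only illustrate the rounding and are not taken from a real
 * filesystem.
 */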
8a57d9d6 | 1856 | /* This is now called BEFORE we load the buddy bitmap. */ |
c9de560d AT |
1857 | static int ext4_mb_good_group(struct ext4_allocation_context *ac, |
1858 | ext4_group_t group, int cr) | |
1859 | { | |
1860 | unsigned free, fragments; | |
a4912123 | 1861 | int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); |
c9de560d AT |
1862 | struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); |
1863 | ||
1864 | BUG_ON(cr < 0 || cr >= 4); | |
8a57d9d6 | 1865 | |
01fc48e8 TT |
1866 | free = grp->bb_free; |
1867 | if (free == 0) | |
1868 | return 0; | |
1869 | if (cr <= 2 && free < ac->ac_g_ex.fe_len) | |
1870 | return 0; | |
1871 | ||
8a57d9d6 CW |
1872 | /* We only do this if the grp has never been initialized */ |
1873 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { | |
1874 | int ret = ext4_mb_init_group(ac->ac_sb, group); | |
1875 | if (ret) | |
1876 | return 0; | |
1877 | } | |
c9de560d | 1878 | |
c9de560d | 1879 | fragments = grp->bb_fragments; |
c9de560d AT |
1880 | if (fragments == 0) |
1881 | return 0; | |
1882 | ||
1883 | switch (cr) { | |
1884 | case 0: | |
1885 | BUG_ON(ac->ac_2order == 0); | |
c9de560d | 1886 | |
8a57d9d6 CW |
1887 | if (grp->bb_largest_free_order < ac->ac_2order) |
1888 | return 0; | |
1889 | ||
a4912123 TT |
1890 | /* Avoid using the first bg of a flexgroup for data files */ |
1891 | if ((ac->ac_flags & EXT4_MB_HINT_DATA) && | |
1892 | (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && | |
1893 | ((group % flex_size) == 0)) | |
1894 | return 0; | |
1895 | ||
8a57d9d6 | 1896 | return 1; |
c9de560d AT |
1897 | case 1: |
1898 | if ((free / fragments) >= ac->ac_g_ex.fe_len) | |
1899 | return 1; | |
1900 | break; | |
1901 | case 2: | |
1902 | if (free >= ac->ac_g_ex.fe_len) | |
1903 | return 1; | |
1904 | break; | |
1905 | case 3: | |
1906 | return 1; | |
1907 | default: | |
1908 | BUG(); | |
1909 | } | |
1910 | ||
1911 | return 0; | |
1912 | } | |
1913 | ||
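/*
 * A rough summary of the criteria checked above:
 *   cr 0 - only groups whose largest free order covers the (power-of-two)
 *          request; data files also avoid the first group of a flexgroup
 *   cr 1 - average free extent (free / fragments) is at least the goal size
 *   cr 2 - total free space is at least the goal size
 *   cr 3 - any group with free blocks
 * In all cases a group with bb_free == 0 is skipped, and for cr <= 2 groups
 * with less free space than the goal are skipped up front.
 */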
4ddfef7b ES |
1914 | static noinline_for_stack int |
1915 | ext4_mb_regular_allocator(struct ext4_allocation_context *ac) | |
c9de560d | 1916 | { |
8df9675f | 1917 | ext4_group_t ngroups, group, i; |
c9de560d AT |
1918 | int cr; |
1919 | int err = 0; | |
c9de560d AT |
1920 | struct ext4_sb_info *sbi; |
1921 | struct super_block *sb; | |
1922 | struct ext4_buddy e4b; | |
c9de560d AT |
1923 | |
1924 | sb = ac->ac_sb; | |
1925 | sbi = EXT4_SB(sb); | |
8df9675f | 1926 | ngroups = ext4_get_groups_count(sb); |
fb0a387d | 1927 | /* non-extent files are limited to low blocks/groups */ |
12e9b892 | 1928 | if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) |
fb0a387d ES |
1929 | ngroups = sbi->s_blockfile_groups; |
1930 | ||
c9de560d AT |
1931 | BUG_ON(ac->ac_status == AC_STATUS_FOUND); |
1932 | ||
1933 | /* first, try the goal */ | |
1934 | err = ext4_mb_find_by_goal(ac, &e4b); | |
1935 | if (err || ac->ac_status == AC_STATUS_FOUND) | |
1936 | goto out; | |
1937 | ||
1938 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) | |
1939 | goto out; | |
1940 | ||
1941 | /* | |
1942 | * ac->ac_2order is set only if the fe_len is a power of 2; |
1943 | * if ac_2order is set we also set the criteria to 0 so that we |
1944 | * try an exact allocation using the buddy data. |
1945 | */ | |
1946 | i = fls(ac->ac_g_ex.fe_len); | |
1947 | ac->ac_2order = 0; | |
1948 | /* | |
1949 | * We search using buddy data only if the order of the request | |
1950 | * is greater than equal to the sbi_s_mb_order2_reqs | |
b713a5ec | 1951 | * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req |
c9de560d AT |
1952 | */ |
1953 | if (i >= sbi->s_mb_order2_reqs) { | |
1954 | /* | |
1955 | * This should tell if fe_len is exactly power of 2 | |
1956 | */ | |
1957 | if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) | |
1958 | ac->ac_2order = i - 1; | |
1959 | } | |
1960 | ||
4ba74d00 TT |
1961 | /* if stream allocation is enabled, use global goal */ |
1962 | if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { | |
c9de560d AT |
1963 | /* TBD: this may be a hot point */ |
1964 | spin_lock(&sbi->s_md_lock); | |
1965 | ac->ac_g_ex.fe_group = sbi->s_mb_last_group; | |
1966 | ac->ac_g_ex.fe_start = sbi->s_mb_last_start; | |
1967 | spin_unlock(&sbi->s_md_lock); | |
1968 | } | |
4ba74d00 | 1969 | |
c9de560d AT |
1970 | /* Let's just scan groups to find more or less suitable blocks */ |
1971 | cr = ac->ac_2order ? 0 : 1; | |
1972 | /* | |
1973 | * cr == 0 try to get exact allocation, | |
1974 | * cr == 3 try to get anything | |
1975 | */ | |
1976 | repeat: | |
1977 | for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { | |
1978 | ac->ac_criteria = cr; | |
ed8f9c75 AK |
1979 | /* |
1980 | * start searching for the right group |
1981 | * from the specified goal value |
1982 | */ | |
1983 | group = ac->ac_g_ex.fe_group; | |
1984 | ||
8df9675f | 1985 | for (i = 0; i < ngroups; group++, i++) { |
8df9675f | 1986 | if (group == ngroups) |
c9de560d AT |
1987 | group = 0; |
1988 | ||
8a57d9d6 CW |
1989 | /* This now checks without needing the buddy page */ |
1990 | if (!ext4_mb_good_group(ac, group, cr)) | |
c9de560d AT |
1991 | continue; |
1992 | ||
c9de560d AT |
1993 | err = ext4_mb_load_buddy(sb, group, &e4b); |
1994 | if (err) | |
1995 | goto out; | |
1996 | ||
1997 | ext4_lock_group(sb, group); | |
8a57d9d6 CW |
1998 | |
1999 | /* | |
2000 | * We need to check again after locking the | |
2001 | * block group | |
2002 | */ | |
c9de560d | 2003 | if (!ext4_mb_good_group(ac, group, cr)) { |
c9de560d | 2004 | ext4_unlock_group(sb, group); |
e39e07fd | 2005 | ext4_mb_unload_buddy(&e4b); |
c9de560d AT |
2006 | continue; |
2007 | } | |
2008 | ||
2009 | ac->ac_groups_scanned++; | |
75507efb | 2010 | if (cr == 0) |
c9de560d | 2011 | ext4_mb_simple_scan_group(ac, &e4b); |
506bf2d8 ES |
2012 | else if (cr == 1 && sbi->s_stripe && |
2013 | !(ac->ac_g_ex.fe_len % sbi->s_stripe)) | |
c9de560d AT |
2014 | ext4_mb_scan_aligned(ac, &e4b); |
2015 | else | |
2016 | ext4_mb_complex_scan_group(ac, &e4b); | |
2017 | ||
2018 | ext4_unlock_group(sb, group); | |
e39e07fd | 2019 | ext4_mb_unload_buddy(&e4b); |
c9de560d AT |
2020 | |
2021 | if (ac->ac_status != AC_STATUS_CONTINUE) | |
2022 | break; | |
2023 | } | |
2024 | } | |
2025 | ||
2026 | if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && | |
2027 | !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { | |
2028 | /* | |
2029 | * We've been searching too long. Let's try to allocate | |
2030 | * the best chunk we've found so far | |
2031 | */ | |
2032 | ||
2033 | ext4_mb_try_best_found(ac, &e4b); | |
2034 | if (ac->ac_status != AC_STATUS_FOUND) { | |
2035 | /* | |
2036 | * Someone luckier has already allocated it. |
2037 | * The only thing we can do is just take the first |
2038 | * found block(s) |
2039 | printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); | |
2040 | */ | |
2041 | ac->ac_b_ex.fe_group = 0; | |
2042 | ac->ac_b_ex.fe_start = 0; | |
2043 | ac->ac_b_ex.fe_len = 0; | |
2044 | ac->ac_status = AC_STATUS_CONTINUE; | |
2045 | ac->ac_flags |= EXT4_MB_HINT_FIRST; | |
2046 | cr = 3; | |
2047 | atomic_inc(&sbi->s_mb_lost_chunks); | |
2048 | goto repeat; | |
2049 | } | |
2050 | } | |
2051 | out: | |
2052 | return err; | |
2053 | } | |
2054 | ||
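/*
 * Flow of the allocator above, in short: start at cr 0 when the request is
 * an exact power of two (buddy scan), otherwise at cr 1; each pass walks
 * every group starting from the goal group, locking a group only after the
 * cheap ext4_mb_good_group() precheck; if nothing is found by cr 3 but a
 * best extent was recorded, try to grab it, and if another allocator beat
 * us to it, restart at cr 3 with EXT4_MB_HINT_FIRST set so that any free
 * extent is accepted.
 */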
c9de560d AT |
2055 | static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) |
2056 | { | |
2057 | struct super_block *sb = seq->private; | |
c9de560d AT |
2058 | ext4_group_t group; |
2059 | ||
8df9675f | 2060 | if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) |
c9de560d | 2061 | return NULL; |
c9de560d | 2062 | group = *pos + 1; |
a9df9a49 | 2063 | return (void *) ((unsigned long) group); |
c9de560d AT |
2064 | } |
2065 | ||
2066 | static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) | |
2067 | { | |
2068 | struct super_block *sb = seq->private; | |
c9de560d AT |
2069 | ext4_group_t group; |
2070 | ||
2071 | ++*pos; | |
8df9675f | 2072 | if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) |
c9de560d AT |
2073 | return NULL; |
2074 | group = *pos + 1; | |
a9df9a49 | 2075 | return (void *) ((unsigned long) group); |
c9de560d AT |
2076 | } |
2077 | ||
2078 | static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) | |
2079 | { | |
2080 | struct super_block *sb = seq->private; | |
a9df9a49 | 2081 | ext4_group_t group = (ext4_group_t) ((unsigned long) v); |
c9de560d | 2082 | int i; |
1c8457ca | 2083 | int err, buddy_loaded = 0; |
c9de560d | 2084 | struct ext4_buddy e4b; |
1c8457ca | 2085 | struct ext4_group_info *grinfo; |
c9de560d AT |
2086 | struct sg { |
2087 | struct ext4_group_info info; | |
a36b4498 | 2088 | ext4_grpblk_t counters[16]; |
c9de560d AT |
2089 | } sg; |
2090 | ||
2091 | group--; | |
2092 | if (group == 0) | |
2093 | seq_printf(seq, "#%-5s: %-5s %-5s %-5s " | |
2094 | "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s " | |
2095 | "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n", | |
2096 | "group", "free", "frags", "first", | |
2097 | "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6", | |
2098 | "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13"); | |
2099 | ||
2100 | i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + | |
2101 | sizeof(struct ext4_group_info); | |
1c8457ca AK |
2102 | grinfo = ext4_get_group_info(sb, group); |
2103 | /* Load the group info in memory only if not already loaded. */ | |
2104 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { | |
2105 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
2106 | if (err) { | |
2107 | seq_printf(seq, "#%-5u: I/O error\n", group); | |
2108 | return 0; | |
2109 | } | |
2110 | buddy_loaded = 1; | |
c9de560d | 2111 | } |
1c8457ca | 2112 | |
c9de560d | 2113 | memcpy(&sg, ext4_get_group_info(sb, group), i); |
1c8457ca AK |
2114 | |
2115 | if (buddy_loaded) | |
2116 | ext4_mb_unload_buddy(&e4b); | |
c9de560d | 2117 | |
a9df9a49 | 2118 | seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, |
c9de560d AT |
2119 | sg.info.bb_fragments, sg.info.bb_first_free); |
2120 | for (i = 0; i <= 13; i++) | |
2121 | seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? | |
2122 | sg.info.bb_counters[i] : 0); | |
2123 | seq_printf(seq, " ]\n"); | |
2124 | ||
2125 | return 0; | |
2126 | } | |
2127 | ||
2128 | static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) | |
2129 | { | |
2130 | } | |
2131 | ||
7f1346a9 | 2132 | static const struct seq_operations ext4_mb_seq_groups_ops = { |
c9de560d AT |
2133 | .start = ext4_mb_seq_groups_start, |
2134 | .next = ext4_mb_seq_groups_next, | |
2135 | .stop = ext4_mb_seq_groups_stop, | |
2136 | .show = ext4_mb_seq_groups_show, | |
2137 | }; | |
2138 | ||
2139 | static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) | |
2140 | { | |
2141 | struct super_block *sb = PDE(inode)->data; | |
2142 | int rc; | |
2143 | ||
2144 | rc = seq_open(file, &ext4_mb_seq_groups_ops); | |
2145 | if (rc == 0) { | |
a271fe85 | 2146 | struct seq_file *m = file->private_data; |
c9de560d AT |
2147 | m->private = sb; |
2148 | } | |
2149 | return rc; | |
2150 | ||
2151 | } | |
2152 | ||
7f1346a9 | 2153 | static const struct file_operations ext4_mb_seq_groups_fops = { |
c9de560d AT |
2154 | .owner = THIS_MODULE, |
2155 | .open = ext4_mb_seq_groups_open, | |
2156 | .read = seq_read, | |
2157 | .llseek = seq_lseek, | |
2158 | .release = seq_release, | |
2159 | }; | |
2160 | ||
fb1813f4 CW |
2161 | static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) |
2162 | { | |
2163 | int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | |
2164 | struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; | |
2165 | ||
2166 | BUG_ON(!cachep); | |
2167 | return cachep; | |
2168 | } | |
5f21b0e6 | 2169 | |
28623c2f TT |
2170 | /* |
2171 | * Allocate the top-level s_group_info array for the specified number | |
2172 | * of groups | |
2173 | */ | |
2174 | int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) | |
2175 | { | |
2176 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2177 | unsigned size; | |
2178 | struct ext4_group_info ***new_groupinfo; | |
2179 | ||
2180 | size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> | |
2181 | EXT4_DESC_PER_BLOCK_BITS(sb); | |
2182 | if (size <= sbi->s_group_info_size) | |
2183 | return 0; | |
2184 | ||
2185 | size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); | |
2186 | new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); | |
2187 | if (!new_groupinfo) { | |
2188 | ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); | |
2189 | return -ENOMEM; | |
2190 | } | |
2191 | if (sbi->s_group_info) { | |
2192 | memcpy(new_groupinfo, sbi->s_group_info, | |
2193 | sbi->s_group_info_size * sizeof(*sbi->s_group_info)); | |
2194 | ext4_kvfree(sbi->s_group_info); | |
2195 | } | |
2196 | sbi->s_group_info = new_groupinfo; | |
2197 | sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); | |
2198 | ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", | |
2199 | sbi->s_group_info_size); | |
2200 | return 0; | |
2201 | } | |
2202 | ||
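/*
 * The group info pointers live in a two-level table: the top-level
 * s_group_info[] array holds one pointer per "meta-group" of
 * EXT4_DESC_PER_BLOCK(sb) groups, and each entry points to a block-sized
 * array of ext4_group_info pointers.  Lookup is roughly:
 *
 *	grp = sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]
 *			       [group & (EXT4_DESC_PER_BLOCK(sb) - 1)];
 *
 * e.g. 128 groups per meta-group with 4k blocks and 32-byte descriptors.
 */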
5f21b0e6 | 2203 | /* Create and initialize ext4_group_info data for the given group. */ |
920313a7 | 2204 | int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, |
5f21b0e6 FB |
2205 | struct ext4_group_desc *desc) |
2206 | { | |
fb1813f4 | 2207 | int i; |
5f21b0e6 FB |
2208 | int metalen = 0; |
2209 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
2210 | struct ext4_group_info **meta_group_info; | |
fb1813f4 | 2211 | struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); |
5f21b0e6 FB |
2212 | |
2213 | /* | |
2214 | * First check if this group is the first of a reserved block. | |
2215 | * If it's true, we have to allocate a new table of pointers | |
2216 | * to ext4_group_info structures | |
2217 | */ | |
2218 | if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { | |
2219 | metalen = sizeof(*meta_group_info) << | |
2220 | EXT4_DESC_PER_BLOCK_BITS(sb); | |
2221 | meta_group_info = kmalloc(metalen, GFP_KERNEL); | |
2222 | if (meta_group_info == NULL) { | |
7f6a11e7 | 2223 | ext4_msg(sb, KERN_ERR, "can't allocate mem " |
9d8b9ec4 | 2224 | "for a buddy group"); |
5f21b0e6 FB |
2225 | goto exit_meta_group_info; |
2226 | } | |
2227 | sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = | |
2228 | meta_group_info; | |
2229 | } | |
2230 | ||
5f21b0e6 FB |
2231 | meta_group_info = |
2232 | sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; | |
2233 | i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); | |
2234 | ||
85556c9a | 2235 | meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL); |
5f21b0e6 | 2236 | if (meta_group_info[i] == NULL) { |
7f6a11e7 | 2237 | ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); |
5f21b0e6 FB |
2238 | goto exit_group_info; |
2239 | } | |
2240 | set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, | |
2241 | &(meta_group_info[i]->bb_state)); | |
2242 | ||
2243 | /* | |
2244 | * initialize bb_free to be able to skip | |
2245 | * empty groups without initialization | |
2246 | */ | |
2247 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | |
2248 | meta_group_info[i]->bb_free = | |
cff1dfd7 | 2249 | ext4_free_clusters_after_init(sb, group, desc); |
5f21b0e6 FB |
2250 | } else { |
2251 | meta_group_info[i]->bb_free = | |
021b65bb | 2252 | ext4_free_group_clusters(sb, desc); |
5f21b0e6 FB |
2253 | } |
2254 | ||
2255 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); | |
920313a7 | 2256 | init_rwsem(&meta_group_info[i]->alloc_sem); |
64e290ec | 2257 | meta_group_info[i]->bb_free_root = RB_ROOT; |
8a57d9d6 | 2258 | meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ |
5f21b0e6 FB |
2259 | |
2260 | #ifdef DOUBLE_CHECK | |
2261 | { | |
2262 | struct buffer_head *bh; | |
2263 | meta_group_info[i]->bb_bitmap = | |
2264 | kmalloc(sb->s_blocksize, GFP_KERNEL); | |
2265 | BUG_ON(meta_group_info[i]->bb_bitmap == NULL); | |
2266 | bh = ext4_read_block_bitmap(sb, group); | |
2267 | BUG_ON(bh == NULL); | |
2268 | memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, | |
2269 | sb->s_blocksize); | |
2270 | put_bh(bh); | |
2271 | } | |
2272 | #endif | |
2273 | ||
2274 | return 0; | |
2275 | ||
2276 | exit_group_info: | |
2277 | /* If a meta_group_info table has been allocated, release it now */ | |
caaf7a29 | 2278 | if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { |
5f21b0e6 | 2279 | kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); |
caaf7a29 TM |
2280 | sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; |
2281 | } | |
5f21b0e6 FB |
2282 | exit_meta_group_info: |
2283 | return -ENOMEM; | |
2284 | } /* ext4_mb_add_groupinfo */ | |
2285 | ||
c9de560d AT |
2286 | static int ext4_mb_init_backend(struct super_block *sb) |
2287 | { | |
8df9675f | 2288 | ext4_group_t ngroups = ext4_get_groups_count(sb); |
c9de560d | 2289 | ext4_group_t i; |
c9de560d | 2290 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
28623c2f | 2291 | int err; |
5f21b0e6 | 2292 | struct ext4_group_desc *desc; |
fb1813f4 | 2293 | struct kmem_cache *cachep; |
5f21b0e6 | 2294 | |
28623c2f TT |
2295 | err = ext4_mb_alloc_groupinfo(sb, ngroups); |
2296 | if (err) | |
2297 | return err; | |
c9de560d | 2298 | |
c9de560d AT |
2299 | sbi->s_buddy_cache = new_inode(sb); |
2300 | if (sbi->s_buddy_cache == NULL) { | |
9d8b9ec4 | 2301 | ext4_msg(sb, KERN_ERR, "can't get new inode"); |
c9de560d AT |
2302 | goto err_freesgi; |
2303 | } | |
48e6061b YJ |
2304 | /* To avoid potentially colliding with a valid on-disk inode number, |
2305 | * use EXT4_BAD_INO for the buddy cache inode number. This inode is | |
2306 | * not in the inode hash, so it should never be found by iget(), but | |
2307 | * this will avoid confusion if it ever shows up during debugging. */ | |
2308 | sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; | |
c9de560d | 2309 | EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; |
8df9675f | 2310 | for (i = 0; i < ngroups; i++) { |
c9de560d AT |
2311 | desc = ext4_get_group_desc(sb, i, NULL); |
2312 | if (desc == NULL) { | |
9d8b9ec4 | 2313 | ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); |
c9de560d AT |
2314 | goto err_freebuddy; |
2315 | } | |
5f21b0e6 FB |
2316 | if (ext4_mb_add_groupinfo(sb, i, desc) != 0) |
2317 | goto err_freebuddy; | |
c9de560d AT |
2318 | } |
2319 | ||
2320 | return 0; | |
2321 | ||
2322 | err_freebuddy: | |
fb1813f4 | 2323 | cachep = get_groupinfo_cache(sb->s_blocksize_bits); |
f1fa3342 | 2324 | while (i-- > 0) |
fb1813f4 | 2325 | kmem_cache_free(cachep, ext4_get_group_info(sb, i)); |
28623c2f | 2326 | i = sbi->s_group_info_size; |
f1fa3342 | 2327 | while (i-- > 0) |
c9de560d AT |
2328 | kfree(sbi->s_group_info[i]); |
2329 | iput(sbi->s_buddy_cache); | |
2330 | err_freesgi: | |
f18a5f21 | 2331 | ext4_kvfree(sbi->s_group_info); |
c9de560d AT |
2332 | return -ENOMEM; |
2333 | } | |
2334 | ||
2892c15d ES |
2335 | static void ext4_groupinfo_destroy_slabs(void) |
2336 | { | |
2337 | int i; | |
2338 | ||
2339 | for (i = 0; i < NR_GRPINFO_CACHES; i++) { | |
2340 | if (ext4_groupinfo_caches[i]) | |
2341 | kmem_cache_destroy(ext4_groupinfo_caches[i]); | |
2342 | ext4_groupinfo_caches[i] = NULL; | |
2343 | } | |
2344 | } | |
2345 | ||
2346 | static int ext4_groupinfo_create_slab(size_t size) | |
2347 | { | |
2348 | static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); | |
2349 | int slab_size; | |
2350 | int blocksize_bits = order_base_2(size); | |
2351 | int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | |
2352 | struct kmem_cache *cachep; | |
2353 | ||
2354 | if (cache_index >= NR_GRPINFO_CACHES) | |
2355 | return -EINVAL; | |
2356 | ||
2357 | if (unlikely(cache_index < 0)) | |
2358 | cache_index = 0; | |
2359 | ||
2360 | mutex_lock(&ext4_grpinfo_slab_create_mutex); | |
2361 | if (ext4_groupinfo_caches[cache_index]) { | |
2362 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); | |
2363 | return 0; /* Already created */ | |
2364 | } | |
2365 | ||
2366 | slab_size = offsetof(struct ext4_group_info, | |
2367 | bb_counters[blocksize_bits + 2]); | |
2368 | ||
2369 | cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], | |
2370 | slab_size, 0, SLAB_RECLAIM_ACCOUNT, | |
2371 | NULL); | |
2372 | ||
823ba01f TM |
2373 | ext4_groupinfo_caches[cache_index] = cachep; |
2374 | ||
2892c15d ES |
2375 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); |
2376 | if (!cachep) { | |
9d8b9ec4 TT |
2377 | printk(KERN_EMERG |
2378 | "EXT4-fs: no memory for groupinfo slab cache\n"); | |
2892c15d ES |
2379 | return -ENOMEM; |
2380 | } | |
2381 | ||
2892c15d ES |
2382 | return 0; |
2383 | } | |
2384 | ||
9d99012f | 2385 | int ext4_mb_init(struct super_block *sb) |
c9de560d AT |
2386 | { |
2387 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
6be2ded1 | 2388 | unsigned i, j; |
c9de560d AT |
2389 | unsigned offset; |
2390 | unsigned max; | |
74767c5a | 2391 | int ret; |
c9de560d | 2392 | |
1927805e | 2393 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); |
c9de560d AT |
2394 | |
2395 | sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); | |
2396 | if (sbi->s_mb_offsets == NULL) { | |
fb1813f4 CW |
2397 | ret = -ENOMEM; |
2398 | goto out; | |
c9de560d | 2399 | } |
ff7ef329 | 2400 | |
1927805e | 2401 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); |
c9de560d AT |
2402 | sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); |
2403 | if (sbi->s_mb_maxs == NULL) { | |
fb1813f4 CW |
2404 | ret = -ENOMEM; |
2405 | goto out; | |
2406 | } | |
2407 | ||
2892c15d ES |
2408 | ret = ext4_groupinfo_create_slab(sb->s_blocksize); |
2409 | if (ret < 0) | |
2410 | goto out; | |
c9de560d AT |
2411 | |
2412 | /* order 0 is regular bitmap */ | |
2413 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; | |
2414 | sbi->s_mb_offsets[0] = 0; | |
2415 | ||
2416 | i = 1; | |
2417 | offset = 0; | |
2418 | max = sb->s_blocksize << 2; | |
2419 | do { | |
2420 | sbi->s_mb_offsets[i] = offset; | |
2421 | sbi->s_mb_maxs[i] = max; | |
2422 | offset += 1 << (sb->s_blocksize_bits - i); | |
2423 | max = max >> 1; | |
2424 | i++; | |
2425 | } while (i <= sb->s_blocksize_bits + 1); | |
2426 | ||
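/*
 * For a 4k block size (s_blocksize_bits == 12) the loop above lays the
 * per-order bitmaps out roughly as follows: order 0 uses the on-disk block
 * bitmap itself (maxs[0] = 32768 bits, offset 0); order 1 starts at byte 0
 * of the buddy block with 16384 bits; order 2 at byte 2048 with 8192 bits;
 * order 3 at byte 3072 with 4096 bits; and so on, each order having half
 * as many bits as the previous one.  s_mb_offsets[] are byte offsets into
 * the buddy block and s_mb_maxs[] are the bit counts used by
 * mb_find_buddy().
 */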
c9de560d | 2427 | spin_lock_init(&sbi->s_md_lock); |
c9de560d AT |
2428 | spin_lock_init(&sbi->s_bal_lock); |
2429 | ||
2430 | sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; | |
2431 | sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; | |
2432 | sbi->s_mb_stats = MB_DEFAULT_STATS; | |
2433 | sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; | |
2434 | sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; | |
27baebb8 TT |
2435 | /* |
2436 | * The default group preallocation is 512, which for 4k block | |
2437 | * sizes translates to 2 megabytes. However for bigalloc file | |
2438 | * systems, this is probably too big (i.e., if the cluster size |
2439 | * is 1 megabyte, then group preallocation size becomes half a | |
2440 | * gigabyte!). As a default, we will keep a two megabyte | |
2441 | * group prealloc size for cluster sizes up to 64k, and after |
2442 | * that, we will force a minimum group preallocation size of | |
2443 | * 32 clusters. This translates to 8 megs when the cluster | |
2444 | * size is 256k, and 32 megs when the cluster size is 1 meg, | |
2445 | * which seems reasonable as a default. | |
2446 | */ | |
2447 | sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> | |
2448 | sbi->s_cluster_bits, 32); | |
d7a1fee1 DE |
2449 | /* |
2450 | * If there is a s_stripe > 1, then we set the s_mb_group_prealloc | |
2451 | * to the lowest multiple of s_stripe which is bigger than | |
2452 | * the s_mb_group_prealloc as determined above. We want | |
2453 | * the preallocation size to be an exact multiple of the | |
2454 | * RAID stripe size so that preallocations don't fragment | |
2455 | * the stripes. | |
2456 | */ | |
2457 | if (sbi->s_stripe > 1) { | |
2458 | sbi->s_mb_group_prealloc = roundup( | |
2459 | sbi->s_mb_group_prealloc, sbi->s_stripe); | |
2460 | } | |
c9de560d | 2461 | |
730c213c | 2462 | sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); |
c9de560d | 2463 | if (sbi->s_locality_groups == NULL) { |
fb1813f4 | 2464 | ret = -ENOMEM; |
7aa0baea | 2465 | goto out_free_groupinfo_slab; |
c9de560d | 2466 | } |
730c213c | 2467 | for_each_possible_cpu(i) { |
c9de560d | 2468 | struct ext4_locality_group *lg; |
730c213c | 2469 | lg = per_cpu_ptr(sbi->s_locality_groups, i); |
c9de560d | 2470 | mutex_init(&lg->lg_mutex); |
6be2ded1 AK |
2471 | for (j = 0; j < PREALLOC_TB_SIZE; j++) |
2472 | INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); | |
c9de560d AT |
2473 | spin_lock_init(&lg->lg_prealloc_lock); |
2474 | } | |
2475 | ||
79a77c5a YJ |
2476 | /* init file for buddy data */ |
2477 | ret = ext4_mb_init_backend(sb); | |
7aa0baea TM |
2478 | if (ret != 0) |
2479 | goto out_free_locality_groups; | |
79a77c5a | 2480 | |
296c355c TT |
2481 | if (sbi->s_proc) |
2482 | proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, | |
2483 | &ext4_mb_seq_groups_fops, sb); | |
c9de560d | 2484 | |
7aa0baea TM |
2485 | return 0; |
2486 | ||
2487 | out_free_locality_groups: | |
2488 | free_percpu(sbi->s_locality_groups); | |
2489 | sbi->s_locality_groups = NULL; | |
2490 | out_free_groupinfo_slab: | |
2491 | ext4_groupinfo_destroy_slabs(); | |
fb1813f4 | 2492 | out: |
7aa0baea TM |
2493 | kfree(sbi->s_mb_offsets); |
2494 | sbi->s_mb_offsets = NULL; | |
2495 | kfree(sbi->s_mb_maxs); | |
2496 | sbi->s_mb_maxs = NULL; | |
fb1813f4 | 2497 | return ret; |
c9de560d AT |
2498 | } |
2499 | ||
955ce5f5 | 2500 | /* needs to be called with the ext4 group lock held */ |
c9de560d AT |
2501 | static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) |
2502 | { | |
2503 | struct ext4_prealloc_space *pa; | |
2504 | struct list_head *cur, *tmp; | |
2505 | int count = 0; | |
2506 | ||
2507 | list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { | |
2508 | pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); | |
2509 | list_del(&pa->pa_group_list); | |
2510 | count++; | |
688f05a0 | 2511 | kmem_cache_free(ext4_pspace_cachep, pa); |
c9de560d AT |
2512 | } |
2513 | if (count) | |
6ba495e9 | 2514 | mb_debug(1, "mballoc: %u PAs left\n", count); |
c9de560d AT |
2515 | |
2516 | } | |
2517 | ||
2518 | int ext4_mb_release(struct super_block *sb) | |
2519 | { | |
8df9675f | 2520 | ext4_group_t ngroups = ext4_get_groups_count(sb); |
c9de560d AT |
2521 | ext4_group_t i; |
2522 | int num_meta_group_infos; | |
2523 | struct ext4_group_info *grinfo; | |
2524 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
fb1813f4 | 2525 | struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); |
c9de560d | 2526 | |
95599968 SQ |
2527 | if (sbi->s_proc) |
2528 | remove_proc_entry("mb_groups", sbi->s_proc); | |
2529 | ||
c9de560d | 2530 | if (sbi->s_group_info) { |
8df9675f | 2531 | for (i = 0; i < ngroups; i++) { |
c9de560d AT |
2532 | grinfo = ext4_get_group_info(sb, i); |
2533 | #ifdef DOUBLE_CHECK | |
2534 | kfree(grinfo->bb_bitmap); | |
2535 | #endif | |
2536 | ext4_lock_group(sb, i); | |
2537 | ext4_mb_cleanup_pa(grinfo); | |
2538 | ext4_unlock_group(sb, i); | |
fb1813f4 | 2539 | kmem_cache_free(cachep, grinfo); |
c9de560d | 2540 | } |
8df9675f | 2541 | num_meta_group_infos = (ngroups + |
c9de560d AT |
2542 | EXT4_DESC_PER_BLOCK(sb) - 1) >> |
2543 | EXT4_DESC_PER_BLOCK_BITS(sb); | |
2544 | for (i = 0; i < num_meta_group_infos; i++) | |
2545 | kfree(sbi->s_group_info[i]); | |
f18a5f21 | 2546 | ext4_kvfree(sbi->s_group_info); |
c9de560d AT |
2547 | } |
2548 | kfree(sbi->s_mb_offsets); | |
2549 | kfree(sbi->s_mb_maxs); | |
2550 | if (sbi->s_buddy_cache) | |
2551 | iput(sbi->s_buddy_cache); | |
2552 | if (sbi->s_mb_stats) { | |
9d8b9ec4 TT |
2553 | ext4_msg(sb, KERN_INFO, |
2554 | "mballoc: %u blocks %u reqs (%u success)", | |
c9de560d AT |
2555 | atomic_read(&sbi->s_bal_allocated), |
2556 | atomic_read(&sbi->s_bal_reqs), | |
2557 | atomic_read(&sbi->s_bal_success)); | |
9d8b9ec4 TT |
2558 | ext4_msg(sb, KERN_INFO, |
2559 | "mballoc: %u extents scanned, %u goal hits, " | |
2560 | "%u 2^N hits, %u breaks, %u lost", | |
c9de560d AT |
2561 | atomic_read(&sbi->s_bal_ex_scanned), |
2562 | atomic_read(&sbi->s_bal_goals), | |
2563 | atomic_read(&sbi->s_bal_2orders), | |
2564 | atomic_read(&sbi->s_bal_breaks), | |
2565 | atomic_read(&sbi->s_mb_lost_chunks)); | |
9d8b9ec4 TT |
2566 | ext4_msg(sb, KERN_INFO, |
2567 | "mballoc: %lu generated and it took %Lu", | |
ced156e4 | 2568 | sbi->s_mb_buddies_generated, |
c9de560d | 2569 | sbi->s_mb_generation_time); |
9d8b9ec4 TT |
2570 | ext4_msg(sb, KERN_INFO, |
2571 | "mballoc: %u preallocated, %u discarded", | |
c9de560d AT |
2572 | atomic_read(&sbi->s_mb_preallocated), |
2573 | atomic_read(&sbi->s_mb_discarded)); | |
2574 | } | |
2575 | ||
730c213c | 2576 | free_percpu(sbi->s_locality_groups); |
c9de560d AT |
2577 | |
2578 | return 0; | |
2579 | } | |
2580 | ||
77ca6cdf | 2581 | static inline int ext4_issue_discard(struct super_block *sb, |
84130193 | 2582 | ext4_group_t block_group, ext4_grpblk_t cluster, int count) |
5c521830 | 2583 | { |
5c521830 JZ |
2584 | ext4_fsblk_t discard_block; |
2585 | ||
84130193 TT |
2586 | discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + |
2587 | ext4_group_first_block_no(sb, block_group)); | |
2588 | count = EXT4_C2B(EXT4_SB(sb), count); | |
5c521830 JZ |
2589 | trace_ext4_discard_blocks(sb, |
2590 | (unsigned long long) discard_block, count); | |
93259636 | 2591 | return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); |
5c521830 JZ |
2592 | } |
2593 | ||
3e624fc7 TT |
2594 | /* |
2595 | * This function is called by the jbd2 layer once the commit has finished, | |
2596 | * so we know we can free the blocks that were released with that commit. | |
2597 | */ | |
18aadd47 BJ |
2598 | static void ext4_free_data_callback(struct super_block *sb, |
2599 | struct ext4_journal_cb_entry *jce, | |
2600 | int rc) | |
c9de560d | 2601 | { |
18aadd47 | 2602 | struct ext4_free_data *entry = (struct ext4_free_data *)jce; |
c9de560d | 2603 | struct ext4_buddy e4b; |
c894058d | 2604 | struct ext4_group_info *db; |
d9f34504 | 2605 | int err, count = 0, count2 = 0; |
c9de560d | 2606 | |
18aadd47 BJ |
2607 | mb_debug(1, "gonna free %u blocks in group %u (0x%p):", |
2608 | entry->efd_count, entry->efd_group, entry); | |
c9de560d | 2609 | |
18aadd47 BJ |
2610 | if (test_opt(sb, DISCARD)) |
2611 | ext4_issue_discard(sb, entry->efd_group, | |
2612 | entry->efd_start_cluster, entry->efd_count); | |
c9de560d | 2613 | |
18aadd47 BJ |
2614 | err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); |
2615 | /* we expect to find existing buddy because it's pinned */ | |
2616 | BUG_ON(err != 0); | |
b90f6870 | 2617 | |
c9de560d | 2618 | |
18aadd47 BJ |
2619 | db = e4b.bd_info; |
2620 | /* there are blocks to put in buddy to make them really free */ | |
2621 | count += entry->efd_count; | |
2622 | count2++; | |
2623 | ext4_lock_group(sb, entry->efd_group); | |
2624 | /* Take it out of per group rb tree */ | |
2625 | rb_erase(&entry->efd_node, &(db->bb_free_root)); | |
2626 | mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); | |
c894058d | 2627 | |
18aadd47 BJ |
2628 | /* |
2629 | * Clear the trimmed flag for the group so that the next | |
2630 | * ext4_trim_fs can trim it. | |
2631 | * If the volume is mounted with -o discard, online discard | |
2632 | * is supported and the free blocks will be trimmed online. | |
2633 | */ | |
2634 | if (!test_opt(sb, DISCARD)) | |
2635 | EXT4_MB_GRP_CLEAR_TRIMMED(db); | |
3d56b8d2 | 2636 | |
18aadd47 BJ |
2637 | if (!db->bb_free_root.rb_node) { |
2638 | /* No more items in the per group rb tree | |
2639 | * balance refcounts from ext4_mb_free_metadata() | |
2640 | */ | |
2641 | page_cache_release(e4b.bd_buddy_page); | |
2642 | page_cache_release(e4b.bd_bitmap_page); | |
3e624fc7 | 2643 | } |
18aadd47 BJ |
2644 | ext4_unlock_group(sb, entry->efd_group); |
2645 | kmem_cache_free(ext4_free_data_cachep, entry); | |
2646 | ext4_mb_unload_buddy(&e4b); | |
c9de560d | 2647 | |
6ba495e9 | 2648 | mb_debug(1, "freed %u blocks in %u structures\n", count, count2); |
c9de560d AT |
2649 | } |
2650 | ||
6ba495e9 TT |
2651 | #ifdef CONFIG_EXT4_DEBUG |
2652 | u8 mb_enable_debug __read_mostly; | |
2653 | ||
2654 | static struct dentry *debugfs_dir; | |
2655 | static struct dentry *debugfs_debug; | |
2656 | ||
2657 | static void __init ext4_create_debugfs_entry(void) | |
2658 | { | |
2659 | debugfs_dir = debugfs_create_dir("ext4", NULL); | |
2660 | if (debugfs_dir) | |
2661 | debugfs_debug = debugfs_create_u8("mballoc-debug", | |
2662 | S_IRUGO | S_IWUSR, | |
2663 | debugfs_dir, | |
2664 | &mb_enable_debug); | |
2665 | } | |
2666 | ||
2667 | static void ext4_remove_debugfs_entry(void) | |
2668 | { | |
2669 | debugfs_remove(debugfs_debug); | |
2670 | debugfs_remove(debugfs_dir); | |
2671 | } | |
2672 | ||
2673 | #else | |
2674 | ||
2675 | static void __init ext4_create_debugfs_entry(void) | |
2676 | { | |
2677 | } | |
2678 | ||
2679 | static void ext4_remove_debugfs_entry(void) | |
2680 | { | |
2681 | } | |
2682 | ||
2683 | #endif | |
2684 | ||
5dabfc78 | 2685 | int __init ext4_init_mballoc(void) |
c9de560d | 2686 | { |
16828088 TT |
2687 | ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, |
2688 | SLAB_RECLAIM_ACCOUNT); | |
c9de560d AT |
2689 | if (ext4_pspace_cachep == NULL) |
2690 | return -ENOMEM; | |
2691 | ||
16828088 TT |
2692 | ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, |
2693 | SLAB_RECLAIM_ACCOUNT); | |
256bdb49 ES |
2694 | if (ext4_ac_cachep == NULL) { |
2695 | kmem_cache_destroy(ext4_pspace_cachep); | |
2696 | return -ENOMEM; | |
2697 | } | |
c894058d | 2698 | |
18aadd47 BJ |
2699 | ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, |
2700 | SLAB_RECLAIM_ACCOUNT); | |
2701 | if (ext4_free_data_cachep == NULL) { | |
c894058d AK |
2702 | kmem_cache_destroy(ext4_pspace_cachep); |
2703 | kmem_cache_destroy(ext4_ac_cachep); | |
2704 | return -ENOMEM; | |
2705 | } | |
6ba495e9 | 2706 | ext4_create_debugfs_entry(); |
c9de560d AT |
2707 | return 0; |
2708 | } | |
2709 | ||
5dabfc78 | 2710 | void ext4_exit_mballoc(void) |
c9de560d | 2711 | { |
60e6679e | 2712 | /* |
3e03f9ca JDB |
2713 | * Wait for completion of call_rcu()'s on ext4_pspace_cachep |
2714 | * before destroying the slab cache. | |
2715 | */ | |
2716 | rcu_barrier(); | |
c9de560d | 2717 | kmem_cache_destroy(ext4_pspace_cachep); |
256bdb49 | 2718 | kmem_cache_destroy(ext4_ac_cachep); |
18aadd47 | 2719 | kmem_cache_destroy(ext4_free_data_cachep); |
2892c15d | 2720 | ext4_groupinfo_destroy_slabs(); |
6ba495e9 | 2721 | ext4_remove_debugfs_entry(); |
c9de560d AT |
2722 | } |
2723 | ||
2724 | ||
2725 | /* | |
73b2c716 | 2726 | * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps |
c9de560d AT |
2727 | * Returns 0 if success or error code |
2728 | */ | |
4ddfef7b ES |
2729 | static noinline_for_stack int |
2730 | ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |
53accfa9 | 2731 | handle_t *handle, unsigned int reserv_clstrs) |
c9de560d AT |
2732 | { |
2733 | struct buffer_head *bitmap_bh = NULL; | |
c9de560d AT |
2734 | struct ext4_group_desc *gdp; |
2735 | struct buffer_head *gdp_bh; | |
2736 | struct ext4_sb_info *sbi; | |
2737 | struct super_block *sb; | |
2738 | ext4_fsblk_t block; | |
519deca0 | 2739 | int err, len; |
c9de560d AT |
2740 | |
2741 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
2742 | BUG_ON(ac->ac_b_ex.fe_len <= 0); | |
2743 | ||
2744 | sb = ac->ac_sb; | |
2745 | sbi = EXT4_SB(sb); | |
c9de560d AT |
2746 | |
2747 | err = -EIO; | |
574ca174 | 2748 | bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); |
c9de560d AT |
2749 | if (!bitmap_bh) |
2750 | goto out_err; | |
2751 | ||
2752 | err = ext4_journal_get_write_access(handle, bitmap_bh); | |
2753 | if (err) | |
2754 | goto out_err; | |
2755 | ||
2756 | err = -EIO; | |
2757 | gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); | |
2758 | if (!gdp) | |
2759 | goto out_err; | |
2760 | ||
a9df9a49 | 2761 | ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, |
021b65bb | 2762 | ext4_free_group_clusters(sb, gdp)); |
03cddb80 | 2763 | |
c9de560d AT |
2764 | err = ext4_journal_get_write_access(handle, gdp_bh); |
2765 | if (err) | |
2766 | goto out_err; | |
2767 | ||
bda00de7 | 2768 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); |
c9de560d | 2769 | |
53accfa9 | 2770 | len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); |
6fd058f7 | 2771 | if (!ext4_data_block_valid(sbi, block, len)) { |
12062ddd | 2772 | ext4_error(sb, "Allocating blocks %llu-%llu which overlap " |
1084f252 | 2773 | "fs metadata", block, block+len); |
519deca0 AK |
2774 | /* The file system was mounted not to panic on error. |
2775 | * Fix the bitmap and repeat the block allocation: |
2776 | * we leak some of the blocks here. |
2777 | */ | |
955ce5f5 | 2778 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); |
c3e94d1d YY |
2779 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, |
2780 | ac->ac_b_ex.fe_len); | |
955ce5f5 | 2781 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); |
0390131b | 2782 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); |
519deca0 AK |
2783 | if (!err) |
2784 | err = -EAGAIN; | |
2785 | goto out_err; | |
c9de560d | 2786 | } |
955ce5f5 AK |
2787 | |
2788 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); | |
c9de560d AT |
2789 | #ifdef AGGRESSIVE_CHECK |
2790 | { | |
2791 | int i; | |
2792 | for (i = 0; i < ac->ac_b_ex.fe_len; i++) { | |
2793 | BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, | |
2794 | bitmap_bh->b_data)); | |
2795 | } | |
2796 | } | |
2797 | #endif | |
c3e94d1d YY |
2798 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, |
2799 | ac->ac_b_ex.fe_len); | |
c9de560d AT |
2800 | if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
2801 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | |
021b65bb | 2802 | ext4_free_group_clusters_set(sb, gdp, |
cff1dfd7 | 2803 | ext4_free_clusters_after_init(sb, |
021b65bb | 2804 | ac->ac_b_ex.fe_group, gdp)); |
c9de560d | 2805 | } |
021b65bb TT |
2806 | len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; |
2807 | ext4_free_group_clusters_set(sb, gdp, len); | |
fa77dcfa DW |
2808 | ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh, |
2809 | EXT4_BLOCKS_PER_GROUP(sb) / 8); | |
feb0ab32 | 2810 | ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); |
955ce5f5 AK |
2811 | |
2812 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); | |
57042651 | 2813 | percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); |
d2a17637 | 2814 | /* |
6bc6e63f | 2815 | * Now reduce the dirty block count also. Should not go negative |
d2a17637 | 2816 | */ |
6bc6e63f AK |
2817 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
2818 | /* release all the reserved blocks if non delalloc */ | |
57042651 TT |
2819 | percpu_counter_sub(&sbi->s_dirtyclusters_counter, |
2820 | reserv_clstrs); | |
c9de560d | 2821 | |
772cb7c8 JS |
2822 | if (sbi->s_log_groups_per_flex) { |
2823 | ext4_group_t flex_group = ext4_flex_group(sbi, | |
2824 | ac->ac_b_ex.fe_group); | |
9f24e420 | 2825 | atomic_sub(ac->ac_b_ex.fe_len, |
24aaa8ef | 2826 | &sbi->s_flex_groups[flex_group].free_clusters); |
772cb7c8 JS |
2827 | } |
2828 | ||
0390131b | 2829 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); |
c9de560d AT |
2830 | if (err) |
2831 | goto out_err; | |
0390131b | 2832 | err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); |
c9de560d AT |
2833 | |
2834 | out_err: | |
42a10add | 2835 | brelse(bitmap_bh); |
c9de560d AT |
2836 | return err; |
2837 | } | |
2838 | ||
2839 | /* | |
2840 | * here we normalize the request for a locality group |
d7a1fee1 DE |
2841 | * Group requests are normalized to s_mb_group_prealloc, which goes to |
2842 | * s_stripe if we set the same via mount option. |
2843 | * s_mb_group_prealloc can be configured via | |
b713a5ec | 2844 | * /sys/fs/ext4/<partition>/mb_group_prealloc |
c9de560d AT |
2845 | * |
2846 | * XXX: should we try to preallocate more than the group has now? | |
2847 | */ | |
2848 | static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) | |
2849 | { | |
2850 | struct super_block *sb = ac->ac_sb; | |
2851 | struct ext4_locality_group *lg = ac->ac_lg; | |
2852 | ||
2853 | BUG_ON(lg == NULL); | |
d7a1fee1 | 2854 | ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; |
6ba495e9 | 2855 | mb_debug(1, "#%u: goal %u blocks for locality group\n", |
c9de560d AT |
2856 | current->pid, ac->ac_g_ex.fe_len); |
2857 | } | |
2858 | ||
2859 | /* | |
2860 | * Normalization means making request better in terms of | |
2861 | * size and alignment | |
2862 | */ | |
4ddfef7b ES |
2863 | static noinline_for_stack void |
2864 | ext4_mb_normalize_request(struct ext4_allocation_context *ac, | |
c9de560d AT |
2865 | struct ext4_allocation_request *ar) |
2866 | { | |
53accfa9 | 2867 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
c9de560d AT |
2868 | int bsbits, max; |
2869 | ext4_lblk_t end; | |
1592d2c5 CW |
2870 | loff_t size, start_off; |
2871 | loff_t orig_size __maybe_unused; | |
5a0790c2 | 2872 | ext4_lblk_t start; |
c9de560d | 2873 | struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); |
9a0762c5 | 2874 | struct ext4_prealloc_space *pa; |
c9de560d AT |
2875 | |
2876 | /* do normalize only data requests, metadata requests | |
2877 | do not need preallocation */ | |
2878 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
2879 | return; | |
2880 | ||
2881 | /* sometimes the caller may want exact blocks */ |
2882 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) | |
2883 | return; | |
2884 | ||
2885 | /* caller may indicate that preallocation isn't | |
2886 | * required (it's a tail, for example) */ | |
2887 | if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) | |
2888 | return; | |
2889 | ||
2890 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { | |
2891 | ext4_mb_normalize_group_request(ac); | |
2892 | return ; | |
2893 | } | |
2894 | ||
2895 | bsbits = ac->ac_sb->s_blocksize_bits; | |
2896 | ||
2897 | /* first, let's learn the actual file size, | |
2898 | * assuming the current request is allocated */ | |
53accfa9 | 2899 | size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); |
c9de560d AT |
2900 | size = size << bsbits; |
2901 | if (size < i_size_read(ac->ac_inode)) | |
2902 | size = i_size_read(ac->ac_inode); | |
5a0790c2 | 2903 | orig_size = size; |
c9de560d | 2904 | |
1930479c VC |
2905 | /* max size of free chunks */ |
2906 | max = 2 << bsbits; | |
c9de560d | 2907 | |
1930479c VC |
2908 | #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ |
2909 | (req <= (size) || max <= (chunk_size)) | |
c9de560d AT |
2910 | |
2911 | /* first, try to predict filesize */ | |
2912 | /* XXX: should this table be tunable? */ | |
2913 | start_off = 0; | |
2914 | if (size <= 16 * 1024) { | |
2915 | size = 16 * 1024; | |
2916 | } else if (size <= 32 * 1024) { | |
2917 | size = 32 * 1024; | |
2918 | } else if (size <= 64 * 1024) { | |
2919 | size = 64 * 1024; | |
2920 | } else if (size <= 128 * 1024) { | |
2921 | size = 128 * 1024; | |
2922 | } else if (size <= 256 * 1024) { | |
2923 | size = 256 * 1024; | |
2924 | } else if (size <= 512 * 1024) { | |
2925 | size = 512 * 1024; | |
2926 | } else if (size <= 1024 * 1024) { | |
2927 | size = 1024 * 1024; | |
1930479c | 2928 | } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { |
c9de560d | 2929 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
1930479c VC |
2930 | (21 - bsbits)) << 21; |
2931 | size = 2 * 1024 * 1024; | |
2932 | } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { | |
c9de560d AT |
2933 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
2934 | (22 - bsbits)) << 22; | |
2935 | size = 4 * 1024 * 1024; | |
2936 | } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, | |
1930479c | 2937 | (8<<20)>>bsbits, max, 8 * 1024)) { |
c9de560d AT |
2938 | start_off = ((loff_t)ac->ac_o_ex.fe_logical >> |
2939 | (23 - bsbits)) << 23; | |
2940 | size = 8 * 1024 * 1024; | |
2941 | } else { | |
2942 | start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits; | |
2943 | size = ac->ac_o_ex.fe_len << bsbits; | |
2944 | } | |
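	/*
	 * An illustrative walk-through of the table above (hypothetical
	 * numbers, assuming 4KiB blocks, i.e. bsbits == 12, a cluster ratio
	 * of 1, and an i_size no larger than the request): a request for 10
	 * blocks at logical block 600 gives size = (600 + 10) << 12, roughly
	 * 2.4MB, which lands in the 4MB bucket, so
	 * start_off = (600 >> (21 - 12)) << 21 = 2MB and size = 2MB; after
	 * the shifts below the goal becomes 512 blocks starting at logical
	 * block 512, a 2MB-aligned window that still covers the original
	 * blocks 600..609.
	 */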
5a0790c2 AK |
2945 | size = size >> bsbits; |
2946 | start = start_off >> bsbits; | |
c9de560d AT |
2947 | |
2948 | /* don't cover already allocated blocks in selected range */ | |
2949 | if (ar->pleft && start <= ar->lleft) { | |
2950 | size -= ar->lleft + 1 - start; | |
2951 | start = ar->lleft + 1; | |
2952 | } | |
2953 | if (ar->pright && start + size - 1 >= ar->lright) | |
2954 | size -= start + size - ar->lright; | |
2955 | ||
2956 | end = start + size; | |
2957 | ||
2958 | /* check we don't cross already preallocated blocks */ | |
2959 | rcu_read_lock(); | |
9a0762c5 | 2960 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
498e5f24 | 2961 | ext4_lblk_t pa_end; |
c9de560d | 2962 | |
c9de560d AT |
2963 | if (pa->pa_deleted) |
2964 | continue; | |
2965 | spin_lock(&pa->pa_lock); | |
2966 | if (pa->pa_deleted) { | |
2967 | spin_unlock(&pa->pa_lock); | |
2968 | continue; | |
2969 | } | |
2970 | ||
53accfa9 TT |
2971 | pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), |
2972 | pa->pa_len); | |
c9de560d AT |
2973 | |
2974 | /* PA must not overlap original request */ | |
2975 | BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || | |
2976 | ac->ac_o_ex.fe_logical < pa->pa_lstart)); | |
2977 | ||
38877f4e ES |
2978 | /* skip PAs this normalized request doesn't overlap with */ |
2979 | if (pa->pa_lstart >= end || pa_end <= start) { | |
c9de560d AT |
2980 | spin_unlock(&pa->pa_lock); |
2981 | continue; | |
2982 | } | |
2983 | BUG_ON(pa->pa_lstart <= start && pa_end >= end); | |
2984 | ||
38877f4e | 2985 | /* adjust start or end to be adjacent to this pa */ |
c9de560d AT |
2986 | if (pa_end <= ac->ac_o_ex.fe_logical) { |
2987 | BUG_ON(pa_end < start); | |
2988 | start = pa_end; | |
38877f4e | 2989 | } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { |
c9de560d AT |
2990 | BUG_ON(pa->pa_lstart > end); |
2991 | end = pa->pa_lstart; | |
2992 | } | |
2993 | spin_unlock(&pa->pa_lock); | |
2994 | } | |
2995 | rcu_read_unlock(); | |
2996 | size = end - start; | |
2997 | ||
2998 | /* XXX: extra loop to check we really don't overlap preallocations */ | |
2999 | rcu_read_lock(); | |
9a0762c5 | 3000 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
498e5f24 | 3001 | ext4_lblk_t pa_end; |
53accfa9 | 3002 | |
c9de560d AT |
3003 | spin_lock(&pa->pa_lock); |
3004 | if (pa->pa_deleted == 0) { | |
53accfa9 TT |
3005 | pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), |
3006 | pa->pa_len); | |
c9de560d AT |
3007 | BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); |
3008 | } | |
3009 | spin_unlock(&pa->pa_lock); | |
3010 | } | |
3011 | rcu_read_unlock(); | |
3012 | ||
3013 | if (start + size <= ac->ac_o_ex.fe_logical && | |
3014 | start > ac->ac_o_ex.fe_logical) { | |
9d8b9ec4 TT |
3015 | ext4_msg(ac->ac_sb, KERN_ERR, |
3016 | "start %lu, size %lu, fe_logical %lu", | |
3017 | (unsigned long) start, (unsigned long) size, | |
3018 | (unsigned long) ac->ac_o_ex.fe_logical); | |
c9de560d AT |
3019 | } |
3020 | BUG_ON(start + size <= ac->ac_o_ex.fe_logical && | |
3021 | start > ac->ac_o_ex.fe_logical); | |
7137d7a4 | 3022 | BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); |
c9de560d AT |
3023 | |
3024 | /* now prepare goal request */ | |
3025 | ||
3026 | /* XXX: is it better to align blocks with respect to logical | |
3027 | * placement or to satisfy a big request as is */ | |
3028 | ac->ac_g_ex.fe_logical = start; | |
53accfa9 | 3029 | ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); |
c9de560d AT |
3030 | |
3031 | /* define goal start in order to merge */ | |
3032 | if (ar->pright && (ar->lright == (start + size))) { | |
3033 | /* merge to the right */ | |
3034 | ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, | |
3035 | &ac->ac_f_ex.fe_group, | |
3036 | &ac->ac_f_ex.fe_start); | |
3037 | ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; | |
3038 | } | |
3039 | if (ar->pleft && (ar->lleft + 1 == start)) { | |
3040 | /* merge to the left */ | |
3041 | ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, | |
3042 | &ac->ac_f_ex.fe_group, | |
3043 | &ac->ac_f_ex.fe_start); | |
3044 | ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; | |
3045 | } | |
3046 | ||
6ba495e9 | 3047 | mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, |
c9de560d AT |
3048 | (unsigned) orig_size, (unsigned) start); |
3049 | } | |
3050 | ||
3051 | static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) | |
3052 | { | |
3053 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
3054 | ||
3055 | if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { | |
3056 | atomic_inc(&sbi->s_bal_reqs); | |
3057 | atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); | |
291dae47 | 3058 | if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) |
c9de560d AT |
3059 | atomic_inc(&sbi->s_bal_success); |
3060 | atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); | |
3061 | if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && | |
3062 | ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) | |
3063 | atomic_inc(&sbi->s_bal_goals); | |
3064 | if (ac->ac_found > sbi->s_mb_max_to_scan) | |
3065 | atomic_inc(&sbi->s_bal_breaks); | |
3066 | } | |
3067 | ||
296c355c TT |
3068 | if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) |
3069 | trace_ext4_mballoc_alloc(ac); | |
3070 | else | |
3071 | trace_ext4_mballoc_prealloc(ac); | |
c9de560d AT |
3072 | } |
3073 | ||
b844167e CW |
3074 | /* |
3075 | * Called on failure; free up any blocks from the inode PA for this | |
3076 | * context. We don't need this for MB_GROUP_PA because we only change | |
3077 | * pa_free in ext4_mb_release_context(), but on failure, we've already | |
3078 | * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. | |
3079 | */ | |
3080 | static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) | |
3081 | { | |
3082 | struct ext4_prealloc_space *pa = ac->ac_pa; | |
b844167e | 3083 | |
400db9d3 ZL |
3084 | if (pa && pa->pa_type == MB_INODE_PA) |
3085 | pa->pa_free += ac->ac_b_ex.fe_len; | |
b844167e CW |
3086 | } |
3087 | ||
c9de560d AT |
3088 | /* |
3089 | * use blocks preallocated to inode | |
3090 | */ | |
3091 | static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, | |
3092 | struct ext4_prealloc_space *pa) | |
3093 | { | |
53accfa9 | 3094 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
c9de560d AT |
3095 | ext4_fsblk_t start; |
3096 | ext4_fsblk_t end; | |
3097 | int len; | |
3098 | ||
3099 | /* found preallocated blocks, use them */ | |
3100 | start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); | |
53accfa9 TT |
3101 | end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), |
3102 | start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); | |
3103 | len = EXT4_NUM_B2C(sbi, end - start); | |
c9de560d AT |
3104 | ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, |
3105 | &ac->ac_b_ex.fe_start); | |
3106 | ac->ac_b_ex.fe_len = len; | |
3107 | ac->ac_status = AC_STATUS_FOUND; | |
3108 | ac->ac_pa = pa; | |
3109 | ||
3110 | BUG_ON(start < pa->pa_pstart); | |
53accfa9 | 3111 | BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); |
c9de560d AT |
3112 | BUG_ON(pa->pa_free < len); |
3113 | pa->pa_free -= len; | |
3114 | ||
6ba495e9 | 3115 | mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); |
c9de560d AT |
3116 | } |
3117 | ||
3118 | /* | |
3119 | * use blocks preallocated to locality group | |
3120 | */ | |
3121 | static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, | |
3122 | struct ext4_prealloc_space *pa) | |
3123 | { | |
03cddb80 | 3124 | unsigned int len = ac->ac_o_ex.fe_len; |
6be2ded1 | 3125 | |
c9de560d AT |
3126 | ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, |
3127 | &ac->ac_b_ex.fe_group, | |
3128 | &ac->ac_b_ex.fe_start); | |
3129 | ac->ac_b_ex.fe_len = len; | |
3130 | ac->ac_status = AC_STATUS_FOUND; | |
3131 | ac->ac_pa = pa; | |
3132 | ||
3133 | /* we don't correct pa_pstart or pa_len here to avoid a | |
26346ff6 | 3134 | * possible race when the group is being loaded concurrently; |
c9de560d | 3135 | * instead we correct the pa later, after blocks are marked | |
26346ff6 AK |
3136 | * in the on-disk bitmap -- see ext4_mb_release_context() | |
3137 | * Other CPUs are prevented from allocating from this pa by lg_mutex | |
c9de560d | 3138 | */ |
6ba495e9 | 3139 | mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); |
c9de560d AT |
3140 | } |
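/*
 * A short note on the two helpers above (descriptive only): an inode PA has
 * a logical-to-physical mapping, so ext4_mb_use_inode_pa() offsets into the
 * PA by (fe_logical - pa_lstart); a group PA carries no logical mapping, so
 * ext4_mb_use_group_pa() always hands out blocks from the current pa_pstart,
 * and the window is only advanced later in ext4_mb_release_context().
 */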
3141 | ||
5e745b04 AK |
3142 | /* |
3143 | * Return the prealloc space that has the minimal distance | |
3144 | * from the goal block. @cpa is the prealloc | |
3145 | * space with the currently known minimal distance | |
3146 | * from the goal block. | |
3147 | */ | |
3148 | static struct ext4_prealloc_space * | |
3149 | ext4_mb_check_group_pa(ext4_fsblk_t goal_block, | |
3150 | struct ext4_prealloc_space *pa, | |
3151 | struct ext4_prealloc_space *cpa) | |
3152 | { | |
3153 | ext4_fsblk_t cur_distance, new_distance; | |
3154 | ||
3155 | if (cpa == NULL) { | |
3156 | atomic_inc(&pa->pa_count); | |
3157 | return pa; | |
3158 | } | |
3159 | cur_distance = abs(goal_block - cpa->pa_pstart); | |
3160 | new_distance = abs(goal_block - pa->pa_pstart); | |
3161 | ||
5a54b2f1 | 3162 | if (cur_distance <= new_distance) |
5e745b04 AK |
3163 | return cpa; |
3164 | ||
3165 | /* drop the previous reference */ | |
3166 | atomic_dec(&cpa->pa_count); | |
3167 | atomic_inc(&pa->pa_count); | |
3168 | return pa; | |
3169 | } | |
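/*
 * Note on the helper above: the returned prealloc space always carries a
 * reference taken here (pa_count was incremented), and the reference on a
 * previously selected @cpa is dropped when it loses the distance comparison,
 * so the caller owns exactly one candidate at any time.
 */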
3170 | ||
c9de560d AT |
3171 | /* |
3172 | * search goal blocks in preallocated space | |
3173 | */ | |
4ddfef7b ES |
3174 | static noinline_for_stack int |
3175 | ext4_mb_use_preallocated(struct ext4_allocation_context *ac) | |
c9de560d | 3176 | { |
53accfa9 | 3177 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
6be2ded1 | 3178 | int order, i; |
c9de560d AT |
3179 | struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); |
3180 | struct ext4_locality_group *lg; | |
5e745b04 AK |
3181 | struct ext4_prealloc_space *pa, *cpa = NULL; |
3182 | ext4_fsblk_t goal_block; | |
c9de560d AT |
3183 | |
3184 | /* only data can be preallocated */ | |
3185 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
3186 | return 0; | |
3187 | ||
3188 | /* first, try per-file preallocation */ | |
3189 | rcu_read_lock(); | |
9a0762c5 | 3190 | list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { |
c9de560d AT |
3191 | |
3192 | /* all fields in this condition don't change, | |
3193 | * so we can skip locking for them */ | |
3194 | if (ac->ac_o_ex.fe_logical < pa->pa_lstart || | |
53accfa9 TT |
3195 | ac->ac_o_ex.fe_logical >= (pa->pa_lstart + |
3196 | EXT4_C2B(sbi, pa->pa_len))) | |
c9de560d AT |
3197 | continue; |
3198 | ||
fb0a387d | 3199 | /* non-extent files can't have physical blocks past 2^32 */ |
12e9b892 | 3200 | if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && |
53accfa9 TT |
3201 | (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > |
3202 | EXT4_MAX_BLOCK_FILE_PHYS)) | |
fb0a387d ES |
3203 | continue; |
3204 | ||
c9de560d AT |
3205 | /* found preallocated blocks, use them */ |
3206 | spin_lock(&pa->pa_lock); | |
3207 | if (pa->pa_deleted == 0 && pa->pa_free) { | |
3208 | atomic_inc(&pa->pa_count); | |
3209 | ext4_mb_use_inode_pa(ac, pa); | |
3210 | spin_unlock(&pa->pa_lock); | |
3211 | ac->ac_criteria = 10; | |
3212 | rcu_read_unlock(); | |
3213 | return 1; | |
3214 | } | |
3215 | spin_unlock(&pa->pa_lock); | |
3216 | } | |
3217 | rcu_read_unlock(); | |
3218 | ||
3219 | /* can we use group allocation? */ | |
3220 | if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) | |
3221 | return 0; | |
3222 | ||
3223 | /* inode may have no locality group for some reason */ | |
3224 | lg = ac->ac_lg; | |
3225 | if (lg == NULL) | |
3226 | return 0; | |
6be2ded1 AK |
3227 | order = fls(ac->ac_o_ex.fe_len) - 1; |
3228 | if (order > PREALLOC_TB_SIZE - 1) | |
3229 | /* The max size of hash table is PREALLOC_TB_SIZE */ | |
3230 | order = PREALLOC_TB_SIZE - 1; | |
3231 | ||
bda00de7 | 3232 | goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); |
5e745b04 AK |
3233 | /* |
3234 | * search for the prealloc space that has the | |
3235 | * minimal distance from the goal block. | |
3236 | */ | |
6be2ded1 AK |
3237 | for (i = order; i < PREALLOC_TB_SIZE; i++) { |
3238 | rcu_read_lock(); | |
3239 | list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], | |
3240 | pa_inode_list) { | |
3241 | spin_lock(&pa->pa_lock); | |
3242 | if (pa->pa_deleted == 0 && | |
3243 | pa->pa_free >= ac->ac_o_ex.fe_len) { | |
5e745b04 AK |
3244 | |
3245 | cpa = ext4_mb_check_group_pa(goal_block, | |
3246 | pa, cpa); | |
6be2ded1 | 3247 | } |
c9de560d | 3248 | spin_unlock(&pa->pa_lock); |
c9de560d | 3249 | } |
6be2ded1 | 3250 | rcu_read_unlock(); |
c9de560d | 3251 | } |
5e745b04 AK |
3252 | if (cpa) { |
3253 | ext4_mb_use_group_pa(ac, cpa); | |
3254 | ac->ac_criteria = 20; | |
3255 | return 1; | |
3256 | } | |
c9de560d AT |
3257 | return 0; |
3258 | } | |
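/*
 * An illustrative example for the locality-group search above (hypothetical
 * numbers): for a request of 6 clusters, order = fls(6) - 1 = 2, so buckets
 * lg_prealloc_list[2] .. lg_prealloc_list[PREALLOC_TB_SIZE - 1] are scanned;
 * any non-deleted PA there with pa_free >= 6 is a candidate, and
 * ext4_mb_check_group_pa() keeps the one whose pa_pstart is closest to the
 * goal block.
 */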
3259 | ||
7a2fcbf7 AK |
3260 | /* |
3261 | * the function goes through all blocks freed in the group | |
3262 | * but not yet committed and marks them used in the in-core bitmap. | |
3263 | * The buddy must be generated from this bitmap. | |
955ce5f5 | 3264 | * Needs to be called with the ext4 group lock held |
7a2fcbf7 AK |
3265 | */ |
3266 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, | |
3267 | ext4_group_t group) | |
3268 | { | |
3269 | struct rb_node *n; | |
3270 | struct ext4_group_info *grp; | |
3271 | struct ext4_free_data *entry; | |
3272 | ||
3273 | grp = ext4_get_group_info(sb, group); | |
3274 | n = rb_first(&(grp->bb_free_root)); | |
3275 | ||
3276 | while (n) { | |
18aadd47 BJ |
3277 | entry = rb_entry(n, struct ext4_free_data, efd_node); |
3278 | ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); | |
7a2fcbf7 AK |
3279 | n = rb_next(n); |
3280 | } | |
3281 | return; | |
3282 | } | |
3283 | ||
c9de560d AT |
3284 | /* |
3285 | * the function goes through all preallocations in this group and marks them | |
3286 | * used in the in-core bitmap. The buddy must be generated from this bitmap. | |
955ce5f5 | 3287 | * Needs to be called with the ext4 group lock held |
c9de560d | 3288 | */ |
089ceecc ES |
3289 | static noinline_for_stack |
3290 | void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, | |
c9de560d AT |
3291 | ext4_group_t group) |
3292 | { | |
3293 | struct ext4_group_info *grp = ext4_get_group_info(sb, group); | |
3294 | struct ext4_prealloc_space *pa; | |
3295 | struct list_head *cur; | |
3296 | ext4_group_t groupnr; | |
3297 | ext4_grpblk_t start; | |
3298 | int preallocated = 0; | |
c9de560d AT |
3299 | int len; |
3300 | ||
3301 | /* every path that discards preallocations first loads the group, | |
3302 | * so the only competing code is preallocation use. | |
3303 | * we don't need any locking here. | |
3304 | * notice we do NOT ignore preallocations with pa_deleted set; | |
3305 | * otherwise we could leave used blocks available for | |
3306 | * allocation in the buddy when a concurrent ext4_mb_put_pa() | |
3307 | * is dropping the preallocation | |
3308 | */ | |
3309 | list_for_each(cur, &grp->bb_prealloc_list) { | |
3310 | pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); | |
3311 | spin_lock(&pa->pa_lock); | |
3312 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, | |
3313 | &groupnr, &start); | |
3314 | len = pa->pa_len; | |
3315 | spin_unlock(&pa->pa_lock); | |
3316 | if (unlikely(len == 0)) | |
3317 | continue; | |
3318 | BUG_ON(groupnr != group); | |
c3e94d1d | 3319 | ext4_set_bits(bitmap, start, len); |
c9de560d | 3320 | preallocated += len; |
c9de560d | 3321 | } |
6ba495e9 | 3322 | mb_debug(1, "preallocated %u for group %u\n", preallocated, group); |
c9de560d AT |
3323 | } |
3324 | ||
3325 | static void ext4_mb_pa_callback(struct rcu_head *head) | |
3326 | { | |
3327 | struct ext4_prealloc_space *pa; | |
3328 | pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); | |
3329 | kmem_cache_free(ext4_pspace_cachep, pa); | |
3330 | } | |
3331 | ||
3332 | /* | |
3333 | * drops a reference to preallocated space descriptor | |
3334 | * if this was the last reference and the space is consumed | |
3335 | */ | |
3336 | static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |
3337 | struct super_block *sb, struct ext4_prealloc_space *pa) | |
3338 | { | |
a9df9a49 | 3339 | ext4_group_t grp; |
d33a1976 | 3340 | ext4_fsblk_t grp_blk; |
c9de560d AT |
3341 | |
3342 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | |
3343 | return; | |
3344 | ||
3345 | /* in this short window concurrent discard can set pa_deleted */ | |
3346 | spin_lock(&pa->pa_lock); | |
3347 | if (pa->pa_deleted == 1) { | |
3348 | spin_unlock(&pa->pa_lock); | |
3349 | return; | |
3350 | } | |
3351 | ||
3352 | pa->pa_deleted = 1; | |
3353 | spin_unlock(&pa->pa_lock); | |
3354 | ||
d33a1976 | 3355 | grp_blk = pa->pa_pstart; |
60e6679e | 3356 | /* |
cc0fb9ad AK |
3357 | * If doing group-based preallocation, pa_pstart may be in the |
3358 | * next group when pa is used up | |
3359 | */ | |
3360 | if (pa->pa_type == MB_GROUP_PA) | |
d33a1976 ES |
3361 | grp_blk--; |
3362 | ||
3363 | ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL); | |
c9de560d AT |
3364 | |
3365 | /* | |
3366 | * possible race: | |
3367 | * | |
3368 | * P1 (buddy init) P2 (regular allocation) | |
3369 | * find block B in PA | |
3370 | * copy on-disk bitmap to buddy | |
3371 | * mark B in on-disk bitmap | |
3372 | * drop PA from group | |
3373 | * mark all PAs in buddy | |
3374 | * | |
3375 | * thus, P1 initializes buddy with B available. to prevent this | |
3376 | * we make "copy" and "mark all PAs" atomic and serialize "drop PA" | |
3377 | * against that pair | |
3378 | */ | |
3379 | ext4_lock_group(sb, grp); | |
3380 | list_del(&pa->pa_group_list); | |
3381 | ext4_unlock_group(sb, grp); | |
3382 | ||
3383 | spin_lock(pa->pa_obj_lock); | |
3384 | list_del_rcu(&pa->pa_inode_list); | |
3385 | spin_unlock(pa->pa_obj_lock); | |
3386 | ||
3387 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3388 | } | |
3389 | ||
3390 | /* | |
3391 | * creates new preallocated space for given inode | |
3392 | */ | |
4ddfef7b ES |
3393 | static noinline_for_stack int |
3394 | ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) | |
c9de560d AT |
3395 | { |
3396 | struct super_block *sb = ac->ac_sb; | |
53accfa9 | 3397 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
c9de560d AT |
3398 | struct ext4_prealloc_space *pa; |
3399 | struct ext4_group_info *grp; | |
3400 | struct ext4_inode_info *ei; | |
3401 | ||
3402 | /* preallocate only when the found space is larger than requested */ | |
3403 | BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); | |
3404 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
3405 | BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); | |
3406 | ||
3407 | pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); | |
3408 | if (pa == NULL) | |
3409 | return -ENOMEM; | |
3410 | ||
3411 | if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { | |
3412 | int winl; | |
3413 | int wins; | |
3414 | int win; | |
3415 | int offs; | |
3416 | ||
3417 | /* we can't allocate as much as the normalizer wants, | |
3418 | * so the found space must get a proper lstart | |
3419 | * to cover the original request */ | |
3420 | BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); | |
3421 | BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); | |
3422 | ||
3423 | /* we're limited by the original request in that | |
3424 | * the logical block must be covered anyway; | |
3425 | * winl is the window we can move our chunk within */ | |
3426 | winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; | |
3427 | ||
3428 | /* also, we should cover whole original request */ | |
53accfa9 | 3429 | wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); |
c9de560d AT |
3430 | |
3431 | /* the smallest one defines real window */ | |
3432 | win = min(winl, wins); | |
3433 | ||
53accfa9 TT |
3434 | offs = ac->ac_o_ex.fe_logical % |
3435 | EXT4_C2B(sbi, ac->ac_b_ex.fe_len); | |
c9de560d AT |
3436 | if (offs && offs < win) |
3437 | win = offs; | |
3438 | ||
53accfa9 TT |
3439 | ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - |
3440 | EXT4_B2C(sbi, win); | |
c9de560d AT |
3441 | BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); |
3442 | BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); | |
3443 | } | |
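	/*
	 * A hypothetical walk-through of the window logic above (assuming a
	 * cluster ratio of 1): original request at logical block 50 for 10
	 * blocks, goal normalized to 512 blocks at logical 0, but only 128
	 * blocks found.  Then winl = 50 - 0 = 50, wins = 128 - 10 = 118 and
	 * offs = 50 % 128 = 50, so win = 50 and fe_logical becomes
	 * 50 - 50 = 0: the 128-block chunk covers logical 0..127 and still
	 * contains the original 50..59.
	 */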
3444 | ||
3445 | /* preallocation can change ac_b_ex, thus we store actually | |
3446 | * allocated blocks for history */ | |
3447 | ac->ac_f_ex = ac->ac_b_ex; | |
3448 | ||
3449 | pa->pa_lstart = ac->ac_b_ex.fe_logical; | |
3450 | pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); | |
3451 | pa->pa_len = ac->ac_b_ex.fe_len; | |
3452 | pa->pa_free = pa->pa_len; | |
3453 | atomic_set(&pa->pa_count, 1); | |
3454 | spin_lock_init(&pa->pa_lock); | |
d794bf8e AK |
3455 | INIT_LIST_HEAD(&pa->pa_inode_list); |
3456 | INIT_LIST_HEAD(&pa->pa_group_list); | |
c9de560d | 3457 | pa->pa_deleted = 0; |
cc0fb9ad | 3458 | pa->pa_type = MB_INODE_PA; |
c9de560d | 3459 | |
6ba495e9 | 3460 | mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, |
c9de560d | 3461 | pa->pa_pstart, pa->pa_len, pa->pa_lstart); |
9bffad1e | 3462 | trace_ext4_mb_new_inode_pa(ac, pa); |
c9de560d AT |
3463 | |
3464 | ext4_mb_use_inode_pa(ac, pa); | |
53accfa9 | 3465 | atomic_add(pa->pa_free, &sbi->s_mb_preallocated); |
c9de560d AT |
3466 | |
3467 | ei = EXT4_I(ac->ac_inode); | |
3468 | grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); | |
3469 | ||
3470 | pa->pa_obj_lock = &ei->i_prealloc_lock; | |
3471 | pa->pa_inode = ac->ac_inode; | |
3472 | ||
3473 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); | |
3474 | list_add(&pa->pa_group_list, &grp->bb_prealloc_list); | |
3475 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); | |
3476 | ||
3477 | spin_lock(pa->pa_obj_lock); | |
3478 | list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); | |
3479 | spin_unlock(pa->pa_obj_lock); | |
3480 | ||
3481 | return 0; | |
3482 | } | |
3483 | ||
3484 | /* | |
3485 | * creates new preallocated space for the locality group the inode belongs to | |
3486 | */ | |
4ddfef7b ES |
3487 | static noinline_for_stack int |
3488 | ext4_mb_new_group_pa(struct ext4_allocation_context *ac) | |
c9de560d AT |
3489 | { |
3490 | struct super_block *sb = ac->ac_sb; | |
3491 | struct ext4_locality_group *lg; | |
3492 | struct ext4_prealloc_space *pa; | |
3493 | struct ext4_group_info *grp; | |
3494 | ||
3495 | /* preallocate only when the found space is larger than requested */ | |
3496 | BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); | |
3497 | BUG_ON(ac->ac_status != AC_STATUS_FOUND); | |
3498 | BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); | |
3499 | ||
3500 | BUG_ON(ext4_pspace_cachep == NULL); | |
3501 | pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); | |
3502 | if (pa == NULL) | |
3503 | return -ENOMEM; | |
3504 | ||
3505 | /* preallocation can change ac_b_ex, thus we store actually | |
3506 | * allocated blocks for history */ | |
3507 | ac->ac_f_ex = ac->ac_b_ex; | |
3508 | ||
3509 | pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); | |
3510 | pa->pa_lstart = pa->pa_pstart; | |
3511 | pa->pa_len = ac->ac_b_ex.fe_len; | |
3512 | pa->pa_free = pa->pa_len; | |
3513 | atomic_set(&pa->pa_count, 1); | |
3514 | spin_lock_init(&pa->pa_lock); | |
6be2ded1 | 3515 | INIT_LIST_HEAD(&pa->pa_inode_list); |
d794bf8e | 3516 | INIT_LIST_HEAD(&pa->pa_group_list); |
c9de560d | 3517 | pa->pa_deleted = 0; |
cc0fb9ad | 3518 | pa->pa_type = MB_GROUP_PA; |
c9de560d | 3519 | |
6ba495e9 | 3520 | mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, |
9bffad1e TT |
3521 | pa->pa_pstart, pa->pa_len, pa->pa_lstart); |
3522 | trace_ext4_mb_new_group_pa(ac, pa); | |
c9de560d AT |
3523 | |
3524 | ext4_mb_use_group_pa(ac, pa); | |
3525 | atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); | |
3526 | ||
3527 | grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); | |
3528 | lg = ac->ac_lg; | |
3529 | BUG_ON(lg == NULL); | |
3530 | ||
3531 | pa->pa_obj_lock = &lg->lg_prealloc_lock; | |
3532 | pa->pa_inode = NULL; | |
3533 | ||
3534 | ext4_lock_group(sb, ac->ac_b_ex.fe_group); | |
3535 | list_add(&pa->pa_group_list, &grp->bb_prealloc_list); | |
3536 | ext4_unlock_group(sb, ac->ac_b_ex.fe_group); | |
3537 | ||
6be2ded1 AK |
3538 | /* |
3539 | * We will later add the new pa to the right bucket | |
3540 | * after updating the pa_free in ext4_mb_release_context | |
3541 | */ | |
c9de560d AT |
3542 | return 0; |
3543 | } | |
3544 | ||
3545 | static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) | |
3546 | { | |
3547 | int err; | |
3548 | ||
3549 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) | |
3550 | err = ext4_mb_new_group_pa(ac); | |
3551 | else | |
3552 | err = ext4_mb_new_inode_pa(ac); | |
3553 | return err; | |
3554 | } | |
3555 | ||
3556 | /* | |
3557 | * finds all unused blocks in on-disk bitmap, frees them in | |
3558 | * in-core bitmap and buddy. | |
3559 | * @pa must be unlinked from inode and group lists, so that | |
3560 | * nobody else can find/use it. | |
3561 | * the caller MUST hold group/inode locks. | |
3562 | * TODO: optimize the case when there are no in-core structures yet | |
3563 | */ | |
4ddfef7b ES |
3564 | static noinline_for_stack int |
3565 | ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, | |
3e1e5f50 | 3566 | struct ext4_prealloc_space *pa) |
c9de560d | 3567 | { |
c9de560d AT |
3568 | struct super_block *sb = e4b->bd_sb; |
3569 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
498e5f24 TT |
3570 | unsigned int end; |
3571 | unsigned int next; | |
c9de560d AT |
3572 | ext4_group_t group; |
3573 | ext4_grpblk_t bit; | |
ba80b101 | 3574 | unsigned long long grp_blk_start; |
c9de560d AT |
3575 | int err = 0; |
3576 | int free = 0; | |
3577 | ||
3578 | BUG_ON(pa->pa_deleted == 0); | |
3579 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); | |
53accfa9 | 3580 | grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); |
c9de560d AT |
3581 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); |
3582 | end = bit + pa->pa_len; | |
3583 | ||
c9de560d | 3584 | while (bit < end) { |
ffad0a44 | 3585 | bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); |
c9de560d AT |
3586 | if (bit >= end) |
3587 | break; | |
ffad0a44 | 3588 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); |
6ba495e9 | 3589 | mb_debug(1, " free preallocated %u/%u in group %u\n", |
5a0790c2 AK |
3590 | (unsigned) ext4_group_first_block_no(sb, group) + bit, |
3591 | (unsigned) next - bit, (unsigned) group); | |
c9de560d AT |
3592 | free += next - bit; |
3593 | ||
3e1e5f50 | 3594 | trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); |
53accfa9 TT |
3595 | trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + |
3596 | EXT4_C2B(sbi, bit)), | |
a9c667f8 | 3597 | next - bit); |
c9de560d AT |
3598 | mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); |
3599 | bit = next + 1; | |
3600 | } | |
3601 | if (free != pa->pa_free) { | |
9d8b9ec4 TT |
3602 | ext4_msg(e4b->bd_sb, KERN_CRIT, |
3603 | "pa %p: logic %lu, phys. %lu, len %lu", | |
3604 | pa, (unsigned long) pa->pa_lstart, | |
3605 | (unsigned long) pa->pa_pstart, | |
3606 | (unsigned long) pa->pa_len); | |
e29136f8 | 3607 | ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", |
5d1b1b3f | 3608 | free, pa->pa_free); |
e56eb659 AK |
3609 | /* |
3610 | * pa is already deleted so we use the value obtained | |
3611 | * from the bitmap and continue. | |
3612 | */ | |
c9de560d | 3613 | } |
c9de560d AT |
3614 | atomic_add(free, &sbi->s_mb_discarded); |
3615 | ||
3616 | return err; | |
3617 | } | |
3618 | ||
4ddfef7b ES |
3619 | static noinline_for_stack int |
3620 | ext4_mb_release_group_pa(struct ext4_buddy *e4b, | |
3e1e5f50 | 3621 | struct ext4_prealloc_space *pa) |
c9de560d | 3622 | { |
c9de560d AT |
3623 | struct super_block *sb = e4b->bd_sb; |
3624 | ext4_group_t group; | |
3625 | ext4_grpblk_t bit; | |
3626 | ||
60e07cf5 | 3627 | trace_ext4_mb_release_group_pa(sb, pa); |
c9de560d AT |
3628 | BUG_ON(pa->pa_deleted == 0); |
3629 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); | |
3630 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); | |
3631 | mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); | |
3632 | atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); | |
3e1e5f50 | 3633 | trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); |
c9de560d AT |
3634 | |
3635 | return 0; | |
3636 | } | |
3637 | ||
3638 | /* | |
3639 | * releases all preallocations in given group | |
3640 | * | |
3641 | * first, we need to decide discard policy: | |
3642 | * - when do we discard | |
3643 | * 1) ENOSPC | |
3644 | * - how many do we discard | |
3645 | * 1) how many requested | |
3646 | */ | |
4ddfef7b ES |
3647 | static noinline_for_stack int |
3648 | ext4_mb_discard_group_preallocations(struct super_block *sb, | |
c9de560d AT |
3649 | ext4_group_t group, int needed) |
3650 | { | |
3651 | struct ext4_group_info *grp = ext4_get_group_info(sb, group); | |
3652 | struct buffer_head *bitmap_bh = NULL; | |
3653 | struct ext4_prealloc_space *pa, *tmp; | |
3654 | struct list_head list; | |
3655 | struct ext4_buddy e4b; | |
3656 | int err; | |
3657 | int busy = 0; | |
3658 | int free = 0; | |
3659 | ||
6ba495e9 | 3660 | mb_debug(1, "discard preallocation for group %u\n", group); |
c9de560d AT |
3661 | |
3662 | if (list_empty(&grp->bb_prealloc_list)) | |
3663 | return 0; | |
3664 | ||
574ca174 | 3665 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
c9de560d | 3666 | if (bitmap_bh == NULL) { |
12062ddd | 3667 | ext4_error(sb, "Error reading block bitmap for %u", group); |
ce89f46c | 3668 | return 0; |
c9de560d AT |
3669 | } |
3670 | ||
3671 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
ce89f46c | 3672 | if (err) { |
12062ddd | 3673 | ext4_error(sb, "Error loading buddy information for %u", group); |
ce89f46c AK |
3674 | put_bh(bitmap_bh); |
3675 | return 0; | |
3676 | } | |
c9de560d AT |
3677 | |
3678 | if (needed == 0) | |
7137d7a4 | 3679 | needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; |
c9de560d | 3680 | |
c9de560d | 3681 | INIT_LIST_HEAD(&list); |
c9de560d AT |
3682 | repeat: |
3683 | ext4_lock_group(sb, group); | |
3684 | list_for_each_entry_safe(pa, tmp, | |
3685 | &grp->bb_prealloc_list, pa_group_list) { | |
3686 | spin_lock(&pa->pa_lock); | |
3687 | if (atomic_read(&pa->pa_count)) { | |
3688 | spin_unlock(&pa->pa_lock); | |
3689 | busy = 1; | |
3690 | continue; | |
3691 | } | |
3692 | if (pa->pa_deleted) { | |
3693 | spin_unlock(&pa->pa_lock); | |
3694 | continue; | |
3695 | } | |
3696 | ||
3697 | /* seems this one can be freed ... */ | |
3698 | pa->pa_deleted = 1; | |
3699 | ||
3700 | /* we can trust pa_free ... */ | |
3701 | free += pa->pa_free; | |
3702 | ||
3703 | spin_unlock(&pa->pa_lock); | |
3704 | ||
3705 | list_del(&pa->pa_group_list); | |
3706 | list_add(&pa->u.pa_tmp_list, &list); | |
3707 | } | |
3708 | ||
3709 | /* if we still need more blocks and some PAs were used, try again */ | |
3710 | if (free < needed && busy) { | |
3711 | busy = 0; | |
3712 | ext4_unlock_group(sb, group); | |
3713 | /* | |
3714 | * Yield the CPU here so that we don't get soft lockup | |
3715 | * in non preempt case. | |
3716 | */ | |
3717 | yield(); | |
3718 | goto repeat; | |
3719 | } | |
3720 | ||
3721 | /* found anything to free? */ | |
3722 | if (list_empty(&list)) { | |
3723 | BUG_ON(free != 0); | |
3724 | goto out; | |
3725 | } | |
3726 | ||
3727 | /* now free all selected PAs */ | |
3728 | list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { | |
3729 | ||
3730 | /* remove from object (inode or locality group) */ | |
3731 | spin_lock(pa->pa_obj_lock); | |
3732 | list_del_rcu(&pa->pa_inode_list); | |
3733 | spin_unlock(pa->pa_obj_lock); | |
3734 | ||
cc0fb9ad | 3735 | if (pa->pa_type == MB_GROUP_PA) |
3e1e5f50 | 3736 | ext4_mb_release_group_pa(&e4b, pa); |
c9de560d | 3737 | else |
3e1e5f50 | 3738 | ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); |
c9de560d AT |
3739 | |
3740 | list_del(&pa->u.pa_tmp_list); | |
3741 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3742 | } | |
3743 | ||
3744 | out: | |
3745 | ext4_unlock_group(sb, group); | |
e39e07fd | 3746 | ext4_mb_unload_buddy(&e4b); |
c9de560d AT |
3747 | put_bh(bitmap_bh); |
3748 | return free; | |
3749 | } | |
3750 | ||
3751 | /* | |
3752 | * releases all unused preallocated blocks for a given inode | |
3753 | * | |
3754 | * It's important to discard preallocations under i_data_sem | |
3755 | * We don't want another block to be served from the prealloc | |
3756 | * space when we are discarding the inode prealloc space. | |
3757 | * | |
3758 | * FIXME!! Make sure it is valid at all the call sites | |
3759 | */ | |
c2ea3fde | 3760 | void ext4_discard_preallocations(struct inode *inode) |
c9de560d AT |
3761 | { |
3762 | struct ext4_inode_info *ei = EXT4_I(inode); | |
3763 | struct super_block *sb = inode->i_sb; | |
3764 | struct buffer_head *bitmap_bh = NULL; | |
3765 | struct ext4_prealloc_space *pa, *tmp; | |
3766 | ext4_group_t group = 0; | |
3767 | struct list_head list; | |
3768 | struct ext4_buddy e4b; | |
3769 | int err; | |
3770 | ||
c2ea3fde | 3771 | if (!S_ISREG(inode->i_mode)) { |
c9de560d AT |
3772 | /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ |
3773 | return; | |
3774 | } | |
3775 | ||
6ba495e9 | 3776 | mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); |
9bffad1e | 3777 | trace_ext4_discard_preallocations(inode); |
c9de560d AT |
3778 | |
3779 | INIT_LIST_HEAD(&list); | |
3780 | ||
3781 | repeat: | |
3782 | /* first, collect all pa's in the inode */ | |
3783 | spin_lock(&ei->i_prealloc_lock); | |
3784 | while (!list_empty(&ei->i_prealloc_list)) { | |
3785 | pa = list_entry(ei->i_prealloc_list.next, | |
3786 | struct ext4_prealloc_space, pa_inode_list); | |
3787 | BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); | |
3788 | spin_lock(&pa->pa_lock); | |
3789 | if (atomic_read(&pa->pa_count)) { | |
3790 | /* this shouldn't happen often - nobody should | |
3791 | * use preallocation while we're discarding it */ | |
3792 | spin_unlock(&pa->pa_lock); | |
3793 | spin_unlock(&ei->i_prealloc_lock); | |
9d8b9ec4 TT |
3794 | ext4_msg(sb, KERN_ERR, |
3795 | "uh-oh! used pa while discarding"); | |
c9de560d AT |
3796 | WARN_ON(1); |
3797 | schedule_timeout_uninterruptible(HZ); | |
3798 | goto repeat; | |
3799 | ||
3800 | } | |
3801 | if (pa->pa_deleted == 0) { | |
3802 | pa->pa_deleted = 1; | |
3803 | spin_unlock(&pa->pa_lock); | |
3804 | list_del_rcu(&pa->pa_inode_list); | |
3805 | list_add(&pa->u.pa_tmp_list, &list); | |
3806 | continue; | |
3807 | } | |
3808 | ||
3809 | /* someone is deleting pa right now */ | |
3810 | spin_unlock(&pa->pa_lock); | |
3811 | spin_unlock(&ei->i_prealloc_lock); | |
3812 | ||
3813 | /* we have to wait here because pa_deleted | |
3814 | * doesn't mean the pa is already unlinked from | |
3815 | * the list. as we might be called from | |
3816 | * ->clear_inode(), the inode will get freed | |
3817 | * and a concurrent thread which is unlinking | |
3818 | * the pa from the inode's list may access already | |
3819 | * freed memory, bad-bad-bad */ | |
3820 | ||
3821 | /* XXX: if this happens too often, we can | |
3822 | * add a flag to force wait only in case | |
3823 | * of ->clear_inode(), but not in case of | |
3824 | * regular truncate */ | |
3825 | schedule_timeout_uninterruptible(HZ); | |
3826 | goto repeat; | |
3827 | } | |
3828 | spin_unlock(&ei->i_prealloc_lock); | |
3829 | ||
3830 | list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { | |
cc0fb9ad | 3831 | BUG_ON(pa->pa_type != MB_INODE_PA); |
c9de560d AT |
3832 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); |
3833 | ||
3834 | err = ext4_mb_load_buddy(sb, group, &e4b); | |
ce89f46c | 3835 | if (err) { |
12062ddd ES |
3836 | ext4_error(sb, "Error loading buddy information for %u", |
3837 | group); | |
ce89f46c AK |
3838 | continue; |
3839 | } | |
c9de560d | 3840 | |
574ca174 | 3841 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
c9de560d | 3842 | if (bitmap_bh == NULL) { |
12062ddd ES |
3843 | ext4_error(sb, "Error reading block bitmap for %u", |
3844 | group); | |
e39e07fd | 3845 | ext4_mb_unload_buddy(&e4b); |
ce89f46c | 3846 | continue; |
c9de560d AT |
3847 | } |
3848 | ||
3849 | ext4_lock_group(sb, group); | |
3850 | list_del(&pa->pa_group_list); | |
3e1e5f50 | 3851 | ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); |
c9de560d AT |
3852 | ext4_unlock_group(sb, group); |
3853 | ||
e39e07fd | 3854 | ext4_mb_unload_buddy(&e4b); |
c9de560d AT |
3855 | put_bh(bitmap_bh); |
3856 | ||
3857 | list_del(&pa->u.pa_tmp_list); | |
3858 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
3859 | } | |
3860 | } | |
3861 | ||
6ba495e9 | 3862 | #ifdef CONFIG_EXT4_DEBUG |
c9de560d AT |
3863 | static void ext4_mb_show_ac(struct ext4_allocation_context *ac) |
3864 | { | |
3865 | struct super_block *sb = ac->ac_sb; | |
8df9675f | 3866 | ext4_group_t ngroups, i; |
c9de560d | 3867 | |
4dd89fc6 TT |
3868 | if (!mb_enable_debug || |
3869 | (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) | |
e3570639 ES |
3870 | return; |
3871 | ||
7f6a11e7 | 3872 | ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" |
9d8b9ec4 | 3873 | " Allocation context details:"); |
7f6a11e7 | 3874 | ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", |
c9de560d | 3875 | ac->ac_status, ac->ac_flags); |
7f6a11e7 | 3876 | ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " |
9d8b9ec4 TT |
3877 | "goal %lu/%lu/%lu@%lu, " |
3878 | "best %lu/%lu/%lu@%lu cr %d", | |
c9de560d AT |
3879 | (unsigned long)ac->ac_o_ex.fe_group, |
3880 | (unsigned long)ac->ac_o_ex.fe_start, | |
3881 | (unsigned long)ac->ac_o_ex.fe_len, | |
3882 | (unsigned long)ac->ac_o_ex.fe_logical, | |
3883 | (unsigned long)ac->ac_g_ex.fe_group, | |
3884 | (unsigned long)ac->ac_g_ex.fe_start, | |
3885 | (unsigned long)ac->ac_g_ex.fe_len, | |
3886 | (unsigned long)ac->ac_g_ex.fe_logical, | |
3887 | (unsigned long)ac->ac_b_ex.fe_group, | |
3888 | (unsigned long)ac->ac_b_ex.fe_start, | |
3889 | (unsigned long)ac->ac_b_ex.fe_len, | |
3890 | (unsigned long)ac->ac_b_ex.fe_logical, | |
3891 | (int)ac->ac_criteria); | |
7f6a11e7 | 3892 | ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found", |
9d8b9ec4 | 3893 | ac->ac_ex_scanned, ac->ac_found); |
7f6a11e7 | 3894 | ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); |
8df9675f TT |
3895 | ngroups = ext4_get_groups_count(sb); |
3896 | for (i = 0; i < ngroups; i++) { | |
c9de560d AT |
3897 | struct ext4_group_info *grp = ext4_get_group_info(sb, i); |
3898 | struct ext4_prealloc_space *pa; | |
3899 | ext4_grpblk_t start; | |
3900 | struct list_head *cur; | |
3901 | ext4_lock_group(sb, i); | |
3902 | list_for_each(cur, &grp->bb_prealloc_list) { | |
3903 | pa = list_entry(cur, struct ext4_prealloc_space, | |
3904 | pa_group_list); | |
3905 | spin_lock(&pa->pa_lock); | |
3906 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, | |
3907 | NULL, &start); | |
3908 | spin_unlock(&pa->pa_lock); | |
1c718505 AF |
3909 | printk(KERN_ERR "PA:%u:%d:%u \n", i, |
3910 | start, pa->pa_len); | |
c9de560d | 3911 | } |
60bd63d1 | 3912 | ext4_unlock_group(sb, i); |
c9de560d AT |
3913 | |
3914 | if (grp->bb_free == 0) | |
3915 | continue; | |
1c718505 | 3916 | printk(KERN_ERR "%u: %d/%d \n", |
c9de560d AT |
3917 | i, grp->bb_free, grp->bb_fragments); |
3918 | } | |
3919 | printk(KERN_ERR "\n"); | |
3920 | } | |
3921 | #else | |
3922 | static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) | |
3923 | { | |
3924 | return; | |
3925 | } | |
3926 | #endif | |
3927 | ||
3928 | /* | |
3929 | * We use locality group preallocation for small files. The size of the | |
3930 | * file is determined by the current size or the resulting size after | |
3931 | * allocation, whichever is larger | |
3932 | * | |
b713a5ec | 3933 | * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req |
c9de560d AT |
3934 | */ |
3935 | static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |
3936 | { | |
3937 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | |
3938 | int bsbits = ac->ac_sb->s_blocksize_bits; | |
3939 | loff_t size, isize; | |
3940 | ||
3941 | if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) | |
3942 | return; | |
3943 | ||
4ba74d00 TT |
3944 | if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) |
3945 | return; | |
3946 | ||
53accfa9 | 3947 | size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); |
50797481 TT |
3948 | isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) |
3949 | >> bsbits; | |
c9de560d | 3950 | |
50797481 TT |
3951 | if ((size == isize) && |
3952 | !ext4_fs_is_busy(sbi) && | |
3953 | (atomic_read(&ac->ac_inode->i_writecount) == 0)) { | |
3954 | ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; | |
3955 | return; | |
3956 | } | |
3957 | ||
ebbe0277 RD |
3958 | if (sbi->s_mb_group_prealloc <= 0) { |
3959 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; | |
3960 | return; | |
3961 | } | |
3962 | ||
c9de560d | 3963 | /* don't use group allocation for large files */ |
71780577 | 3964 | size = max(size, isize); |
cc483f10 | 3965 | if (size > sbi->s_mb_stream_request) { |
4ba74d00 | 3966 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; |
c9de560d | 3967 | return; |
4ba74d00 | 3968 | } |
c9de560d AT |
3969 | |
3970 | BUG_ON(ac->ac_lg != NULL); | |
3971 | /* | |
3972 | * locality group prealloc space is per-CPU. The reason for having | |
3973 | * a per-CPU locality group is to reduce the contention between block | |
3974 | * requests from multiple CPUs. | |
3975 | */ | |
ca0c9584 | 3976 | ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); |
c9de560d AT |
3977 | |
3978 | /* we're going to use group allocation */ | |
3979 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; | |
3980 | ||
3981 | /* serialize all allocations in the group */ | |
3982 | mutex_lock(&ac->ac_lg->lg_mutex); | |
3983 | } | |
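/*
 * Putting the policy above in numbers (an illustration using the default
 * mb_stream_req of 16 blocks, and ignoring the closed-file and disabled
 * group-preallocation early exits): a data request on a file whose
 * max(current size, size after this allocation) is at most 16 blocks gets
 * EXT4_MB_HINT_GROUP_ALLOC and draws from the per-CPU locality group;
 * anything larger is marked EXT4_MB_STREAM_ALLOC and uses per-inode
 * preallocation instead.
 */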
3984 | ||
4ddfef7b ES |
3985 | static noinline_for_stack int |
3986 | ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |
c9de560d AT |
3987 | struct ext4_allocation_request *ar) |
3988 | { | |
3989 | struct super_block *sb = ar->inode->i_sb; | |
3990 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
3991 | struct ext4_super_block *es = sbi->s_es; | |
3992 | ext4_group_t group; | |
498e5f24 TT |
3993 | unsigned int len; |
3994 | ext4_fsblk_t goal; | |
c9de560d AT |
3995 | ext4_grpblk_t block; |
3996 | ||
3997 | /* we can't allocate > group size */ | |
3998 | len = ar->len; | |
3999 | ||
4000 | /* just a dirty hack to filter too big requests */ | |
7137d7a4 TT |
4001 | if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10) |
4002 | len = EXT4_CLUSTERS_PER_GROUP(sb) - 10; | |
c9de560d AT |
4003 | |
4004 | /* start searching from the goal */ | |
4005 | goal = ar->goal; | |
4006 | if (goal < le32_to_cpu(es->s_first_data_block) || | |
4007 | goal >= ext4_blocks_count(es)) | |
4008 | goal = le32_to_cpu(es->s_first_data_block); | |
4009 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | |
4010 | ||
4011 | /* set up allocation goals */ | |
53accfa9 | 4012 | ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); |
c9de560d | 4013 | ac->ac_status = AC_STATUS_CONTINUE; |
c9de560d AT |
4014 | ac->ac_sb = sb; |
4015 | ac->ac_inode = ar->inode; | |
53accfa9 | 4016 | ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; |
c9de560d AT |
4017 | ac->ac_o_ex.fe_group = group; |
4018 | ac->ac_o_ex.fe_start = block; | |
4019 | ac->ac_o_ex.fe_len = len; | |
53accfa9 | 4020 | ac->ac_g_ex = ac->ac_o_ex; |
c9de560d | 4021 | ac->ac_flags = ar->flags; |
c9de560d AT |
4022 | |
4023 | /* we have to define the context: will we work with a file or | |
4024 | * a locality group. this is a policy, actually */ | |
4025 | ext4_mb_group_or_file(ac); | |
4026 | ||
6ba495e9 | 4027 | mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " |
c9de560d AT |
4028 | "left: %u/%u, right %u/%u to %swritable\n", |
4029 | (unsigned) ar->len, (unsigned) ar->logical, | |
4030 | (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, | |
4031 | (unsigned) ar->lleft, (unsigned) ar->pleft, | |
4032 | (unsigned) ar->lright, (unsigned) ar->pright, | |
4033 | atomic_read(&ar->inode->i_writecount) ? "" : "non-"); | |
4034 | return 0; | |
4035 | ||
4036 | } | |
4037 | ||
6be2ded1 AK |
4038 | static noinline_for_stack void |
4039 | ext4_mb_discard_lg_preallocations(struct super_block *sb, | |
4040 | struct ext4_locality_group *lg, | |
4041 | int order, int total_entries) | |
4042 | { | |
4043 | ext4_group_t group = 0; | |
4044 | struct ext4_buddy e4b; | |
4045 | struct list_head discard_list; | |
4046 | struct ext4_prealloc_space *pa, *tmp; | |
6be2ded1 | 4047 | |
6ba495e9 | 4048 | mb_debug(1, "discard locality group preallocation\n"); |
6be2ded1 AK |
4049 | |
4050 | INIT_LIST_HEAD(&discard_list); | |
6be2ded1 AK |
4051 | |
4052 | spin_lock(&lg->lg_prealloc_lock); | |
4053 | list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], | |
4054 | pa_inode_list) { | |
4055 | spin_lock(&pa->pa_lock); | |
4056 | if (atomic_read(&pa->pa_count)) { | |
4057 | /* | |
4058 | * This is the pa that we just used | |
4059 | * for block allocation. So don't | |
4060 | * free that | |
4061 | */ | |
4062 | spin_unlock(&pa->pa_lock); | |
4063 | continue; | |
4064 | } | |
4065 | if (pa->pa_deleted) { | |
4066 | spin_unlock(&pa->pa_lock); | |
4067 | continue; | |
4068 | } | |
4069 | /* only lg prealloc space */ | |
cc0fb9ad | 4070 | BUG_ON(pa->pa_type != MB_GROUP_PA); |
6be2ded1 AK |
4071 | |
4072 | /* seems this one can be freed ... */ | |
4073 | pa->pa_deleted = 1; | |
4074 | spin_unlock(&pa->pa_lock); | |
4075 | ||
4076 | list_del_rcu(&pa->pa_inode_list); | |
4077 | list_add(&pa->u.pa_tmp_list, &discard_list); | |
4078 | ||
4079 | total_entries--; | |
4080 | if (total_entries <= 5) { | |
4081 | /* | |
4082 | * we want to keep only 5 entries | |
4083 | * allowing it to grow to 8. This | |
4084 | * makes sure we don't call discard | |
4085 | * soon for this list. | |
4086 | */ | |
4087 | break; | |
4088 | } | |
4089 | } | |
4090 | spin_unlock(&lg->lg_prealloc_lock); | |
4091 | ||
4092 | list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { | |
4093 | ||
4094 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); | |
4095 | if (ext4_mb_load_buddy(sb, group, &e4b)) { | |
12062ddd ES |
4096 | ext4_error(sb, "Error loading buddy information for %u", |
4097 | group); | |
6be2ded1 AK |
4098 | continue; |
4099 | } | |
4100 | ext4_lock_group(sb, group); | |
4101 | list_del(&pa->pa_group_list); | |
3e1e5f50 | 4102 | ext4_mb_release_group_pa(&e4b, pa); |
6be2ded1 AK |
4103 | ext4_unlock_group(sb, group); |
4104 | ||
e39e07fd | 4105 | ext4_mb_unload_buddy(&e4b); |
6be2ded1 AK |
4106 | list_del(&pa->u.pa_tmp_list); |
4107 | call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); | |
4108 | } | |
6be2ded1 AK |
4109 | } |
4110 | ||
4111 | /* | |
4112 | * We have incremented pa_count. So it cannot be freed at this | |
4113 | * point. Also we hold lg_mutex. So no parallel allocation is | |
4114 | * possible from this lg. That means pa_free cannot be updated. | |
4115 | * | |
4116 | * A parallel ext4_mb_discard_group_preallocations is possible, | |
4117 | * which can cause the lg_prealloc_list to be updated. | |
4118 | */ | |
4119 | ||
4120 | static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) | |
4121 | { | |
4122 | int order, added = 0, lg_prealloc_count = 1; | |
4123 | struct super_block *sb = ac->ac_sb; | |
4124 | struct ext4_locality_group *lg = ac->ac_lg; | |
4125 | struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; | |
4126 | ||
4127 | order = fls(pa->pa_free) - 1; | |
4128 | if (order > PREALLOC_TB_SIZE - 1) | |
4129 | /* The max size of hash table is PREALLOC_TB_SIZE */ | |
4130 | order = PREALLOC_TB_SIZE - 1; | |
4131 | /* Add the prealloc space to lg */ | |
4132 | rcu_read_lock(); | |
4133 | list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], | |
4134 | pa_inode_list) { | |
4135 | spin_lock(&tmp_pa->pa_lock); | |
4136 | if (tmp_pa->pa_deleted) { | |
e7c9e3e9 | 4137 | spin_unlock(&tmp_pa->pa_lock); |
6be2ded1 AK |
4138 | continue; |
4139 | } | |
4140 | if (!added && pa->pa_free < tmp_pa->pa_free) { | |
4141 | /* Add to the tail of the previous entry */ | |
4142 | list_add_tail_rcu(&pa->pa_inode_list, | |
4143 | &tmp_pa->pa_inode_list); | |
4144 | added = 1; | |
4145 | /* | |
4146 | * we want to count the total | |
4147 | * number of entries in the list | |
4148 | */ | |
4149 | } | |
4150 | spin_unlock(&tmp_pa->pa_lock); | |
4151 | lg_prealloc_count++; | |
4152 | } | |
4153 | if (!added) | |
4154 | list_add_tail_rcu(&pa->pa_inode_list, | |
4155 | &lg->lg_prealloc_list[order]); | |
4156 | rcu_read_unlock(); | |
4157 | ||
4158 | /* Now trim the list to be not more than 8 elements */ | |
4159 | if (lg_prealloc_count > 8) { | |
4160 | ext4_mb_discard_lg_preallocations(sb, lg, | |
4161 | order, lg_prealloc_count); | |
4162 | return; | |
4163 | } | |
4164 | return ; | |
4165 | } | |
4166 | ||
c9de560d AT |
4167 | /* |
4168 | * release all resource we used in allocation | |
4169 | */ | |
4170 | static int ext4_mb_release_context(struct ext4_allocation_context *ac) | |
4171 | { | |
53accfa9 | 4172 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
6be2ded1 AK |
4173 | struct ext4_prealloc_space *pa = ac->ac_pa; |
4174 | if (pa) { | |
cc0fb9ad | 4175 | if (pa->pa_type == MB_GROUP_PA) { |
c9de560d | 4176 | /* see comment in ext4_mb_use_group_pa() */ |
6be2ded1 | 4177 | spin_lock(&pa->pa_lock); |
53accfa9 TT |
4178 | pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); |
4179 | pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); | |
6be2ded1 AK |
4180 | pa->pa_free -= ac->ac_b_ex.fe_len; |
4181 | pa->pa_len -= ac->ac_b_ex.fe_len; | |
4182 | spin_unlock(&pa->pa_lock); | |
c9de560d | 4183 | } |
c9de560d | 4184 | } |
ba443916 AK |
4185 | if (pa) { |
4186 | /* | |
4187 | * We want to add the pa to the right bucket. | |
4188 | * Remove it from the list and while adding | |
4189 | * make sure the list to which we are adding | |
44183d42 | 4190 | * doesn't grow big. |
ba443916 | 4191 | */ |
cc0fb9ad | 4192 | if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { |
ba443916 AK |
4193 | spin_lock(pa->pa_obj_lock); |
4194 | list_del_rcu(&pa->pa_inode_list); | |
4195 | spin_unlock(pa->pa_obj_lock); | |
4196 | ext4_mb_add_n_trim(ac); | |
4197 | } | |
4198 | ext4_mb_put_pa(ac, ac->ac_sb, pa); | |
4199 | } | |
c9de560d AT |
4200 | if (ac->ac_bitmap_page) |
4201 | page_cache_release(ac->ac_bitmap_page); | |
4202 | if (ac->ac_buddy_page) | |
4203 | page_cache_release(ac->ac_buddy_page); | |
4204 | if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) | |
4205 | mutex_unlock(&ac->ac_lg->lg_mutex); | |
4206 | ext4_mb_collect_stats(ac); | |
4207 | return 0; | |
4208 | } | |
4209 | ||
4210 | static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) | |
4211 | { | |
8df9675f | 4212 | ext4_group_t i, ngroups = ext4_get_groups_count(sb); |
c9de560d AT |
4213 | int ret; |
4214 | int freed = 0; | |
4215 | ||
9bffad1e | 4216 | trace_ext4_mb_discard_preallocations(sb, needed); |
8df9675f | 4217 | for (i = 0; i < ngroups && needed > 0; i++) { |
c9de560d AT |
4218 | ret = ext4_mb_discard_group_preallocations(sb, i, needed); |
4219 | freed += ret; | |
4220 | needed -= ret; | |
4221 | } | |
4222 | ||
4223 | return freed; | |
4224 | } | |
4225 | ||
4226 | /* | |
4227 | * Main entry point into mballoc to allocate blocks; | |
4228 | * it tries to use preallocation first, then falls back | |
4229 | * to the usual allocation | |
4230 | */ | |
4231 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |
6c7a120a | 4232 | struct ext4_allocation_request *ar, int *errp) |
c9de560d | 4233 | { |
6bc6e63f | 4234 | int freed; |
256bdb49 | 4235 | struct ext4_allocation_context *ac = NULL; |
c9de560d AT |
4236 | struct ext4_sb_info *sbi; |
4237 | struct super_block *sb; | |
4238 | ext4_fsblk_t block = 0; | |
60e58e0f | 4239 | unsigned int inquota = 0; |
53accfa9 | 4240 | unsigned int reserv_clstrs = 0; |
c9de560d AT |
4241 | |
4242 | sb = ar->inode->i_sb; | |
4243 | sbi = EXT4_SB(sb); | |
4244 | ||
9bffad1e | 4245 | trace_ext4_request_blocks(ar); |
ba80b101 | 4246 | |
45dc63e7 DM |
4247 | /* Allow use of the superuser reservation for the quota file */ | |
4248 | if (IS_NOQUOTA(ar->inode)) | |
4249 | ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; | |
4250 | ||
60e58e0f MC |
4251 | /* |
4252 | * For delayed allocation, we could skip the ENOSPC and | |
4253 | * EDQUOT check, as blocks and quotas have been already | |
4254 | * reserved when data being copied into pagecache. | |
4255 | */ | |
f2321097 | 4256 | if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) |
60e58e0f MC |
4257 | ar->flags |= EXT4_MB_DELALLOC_RESERVED; |
4258 | else { | |
4259 | /* Without delayed allocation we need to verify | |
4260 | * there are enough free blocks to do block allocation | |
4261 | * and verify allocation doesn't exceed the quota limits. | |
d2a17637 | 4262 | */ |
55f020db | 4263 | while (ar->len && |
e7d5f315 | 4264 | ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { |
55f020db | 4265 | |
030ba6bc AK |
4266 | /* let others free the space */ | |
4267 | yield(); | |
4268 | ar->len = ar->len >> 1; | |
4269 | } | |
4270 | if (!ar->len) { | |
a30d542a AK |
4271 | *errp = -ENOSPC; |
4272 | return 0; | |
4273 | } | |
53accfa9 | 4274 | reserv_clstrs = ar->len; |
55f020db | 4275 | if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { |
53accfa9 TT |
4276 | dquot_alloc_block_nofail(ar->inode, |
4277 | EXT4_C2B(sbi, ar->len)); | |
55f020db AH |
4278 | } else { |
4279 | while (ar->len && | |
53accfa9 TT |
4280 | dquot_alloc_block(ar->inode, |
4281 | EXT4_C2B(sbi, ar->len))) { | |
55f020db AH |
4282 | |
4283 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | |
4284 | ar->len--; | |
4285 | } | |
60e58e0f MC |
4286 | } |
4287 | inquota = ar->len; | |
4288 | if (ar->len == 0) { | |
4289 | *errp = -EDQUOT; | |
6c7a120a | 4290 | goto out; |
60e58e0f | 4291 | } |
07031431 | 4292 | } |
d2a17637 | 4293 | |
85556c9a | 4294 | ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); |
833576b3 | 4295 | if (!ac) { |
363d4251 | 4296 | ar->len = 0; |
256bdb49 | 4297 | *errp = -ENOMEM; |
6c7a120a | 4298 | goto out; |
256bdb49 ES |
4299 | } |
4300 | ||
256bdb49 | 4301 | *errp = ext4_mb_initialize_context(ac, ar); |
c9de560d AT |
4302 | if (*errp) { |
4303 | ar->len = 0; | |
6c7a120a | 4304 | goto out; |
c9de560d AT |
4305 | } |
4306 | ||
256bdb49 ES |
4307 | ac->ac_op = EXT4_MB_HISTORY_PREALLOC; |
4308 | if (!ext4_mb_use_preallocated(ac)) { | |
256bdb49 ES |
4309 | ac->ac_op = EXT4_MB_HISTORY_ALLOC; |
4310 | ext4_mb_normalize_request(ac, ar); | |
c9de560d AT |
4311 | repeat: |
4312 | /* allocate space in core */ | |
6c7a120a AK |
4313 | *errp = ext4_mb_regular_allocator(ac); |
4314 | if (*errp) | |
4315 | goto errout; | |
c9de560d AT |
4316 | |
4317 | /* as we've just preallocated more space than | |
4318 | * the user originally requested, we store the allocated | |
4319 | * space in a special descriptor */ | |
256bdb49 ES |
4320 | if (ac->ac_status == AC_STATUS_FOUND && |
4321 | ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) | |
4322 | ext4_mb_new_preallocation(ac); | |
c9de560d | 4323 | } |
256bdb49 | 4324 | if (likely(ac->ac_status == AC_STATUS_FOUND)) { |
53accfa9 | 4325 | *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); |
6c7a120a | 4326 | if (*errp == -EAGAIN) { |
8556e8f3 AK |
4327 | /* |
4328 | * drop the reference that we took | |
4329 | * in ext4_mb_use_best_found | |
4330 | */ | |
4331 | ext4_mb_release_context(ac); | |
519deca0 AK |
4332 | ac->ac_b_ex.fe_group = 0; |
4333 | ac->ac_b_ex.fe_start = 0; | |
4334 | ac->ac_b_ex.fe_len = 0; | |
4335 | ac->ac_status = AC_STATUS_CONTINUE; | |
4336 | goto repeat; | |
6c7a120a AK |
4337 | } else if (*errp) |
4338 | errout: | |
b844167e | 4339 | ext4_discard_allocated_blocks(ac); |
6c7a120a | 4340 | else { |
519deca0 AK |
4341 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); |
4342 | ar->len = ac->ac_b_ex.fe_len; | |
4343 | } | |
c9de560d | 4344 | } else { |
256bdb49 | 4345 | freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); |
c9de560d AT |
4346 | if (freed) |
4347 | goto repeat; | |
4348 | *errp = -ENOSPC; | |
6c7a120a AK |
4349 | } |
4350 | ||
4351 | if (*errp) { | |
256bdb49 | 4352 | ac->ac_b_ex.fe_len = 0; |
c9de560d | 4353 | ar->len = 0; |
256bdb49 | 4354 | ext4_mb_show_ac(ac); |
c9de560d | 4355 | } |
256bdb49 | 4356 | ext4_mb_release_context(ac); |
6c7a120a AK |
4357 | out: |
4358 | if (ac) | |
4359 | kmem_cache_free(ext4_ac_cachep, ac); | |
60e58e0f | 4360 | if (inquota && ar->len < inquota) |
53accfa9 | 4361 | dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); |
0087d9fb | 4362 | if (!ar->len) { |
f2321097 TT |
4363 | if (!ext4_test_inode_state(ar->inode, |
4364 | EXT4_STATE_DELALLOC_RESERVED)) | |
0087d9fb | 4365 | /* release all the reserved blocks if non delalloc */ |
57042651 | 4366 | percpu_counter_sub(&sbi->s_dirtyclusters_counter, |
53accfa9 | 4367 | reserv_clstrs); |
0087d9fb | 4368 | } |
c9de560d | 4369 | |
9bffad1e | 4370 | trace_ext4_allocate_blocks(ar, (unsigned long long)block); |
ba80b101 | 4371 | |
c9de560d AT |
4372 | return block; |
4373 | } | |
c9de560d | 4374 | |
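/*
 * Illustrative sketch, not part of mballoc.c: a typical caller builds an
 * ext4_allocation_request and lets ext4_mb_new_blocks() choose the physical
 * blocks.  The helper name and the goal/length values below are made-up
 * examples; only the struct fields and the flag are taken from ext4.
 */
static ext4_fsblk_t example_alloc_data_blocks(handle_t *handle,
					      struct inode *inode,
					      ext4_lblk_t logical)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t pblk;
	int err;

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = logical;		/* logical block in the file */
	ar.goal = 0;			/* a real caller derives a goal block */
	ar.len = 8;			/* number of blocks wanted */
	ar.flags = EXT4_MB_HINT_DATA;

	pblk = ext4_mb_new_blocks(handle, &ar, &err);
	if (err)
		return 0;
	/* on success ar.len holds how many blocks were actually allocated */
	return pblk;
}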
c894058d AK |
4375 | /* |
4376 | * We can merge two free data extents only if the physical blocks | |
4377 | * are contiguous, AND the extents were freed by the same transaction, | |
4378 | * AND the blocks are associated with the same group. | |
4379 | */ | |
4380 | static int can_merge(struct ext4_free_data *entry1, | |
4381 | struct ext4_free_data *entry2) | |
4382 | { | |
18aadd47 BJ |
4383 | if ((entry1->efd_tid == entry2->efd_tid) && |
4384 | (entry1->efd_group == entry2->efd_group) && | |
4385 | ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster)) | |
c894058d AK |
4386 | return 1; |
4387 | return 0; | |
4388 | } | |
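/*
 * Illustrative example, not in the original source: two queued entries with
 * the same transaction tid and the same group, covering clusters 100..103
 * and 104..110, satisfy all three conditions above and can be folded into a
 * single entry for clusters 100..110; a differing tid, a differing group, or
 * a gap between the ranges keeps them as separate rb-tree nodes.
 */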
4389 | ||
4ddfef7b ES |
4390 | static noinline_for_stack int |
4391 | ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, | |
7a2fcbf7 | 4392 | struct ext4_free_data *new_entry) |
c9de560d | 4393 | { |
e29136f8 | 4394 | ext4_group_t group = e4b->bd_group; |
84130193 | 4395 | ext4_grpblk_t cluster; |
7a2fcbf7 | 4396 | struct ext4_free_data *entry; |
c9de560d AT |
4397 | struct ext4_group_info *db = e4b->bd_info; |
4398 | struct super_block *sb = e4b->bd_sb; | |
4399 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
c894058d AK |
4400 | struct rb_node **n = &db->bb_free_root.rb_node, *node; |
4401 | struct rb_node *parent = NULL, *new_node; | |
4402 | ||
0390131b | 4403 | BUG_ON(!ext4_handle_valid(handle)); |
c9de560d AT |
4404 | BUG_ON(e4b->bd_bitmap_page == NULL); |
4405 | BUG_ON(e4b->bd_buddy_page == NULL); | |
4406 | ||
18aadd47 BJ |
4407 | new_node = &new_entry->efd_node; |
4408 | cluster = new_entry->efd_start_cluster; | |
c894058d | 4409 | |
c894058d AK |
4410 | if (!*n) { |
4411 | /* first free block extent. We need to
4412 | * protect the buddy cache from being freed,
4413 | * otherwise we'll refresh it from | |
4414 | * on-disk bitmap and lose not-yet-available | |
4415 | * blocks */ | |
4416 | page_cache_get(e4b->bd_buddy_page); | |
4417 | page_cache_get(e4b->bd_bitmap_page); | |
4418 | } | |
4419 | while (*n) { | |
4420 | parent = *n; | |
18aadd47 BJ |
4421 | entry = rb_entry(parent, struct ext4_free_data, efd_node); |
4422 | if (cluster < entry->efd_start_cluster) | |
c894058d | 4423 | n = &(*n)->rb_left; |
18aadd47 | 4424 | else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) |
c894058d AK |
4425 | n = &(*n)->rb_right; |
4426 | else { | |
e29136f8 | 4427 | ext4_grp_locked_error(sb, group, 0, |
84130193 TT |
4428 | ext4_group_first_block_no(sb, group) + |
4429 | EXT4_C2B(sbi, cluster), | |
e29136f8 | 4430 | "Block already on to-be-freed list"); |
c894058d | 4431 | return 0; |
c9de560d | 4432 | } |
c894058d | 4433 | } |
c9de560d | 4434 | |
c894058d AK |
4435 | rb_link_node(new_node, parent, n); |
4436 | rb_insert_color(new_node, &db->bb_free_root); | |
4437 | ||
4438 | /* Now see whether the extent can be merged with its left and right neighbors */
4439 | node = rb_prev(new_node); | |
4440 | if (node) { | |
18aadd47 | 4441 | entry = rb_entry(node, struct ext4_free_data, efd_node); |
c894058d | 4442 | if (can_merge(entry, new_entry)) { |
18aadd47 BJ |
4443 | new_entry->efd_start_cluster = entry->efd_start_cluster; |
4444 | new_entry->efd_count += entry->efd_count; | |
c894058d | 4445 | rb_erase(node, &(db->bb_free_root)); |
18aadd47 BJ |
4446 | ext4_journal_callback_del(handle, &entry->efd_jce); |
4447 | kmem_cache_free(ext4_free_data_cachep, entry); | |
c9de560d | 4448 | } |
c894058d | 4449 | } |
c9de560d | 4450 | |
c894058d AK |
4451 | node = rb_next(new_node); |
4452 | if (node) { | |
18aadd47 | 4453 | entry = rb_entry(node, struct ext4_free_data, efd_node); |
c894058d | 4454 | if (can_merge(new_entry, entry)) { |
18aadd47 | 4455 | new_entry->efd_count += entry->efd_count; |
c894058d | 4456 | rb_erase(node, &(db->bb_free_root)); |
18aadd47 BJ |
4457 | ext4_journal_callback_del(handle, &entry->efd_jce); |
4458 | kmem_cache_free(ext4_free_data_cachep, entry); | |
c9de560d AT |
4459 | } |
4460 | } | |
3e624fc7 | 4461 | /* Add the extent to transaction's private list */ |
18aadd47 BJ |
4462 | ext4_journal_callback_add(handle, ext4_free_data_callback, |
4463 | &new_entry->efd_jce); | |
c9de560d AT |
4464 | return 0; |
4465 | } | |
4466 | ||
44338711 TT |
4467 | /** |
4468 | * ext4_free_blocks() -- Free given blocks and update quota | |
4469 | * @handle: handle for this transaction | |
4470 | * @inode: inode | |
4471 | * @block: start physical block to free | |
4472 | * @count: number of blocks to free
5def1360 | 4473 | * @flags: flags used by ext4_free_blocks |
c9de560d | 4474 | */ |
44338711 | 4475 | void ext4_free_blocks(handle_t *handle, struct inode *inode, |
e6362609 TT |
4476 | struct buffer_head *bh, ext4_fsblk_t block, |
4477 | unsigned long count, int flags) | |
c9de560d | 4478 | { |
26346ff6 | 4479 | struct buffer_head *bitmap_bh = NULL; |
c9de560d | 4480 | struct super_block *sb = inode->i_sb; |
c9de560d | 4481 | struct ext4_group_desc *gdp; |
44338711 | 4482 | unsigned long freed = 0; |
498e5f24 | 4483 | unsigned int overflow; |
c9de560d AT |
4484 | ext4_grpblk_t bit; |
4485 | struct buffer_head *gd_bh; | |
4486 | ext4_group_t block_group; | |
4487 | struct ext4_sb_info *sbi; | |
4488 | struct ext4_buddy e4b; | |
84130193 | 4489 | unsigned int count_clusters; |
c9de560d AT |
4490 | int err = 0; |
4491 | int ret; | |
4492 | ||
e6362609 TT |
4493 | if (bh) { |
4494 | if (block) | |
4495 | BUG_ON(block != bh->b_blocknr); | |
4496 | else | |
4497 | block = bh->b_blocknr; | |
4498 | } | |
c9de560d | 4499 | |
c9de560d | 4500 | sbi = EXT4_SB(sb); |
1f2acb60 TT |
4501 | if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && |
4502 | !ext4_data_block_valid(sbi, block, count)) { | |
12062ddd | 4503 | ext4_error(sb, "Freeing blocks not in datazone - " |
1f2acb60 | 4504 | "block = %llu, count = %lu", block, count); |
c9de560d AT |
4505 | goto error_return; |
4506 | } | |
4507 | ||
0610b6e9 | 4508 | ext4_debug("freeing block %llu\n", block); |
e6362609 TT |
4509 | trace_ext4_free_blocks(inode, block, count, flags); |
4510 | ||
4511 | if (flags & EXT4_FREE_BLOCKS_FORGET) { | |
4512 | struct buffer_head *tbh = bh; | |
4513 | int i; | |
4514 | ||
4515 | BUG_ON(bh && (count > 1)); | |
4516 | ||
4517 | for (i = 0; i < count; i++) { | |
4518 | if (!bh) | |
4519 | tbh = sb_find_get_block(inode->i_sb, | |
4520 | block + i); | |
87783690 NK |
4521 | if (unlikely(!tbh)) |
4522 | continue; | |
60e6679e | 4523 | ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, |
e6362609 TT |
4524 | inode, tbh, block + i); |
4525 | } | |
4526 | } | |
4527 | ||
60e6679e | 4528 | /* |
e6362609 TT |
4529 | * We need to make sure we don't reuse the freed block until |
4530 | * after the transaction is committed, which we can do by | |
4531 | * treating the block as metadata, below. We make an | |
4532 | * exception if the inode is to be written in writeback mode | |
4533 | * since writeback mode has weak data consistency guarantees. | |
4534 | */ | |
4535 | if (!ext4_should_writeback_data(inode)) | |
4536 | flags |= EXT4_FREE_BLOCKS_METADATA; | |
c9de560d | 4537 | |
84130193 TT |
4538 | /* |
4539 | * If the extent to be freed does not begin on a cluster | |
4540 | * boundary, we need to deal with partial clusters at the | |
4541 | * beginning and end of the extent. Normally we will free | |
4542 | * blocks at the beginning or the end unless we are explicitly | |
4543 | * requested to avoid doing so. | |
4544 | */ | |
4545 | overflow = block & (sbi->s_cluster_ratio - 1); | |
4546 | if (overflow) { | |
4547 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { | |
4548 | overflow = sbi->s_cluster_ratio - overflow; | |
4549 | block += overflow; | |
4550 | if (count > overflow) | |
4551 | count -= overflow; | |
4552 | else | |
4553 | return; | |
4554 | } else { | |
4555 | block -= overflow; | |
4556 | count += overflow; | |
4557 | } | |
4558 | } | |
4559 | overflow = count & (sbi->s_cluster_ratio - 1); | |
4560 | if (overflow) { | |
4561 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { | |
4562 | if (count > overflow) | |
4563 | count -= overflow; | |
4564 | else | |
4565 | return; | |
4566 | } else | |
4567 | count += sbi->s_cluster_ratio - overflow; | |
4568 | } | |
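/*
 * Worked example (illustrative, not from the original source): with
 * s_cluster_ratio == 4, freeing block 6 with count 5 (blocks 6..10) first
 * rounds the start down to the cluster boundary (block = 4, count = 7) and
 * then rounds the length up to a cluster multiple (count = 8), so the two
 * whole clusters covering blocks 4..11 are released.
 */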
4569 | ||
c9de560d AT |
4570 | do_more: |
4571 | overflow = 0; | |
4572 | ext4_get_group_no_and_offset(sb, block, &block_group, &bit); | |
4573 | ||
4574 | /* | |
4575 | * Check to see if we are freeing blocks across a group | |
4576 | * boundary. | |
4577 | */ | |
84130193 TT |
4578 | if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { |
4579 | overflow = EXT4_C2B(sbi, bit) + count - | |
4580 | EXT4_BLOCKS_PER_GROUP(sb); | |
c9de560d AT |
4581 | count -= overflow; |
4582 | } | |
84130193 | 4583 | count_clusters = EXT4_B2C(sbi, count); |
574ca174 | 4584 | bitmap_bh = ext4_read_block_bitmap(sb, block_group); |
ce89f46c AK |
4585 | if (!bitmap_bh) { |
4586 | err = -EIO; | |
c9de560d | 4587 | goto error_return; |
ce89f46c | 4588 | } |
c9de560d | 4589 | gdp = ext4_get_group_desc(sb, block_group, &gd_bh); |
ce89f46c AK |
4590 | if (!gdp) { |
4591 | err = -EIO; | |
c9de560d | 4592 | goto error_return; |
ce89f46c | 4593 | } |
c9de560d AT |
4594 | |
4595 | if (in_range(ext4_block_bitmap(sb, gdp), block, count) || | |
4596 | in_range(ext4_inode_bitmap(sb, gdp), block, count) || | |
4597 | in_range(block, ext4_inode_table(sb, gdp), | |
84130193 | 4598 | EXT4_SB(sb)->s_itb_per_group) || |
c9de560d | 4599 | in_range(block + count - 1, ext4_inode_table(sb, gdp), |
84130193 | 4600 | EXT4_SB(sb)->s_itb_per_group)) { |
c9de560d | 4601 | |
12062ddd | 4602 | ext4_error(sb, "Freeing blocks in system zone - " |
0610b6e9 | 4603 | "Block = %llu, count = %lu", block, count); |
519deca0 AK |
4604 | /* err = 0. ext4_std_error should be a no op */ |
4605 | goto error_return; | |
c9de560d AT |
4606 | } |
4607 | ||
4608 | BUFFER_TRACE(bitmap_bh, "getting write access"); | |
4609 | err = ext4_journal_get_write_access(handle, bitmap_bh); | |
4610 | if (err) | |
4611 | goto error_return; | |
4612 | ||
4613 | /* | |
4614 | * We are about to modify some metadata. Call the journal APIs | |
4615 | * to unshare ->b_data if a currently-committing transaction is | |
4616 | * using it | |
4617 | */ | |
4618 | BUFFER_TRACE(gd_bh, "get_write_access"); | |
4619 | err = ext4_journal_get_write_access(handle, gd_bh); | |
4620 | if (err) | |
4621 | goto error_return; | |
c9de560d AT |
4622 | #ifdef AGGRESSIVE_CHECK |
4623 | { | |
4624 | int i; | |
84130193 | 4625 | for (i = 0; i < count_clusters; i++) |
c9de560d AT |
4626 | BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); |
4627 | } | |
4628 | #endif | |
84130193 | 4629 | trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); |
c9de560d | 4630 | |
920313a7 AK |
4631 | err = ext4_mb_load_buddy(sb, block_group, &e4b); |
4632 | if (err) | |
4633 | goto error_return; | |
e6362609 TT |
4634 | |
4635 | if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { | |
7a2fcbf7 AK |
4636 | struct ext4_free_data *new_entry; |
4637 | /* | |
4638 | * Blocks being freed are metadata. These blocks shouldn't
4639 | * be reused until this transaction is committed.
4640 | */ | |
18aadd47 | 4641 | new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); |
b72143ab | 4642 | if (!new_entry) { |
02b78310 | 4643 | ext4_mb_unload_buddy(&e4b); |
b72143ab TT |
4644 | err = -ENOMEM; |
4645 | goto error_return; | |
4646 | } | |
18aadd47 BJ |
4647 | new_entry->efd_start_cluster = bit; |
4648 | new_entry->efd_group = block_group; | |
4649 | new_entry->efd_count = count_clusters; | |
4650 | new_entry->efd_tid = handle->h_transaction->t_tid; | |
955ce5f5 | 4651 | |
7a2fcbf7 | 4652 | ext4_lock_group(sb, block_group); |
84130193 | 4653 | mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); |
7a2fcbf7 | 4654 | ext4_mb_free_metadata(handle, &e4b, new_entry); |
c9de560d | 4655 | } else { |
7a2fcbf7 AK |
4656 | /* need to update group_info->bb_free and the bitmap
4657 | * with the group lock held; ext4_mb_generate_buddy()
4658 | * looks at them with the group lock held
4659 | */ | |
b5e2368b TT |
4660 | if (test_opt(sb, DISCARD)) |
4661 | ext4_issue_discard(sb, block_group, bit, count); | |
955ce5f5 | 4662 | ext4_lock_group(sb, block_group); |
84130193 TT |
4663 | mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); |
4664 | mb_free_blocks(inode, &e4b, bit, count_clusters); | |
c9de560d AT |
4665 | } |
4666 | ||
021b65bb TT |
4667 | ret = ext4_free_group_clusters(sb, gdp) + count_clusters; |
4668 | ext4_free_group_clusters_set(sb, gdp, ret); | |
fa77dcfa DW |
4669 | ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, |
4670 | EXT4_BLOCKS_PER_GROUP(sb) / 8); | |
feb0ab32 | 4671 | ext4_group_desc_csum_set(sb, block_group, gdp); |
955ce5f5 | 4672 | ext4_unlock_group(sb, block_group); |
57042651 | 4673 | percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); |
c9de560d | 4674 | |
772cb7c8 JS |
4675 | if (sbi->s_log_groups_per_flex) { |
4676 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | |
24aaa8ef TT |
4677 | atomic_add(count_clusters, |
4678 | &sbi->s_flex_groups[flex_group].free_clusters); | |
772cb7c8 JS |
4679 | } |
4680 | ||
e39e07fd | 4681 | ext4_mb_unload_buddy(&e4b); |
c9de560d | 4682 | |
44338711 | 4683 | freed += count; |
c9de560d | 4684 | |
7b415bf6 AK |
4685 | if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) |
4686 | dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); | |
4687 | ||
7a2fcbf7 AK |
4688 | /* We dirtied the bitmap block */ |
4689 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | |
4690 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | |
4691 | ||
c9de560d AT |
4692 | /* And the group descriptor block */ |
4693 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | |
0390131b | 4694 | ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); |
c9de560d AT |
4695 | if (!err) |
4696 | err = ret; | |
4697 | ||
4698 | if (overflow && !err) { | |
4699 | block += count; | |
4700 | count = overflow; | |
4701 | put_bh(bitmap_bh); | |
4702 | goto do_more; | |
4703 | } | |
c9de560d AT |
4704 | error_return: |
4705 | brelse(bitmap_bh); | |
4706 | ext4_std_error(sb, err); | |
4707 | return; | |
4708 | } | |
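/*
 * Illustrative example, not from this file: a metadata caller such as the
 * extent tree code typically frees a single index block with
 *
 *	ext4_free_blocks(handle, inode, NULL, pblk, 1,
 *			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 *
 * passing NULL for the buffer head and letting the FORGET flag revoke any
 * cached buffer, while ordinary data paths usually pass flags of 0.
 */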
7360d173 | 4709 | |
2846e820 | 4710 | /** |
0529155e | 4711 | * ext4_group_add_blocks() -- Add given blocks to an existing group |
2846e820 AG |
4712 | * @handle: handle to this transaction |
4713 | * @sb: super block | |
4714 | * @block: start physical block to add to the block group
4715 | * @count: number of blocks to add
4716 | * | |
e73a347b | 4717 | * This marks the blocks as free in the bitmap and buddy. |
2846e820 | 4718 | */ |
cc7365df | 4719 | int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, |
2846e820 AG |
4720 | ext4_fsblk_t block, unsigned long count) |
4721 | { | |
4722 | struct buffer_head *bitmap_bh = NULL; | |
4723 | struct buffer_head *gd_bh; | |
4724 | ext4_group_t block_group; | |
4725 | ext4_grpblk_t bit; | |
4726 | unsigned int i; | |
4727 | struct ext4_group_desc *desc; | |
4728 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
e73a347b | 4729 | struct ext4_buddy e4b; |
2846e820 AG |
4730 | int err = 0, ret, blk_free_count; |
4731 | ext4_grpblk_t blocks_freed; | |
2846e820 AG |
4732 | |
4733 | ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); | |
4734 | ||
4740b830 YY |
4735 | if (count == 0) |
4736 | return 0; | |
4737 | ||
2846e820 | 4738 | ext4_get_group_no_and_offset(sb, block, &block_group, &bit); |
2846e820 AG |
4739 | /* |
4740 | * Check to see if we are adding blocks across a group
4741 | * boundary. | |
4742 | */ | |
cc7365df YY |
4743 | if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { |
4744 | ext4_warning(sb, "too many blocks added to group %u\n",
4745 | block_group); | |
4746 | err = -EINVAL; | |
2846e820 | 4747 | goto error_return; |
cc7365df | 4748 | } |
2cd05cc3 | 4749 | |
2846e820 | 4750 | bitmap_bh = ext4_read_block_bitmap(sb, block_group); |
cc7365df YY |
4751 | if (!bitmap_bh) { |
4752 | err = -EIO; | |
2846e820 | 4753 | goto error_return; |
cc7365df YY |
4754 | } |
4755 | ||
2846e820 | 4756 | desc = ext4_get_group_desc(sb, block_group, &gd_bh); |
cc7365df YY |
4757 | if (!desc) { |
4758 | err = -EIO; | |
2846e820 | 4759 | goto error_return; |
cc7365df | 4760 | } |
2846e820 AG |
4761 | |
4762 | if (in_range(ext4_block_bitmap(sb, desc), block, count) || | |
4763 | in_range(ext4_inode_bitmap(sb, desc), block, count) || | |
4764 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || | |
4765 | in_range(block + count - 1, ext4_inode_table(sb, desc), | |
4766 | sbi->s_itb_per_group)) { | |
4767 | ext4_error(sb, "Adding blocks in system zones - " | |
4768 | "Block = %llu, count = %lu", | |
4769 | block, count); | |
cc7365df | 4770 | err = -EINVAL; |
2846e820 AG |
4771 | goto error_return; |
4772 | } | |
4773 | ||
2cd05cc3 TT |
4774 | BUFFER_TRACE(bitmap_bh, "getting write access"); |
4775 | err = ext4_journal_get_write_access(handle, bitmap_bh); | |
2846e820 AG |
4776 | if (err) |
4777 | goto error_return; | |
4778 | ||
4779 | /* | |
4780 | * We are about to modify some metadata. Call the journal APIs | |
4781 | * to unshare ->b_data if a currently-committing transaction is | |
4782 | * using it | |
4783 | */ | |
4784 | BUFFER_TRACE(gd_bh, "get_write_access"); | |
4785 | err = ext4_journal_get_write_access(handle, gd_bh); | |
4786 | if (err) | |
4787 | goto error_return; | |
e73a347b | 4788 | |
2846e820 AG |
4789 | for (i = 0, blocks_freed = 0; i < count; i++) { |
4790 | BUFFER_TRACE(bitmap_bh, "clear bit"); | |
e73a347b | 4791 | if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { |
2846e820 AG |
4792 | ext4_error(sb, "bit already cleared for block %llu", |
4793 | (ext4_fsblk_t)(block + i)); | |
4794 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | |
4795 | } else { | |
4796 | blocks_freed++; | |
4797 | } | |
4798 | } | |
e73a347b AG |
4799 | |
4800 | err = ext4_mb_load_buddy(sb, block_group, &e4b); | |
4801 | if (err) | |
4802 | goto error_return; | |
4803 | ||
4804 | /* | |
4805 | * need to update group_info->bb_free and the bitmap
4806 | * with the group lock held; ext4_mb_generate_buddy()
4807 | * looks at them with the group lock held
4808 | */ | |
2846e820 | 4809 | ext4_lock_group(sb, block_group); |
e73a347b AG |
4810 | mb_clear_bits(bitmap_bh->b_data, bit, count); |
4811 | mb_free_blocks(NULL, &e4b, bit, count); | |
021b65bb TT |
4812 | blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); |
4813 | ext4_free_group_clusters_set(sb, desc, blk_free_count); | |
fa77dcfa DW |
4814 | ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh, |
4815 | EXT4_BLOCKS_PER_GROUP(sb) / 8); | |
feb0ab32 | 4816 | ext4_group_desc_csum_set(sb, block_group, desc); |
2846e820 | 4817 | ext4_unlock_group(sb, block_group); |
57042651 TT |
4818 | percpu_counter_add(&sbi->s_freeclusters_counter, |
4819 | EXT4_B2C(sbi, blocks_freed)); | |
2846e820 AG |
4820 | |
4821 | if (sbi->s_log_groups_per_flex) { | |
4822 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | |
24aaa8ef TT |
4823 | atomic_add(EXT4_B2C(sbi, blocks_freed), |
4824 | &sbi->s_flex_groups[flex_group].free_clusters); | |
2846e820 | 4825 | } |
e73a347b AG |
4826 | |
4827 | ext4_mb_unload_buddy(&e4b); | |
2846e820 AG |
4828 | |
4829 | /* We dirtied the bitmap block */ | |
4830 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | |
4831 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | |
4832 | ||
4833 | /* And the group descriptor block */ | |
4834 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | |
4835 | ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); | |
4836 | if (!err) | |
4837 | err = ret; | |
4838 | ||
4839 | error_return: | |
4840 | brelse(bitmap_bh); | |
4841 | ext4_std_error(sb, err); | |
cc7365df | 4842 | return err; |
2846e820 AG |
4843 | } |
4844 | ||
7360d173 LC |
4845 | /** |
4846 | * ext4_trim_extent -- function to TRIM one single free extent in the group | |
4847 | * @sb: super block for the file system | |
4848 | * @start: starting block of the free extent in the alloc. group | |
4849 | * @count: number of blocks to TRIM | |
4850 | * @group: alloc. group we are working with | |
4851 | * @e4b: ext4 buddy for the group | |
4852 | * | |
4853 | * Trim "count" blocks starting at "start" in the "group". To ensure that no
4854 | * one will allocate those blocks, mark them as used in the buddy bitmap. This
4855 | * must be called while holding the group lock.
4856 | */ | |
d9f34504 TT |
4857 | static void ext4_trim_extent(struct super_block *sb, int start, int count, |
4858 | ext4_group_t group, struct ext4_buddy *e4b) | |
7360d173 LC |
4859 | { |
4860 | struct ext4_free_extent ex; | |
7360d173 | 4861 | |
b3d4c2b1 TM |
4862 | trace_ext4_trim_extent(sb, group, start, count); |
4863 | ||
7360d173 LC |
4864 | assert_spin_locked(ext4_group_lock_ptr(sb, group)); |
4865 | ||
4866 | ex.fe_start = start; | |
4867 | ex.fe_group = group; | |
4868 | ex.fe_len = count; | |
4869 | ||
4870 | /* | |
4871 | * Mark blocks used, so no one can reuse them while | |
4872 | * being trimmed. | |
4873 | */ | |
4874 | mb_mark_used(e4b, &ex); | |
4875 | ext4_unlock_group(sb, group); | |
d9f34504 | 4876 | ext4_issue_discard(sb, group, start, count); |
7360d173 LC |
4877 | ext4_lock_group(sb, group); |
4878 | mb_free_blocks(NULL, e4b, start, ex.fe_len); | |
7360d173 LC |
4879 | } |
4880 | ||
4881 | /** | |
4882 | * ext4_trim_all_free -- function to trim all free space in alloc. group | |
4883 | * @sb: super block for file system | |
22612283 | 4884 | * @group: group to be trimmed |
7360d173 LC |
4885 | * @start: first group block to examine |
4886 | * @max: last group block to examine | |
4887 | * @minblocks: minimum extent block count | |
4888 | * | |
4889 | * ext4_trim_all_free walks through the group's buddy bitmap searching for
4890 | * free extents. When a free extent of at least minblocks blocks is found,
4891 | * it is marked as used in the buddy bitmap, a TRIM command is issued for
4892 | * it via ext4_trim_extent, and it is then freed again in the buddy bitmap.
4893 | * This is repeated until the whole group has been scanned.
4898 | */ | |
0b75a840 | 4899 | static ext4_grpblk_t |
78944086 LC |
4900 | ext4_trim_all_free(struct super_block *sb, ext4_group_t group, |
4901 | ext4_grpblk_t start, ext4_grpblk_t max, | |
4902 | ext4_grpblk_t minblocks) | |
7360d173 LC |
4903 | { |
4904 | void *bitmap; | |
169ddc3e | 4905 | ext4_grpblk_t next, count = 0, free_count = 0; |
78944086 LC |
4906 | struct ext4_buddy e4b; |
4907 | int ret; | |
7360d173 | 4908 | |
b3d4c2b1 TM |
4909 | trace_ext4_trim_all_free(sb, group, start, max); |
4910 | ||
78944086 LC |
4911 | ret = ext4_mb_load_buddy(sb, group, &e4b); |
4912 | if (ret) { | |
4913 | ext4_error(sb, "Error in loading buddy " | |
4914 | "information for %u", group); | |
4915 | return ret; | |
4916 | } | |
78944086 | 4917 | bitmap = e4b.bd_bitmap; |
28739eea LC |
4918 | |
4919 | ext4_lock_group(sb, group); | |
3d56b8d2 TM |
4920 | if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) && |
4921 | minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) | |
4922 | goto out; | |
4923 | ||
78944086 LC |
4924 | start = (e4b.bd_info->bb_first_free > start) ? |
4925 | e4b.bd_info->bb_first_free : start; | |
7360d173 | 4926 | |
913eed83 LC |
4927 | while (start <= max) { |
4928 | start = mb_find_next_zero_bit(bitmap, max + 1, start); | |
4929 | if (start > max) | |
7360d173 | 4930 | break; |
913eed83 | 4931 | next = mb_find_next_bit(bitmap, max + 1, start); |
7360d173 LC |
4932 | |
4933 | if ((next - start) >= minblocks) { | |
d9f34504 | 4934 | ext4_trim_extent(sb, start, |
78944086 | 4935 | next - start, group, &e4b); |
7360d173 LC |
4936 | count += next - start; |
4937 | } | |
169ddc3e | 4938 | free_count += next - start; |
7360d173 LC |
4939 | start = next + 1; |
4940 | ||
4941 | if (fatal_signal_pending(current)) { | |
4942 | count = -ERESTARTSYS; | |
4943 | break; | |
4944 | } | |
4945 | ||
4946 | if (need_resched()) { | |
4947 | ext4_unlock_group(sb, group); | |
4948 | cond_resched(); | |
4949 | ext4_lock_group(sb, group); | |
4950 | } | |
4951 | ||
169ddc3e | 4952 | if ((e4b.bd_info->bb_free - free_count) < minblocks) |
7360d173 LC |
4953 | break; |
4954 | } | |
3d56b8d2 TM |
4955 | |
4956 | if (!ret) | |
4957 | EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); | |
4958 | out: | |
7360d173 | 4959 | ext4_unlock_group(sb, group); |
78944086 | 4960 | ext4_mb_unload_buddy(&e4b); |
7360d173 LC |
4961 | |
4962 | ext4_debug("trimmed %d blocks in the group %d\n", | |
4963 | count, group); | |
4964 | ||
7360d173 LC |
4965 | return count; |
4966 | } | |
4967 | ||
4968 | /** | |
4969 | * ext4_trim_fs() -- trim ioctl handle function | |
4970 | * @sb: superblock for filesystem | |
4971 | * @range: fstrim_range structure | |
4972 | * | |
4973 | * start: First Byte to trim | |
4974 | * len: number of Bytes to trim from start | |
4975 | * minlen: minimum extent length in Bytes | |
4976 | * ext4_trim_fs goes through all allocation groups containing Bytes from | |
4977 | * start to start+len. For each such group the ext4_trim_all_free function
4978 | * is invoked to trim all free space. | |
4979 | */ | |
4980 | int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) | |
4981 | { | |
78944086 | 4982 | struct ext4_group_info *grp; |
913eed83 | 4983 | ext4_group_t group, first_group, last_group; |
7137d7a4 | 4984 | ext4_grpblk_t cnt = 0, first_cluster, last_cluster; |
913eed83 | 4985 | uint64_t start, end, minlen, trimmed = 0; |
0f0a25bf JK |
4986 | ext4_fsblk_t first_data_blk = |
4987 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | |
913eed83 | 4988 | ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); |
7360d173 LC |
4989 | int ret = 0; |
4990 | ||
4991 | start = range->start >> sb->s_blocksize_bits; | |
913eed83 | 4992 | end = start + (range->len >> sb->s_blocksize_bits) - 1; |
aaf7d73e LC |
4993 | minlen = EXT4_NUM_B2C(EXT4_SB(sb), |
4994 | range->minlen >> sb->s_blocksize_bits); | |
7360d173 | 4995 | |
913eed83 LC |
4996 | if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) || |
4997 | unlikely(start >= max_blks)) | |
7360d173 | 4998 | return -EINVAL; |
913eed83 LC |
4999 | if (end >= max_blks) |
5000 | end = max_blks - 1; | |
5001 | if (end <= first_data_blk) | |
22f10457 | 5002 | goto out; |
913eed83 | 5003 | if (start < first_data_blk) |
0f0a25bf | 5004 | start = first_data_blk; |
7360d173 | 5005 | |
913eed83 | 5006 | /* Determine first and last group to examine based on start and end */ |
7360d173 | 5007 | ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, |
7137d7a4 | 5008 | &first_group, &first_cluster); |
913eed83 | 5009 | ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, |
7137d7a4 | 5010 | &last_group, &last_cluster); |
7360d173 | 5011 | |
913eed83 LC |
5012 | /* end now represents the last cluster to discard in this group */ |
5013 | end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; | |
7360d173 LC |
5014 | |
5015 | for (group = first_group; group <= last_group; group++) { | |
78944086 LC |
5016 | grp = ext4_get_group_info(sb, group); |
5017 | /* We only do this if the grp has never been initialized */ | |
5018 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { | |
5019 | ret = ext4_mb_init_group(sb, group); | |
5020 | if (ret) | |
5021 | break; | |
7360d173 LC |
5022 | } |
5023 | ||
0ba08517 | 5024 | /* |
913eed83 LC |
5025 | * For all the groups except the last one, last cluster will |
5026 | * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to | |
5027 | * change it for the last group; note that last_cluster is
5028 | * already computed earlier by ext4_get_group_no_and_offset() | |
0ba08517 | 5029 | */ |
913eed83 LC |
5030 | if (group == last_group) |
5031 | end = last_cluster; | |
7360d173 | 5032 | |
78944086 | 5033 | if (grp->bb_free >= minlen) { |
7137d7a4 | 5034 | cnt = ext4_trim_all_free(sb, group, first_cluster, |
913eed83 | 5035 | end, minlen); |
7360d173 LC |
5036 | if (cnt < 0) { |
5037 | ret = cnt; | |
7360d173 LC |
5038 | break; |
5039 | } | |
21e7fd22 | 5040 | trimmed += cnt; |
7360d173 | 5041 | } |
913eed83 LC |
5042 | |
5043 | /* | |
5044 | * For every group except the first one, we are sure | |
5045 | * that the first cluster to discard will be cluster #0. | |
5046 | */ | |
7137d7a4 | 5047 | first_cluster = 0; |
7360d173 | 5048 | } |
7360d173 | 5049 | |
3d56b8d2 TM |
5050 | if (!ret) |
5051 | atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); | |
5052 | ||
22f10457 | 5053 | out: |
aaf7d73e | 5054 | range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; |
7360d173 LC |
5055 | return ret; |
5056 | } |
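/*
 * Illustrative userspace sketch, not part of mballoc.c: ext4_trim_fs() is
 * reached through the FITRIM ioctl with start, len and minlen given in
 * bytes, as described above.  The mount point and minlen value are made-up
 * examples; FITRIM and struct fstrim_range come from <linux/fs.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/ext4", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&range, 0, sizeof(range));
	range.start = 0;		/* first byte to trim */
	range.len = ~0ULL;		/* trim the whole filesystem */
	range.minlen = 1024 * 1024;	/* skip free extents below 1 MiB */

	if (ioctl(fd, FITRIM, &range))
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}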