/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 * Copyright (c) 2005, Bull S.A.
 * Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>

#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);

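/*
 * ext4_ext_truncate_extend_restart:
 * make sure the running transaction still has at least @needed
 * credits; if it cannot be extended in place, restart it and
 * return -EAGAIN so the caller knows the operation must be retried.
 */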
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
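
/*
 * For example: if the extent in the path covers logical blocks
 * 100..107 starting at physical block 5000 and we are placing
 * logical block 110, the goal computed above is
 * 5000 + (110 - 100) = 5010, i.e. the allocation aims to keep the
 * file physically contiguous with its neighbouring extent.
 */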

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

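/*
 * Concretely (assuming a 4KB block size and the standard 12-byte
 * extent header and 12-byte entry sizes): a full tree block holds
 * (4096 - 12) / 12 = 340 extents or indexes, while the 60-byte
 * i_data area in the inode body holds (60 - 12) / 12 = 4, ignoring
 * the artificially small AGGRESSIVE_TEST limits above.
 */
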
/*
 * Calculate the number of metadata blocks needed
 * to allocate the block at @lblock
 * Worst case is one block per extent
 */
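/*
 * Worked example (4KB blocks, so idxs = 340 per the arithmetic
 * below): while appending contiguous delalloc blocks we charge one
 * new extent-tree block every 340 blocks, an additional index
 * block every 340^2 blocks, and so on; a non-contiguous block
 * falls through to the worst case of ext_depth(inode) + 1.
 */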
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

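/*
 * ext4_ext_drop_refs:
 * release the buffer heads pinned at each level of @path
 */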
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
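/*
 * The loop below maintains the invariant that l ends up pointing at
 * the first entry whose ei_block exceeds @block, so p_idx = l - 1 is
 * the last index covering it: e.g. with indexes starting at blocks
 * 0, 100 and 200, a search for block 150 selects the entry at 100.
 */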
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

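/*
 * ext4_ext_find_extent:
 * walk the tree from the root down to the leaf that should contain
 * @block, recording the header, index and extent seen at each level
 * in path[0..depth].  The caller must release the result with
 * ext4_ext_drop_refs() (and kfree() it if it was allocated here).
 */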
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext4_ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					ext4_idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
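/*
 * On success the tree is one level deeper: the inode's root holds a
 * single index entry whose ei_block is copied from the old root's
 * first entry and whose pointer is the newly allocated block.
 */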
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags,
					    path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
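/*
 * For example, if the closest extent on disk covers logical blocks
 * 0..7 and we search to the left of a hole at logical block 12, the
 * function returns 7 in *logical and the extent's last physical
 * block in *phys.
 */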
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					    struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

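/*
 * Two extents are mergeable only if they are logically and
 * physically contiguous and share the same (un)initialized state,
 * and only if the combined length still fits in ee_len: at most
 * EXT_INIT_MAX_LEN (32768) blocks, or one block less for
 * uninitialized extents so that the MSB of ee_len remains free to
 * act as the uninitialized flag.
 */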
748de673 | 1530 | int |
a86c6181 AT |
1531 | ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, |
1532 | struct ext4_extent *ex2) | |
1533 | { | |
749269fa | 1534 | unsigned short ext1_ee_len, ext2_ee_len, max_len; |
a2df2a63 AA |
1535 | |
1536 | /* | |
1537 | * Make sure that either both extents are uninitialized, or | |
1538 | * both are _not_. | |
1539 | */ | |
1540 | if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) | |
1541 | return 0; | |
1542 | ||
749269fa AA |
1543 | if (ext4_ext_is_uninitialized(ex1)) |
1544 | max_len = EXT_UNINIT_MAX_LEN; | |
1545 | else | |
1546 | max_len = EXT_INIT_MAX_LEN; | |
1547 | ||
a2df2a63 AA |
1548 | ext1_ee_len = ext4_ext_get_actual_len(ex1); |
1549 | ext2_ee_len = ext4_ext_get_actual_len(ex2); | |
1550 | ||
1551 | if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != | |
63f57933 | 1552 | le32_to_cpu(ex2->ee_block)) |
a86c6181 AT |
1553 | return 0; |
1554 | ||
471d4011 SB |
1555 | /* |
1556 | * To allow future support for preallocated extents to be added | |
1557 | * as an RO_COMPAT feature, refuse to merge to extents if | |
d0d856e8 | 1558 | * this can result in the top bit of ee_len being set. |
471d4011 | 1559 | */ |
749269fa | 1560 | if (ext1_ee_len + ext2_ee_len > max_len) |
471d4011 | 1561 | return 0; |
bbf2f9fb | 1562 | #ifdef AGGRESSIVE_TEST |
b939e376 | 1563 | if (ext1_ee_len >= 4) |
a86c6181 AT |
1564 | return 0; |
1565 | #endif | |
1566 | ||
bf89d16f | 1567 | if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
a86c6181 AT |
1568 | return 1; |
1569 | return 0; | |
1570 | } | |
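
/*
 * Example: ex1 covering logical blocks [100, 108) at physical block 500
 * and ex2 covering [108, 112) at physical block 508 are contiguous both
 * logically and physically, so they merge into [100, 112) at 500.  A gap
 * in either range, or an initialized/uninitialized mismatch, prevents
 * the merge.
 */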

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards the right. If you want to merge towards
 * the left, pass "ex - 1" as the argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function tries to merge the @ex extent with its neighbours in
 * the tree: first towards the left, then towards the right.
 * Returns 1 if @ex itself got merged to the right, 0 otherwise.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;
	int ret = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		ret = ext4_ext_try_to_merge_right(inode, path, ex);

	return ret;
}
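
/*
 * Note: when the left-side merge succeeds, @ex has been absorbed into
 * its left neighbour, and the loop in ext4_ext_try_to_merge_right()
 * keeps merging further to the right as long as the extents qualify,
 * so no separate right-side pass is needed in that case.
 */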

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
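
/*
 * Example: if newext is [100, len 50] and the next allocated extent
 * starts at block 130, the overlap check trims newext to [100, len 30]
 * and returns 1, so the caller never maps over existing blocks.
 */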

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into an existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert the block into the found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably the next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create the first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
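
/*
 * Example of the insert-position logic above: with a leaf holding
 * extents starting at blocks 0 and 64, inserting newext at block 32
 * finds nearex = the extent at 0; since 32 > 0, the extents after
 * nearex are shifted one slot to the right and newext lands in the
 * slot just after nearex, keeping the leaf sorted by logical block.
 */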

static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
			       void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
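
/*
 * Note on the callback contract of ext4_ext_walk_space(): each region
 * handed to @func is described by an ext4_ext_cache; ec_start == 0 with
 * a nonzero ec_len denotes an unallocated region (a hole), while an
 * allocated region carries the extent's physical start block.  The
 * fiemap code is the main user of this walker.
 */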

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
}
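
/*
 * Example: with extents covering [0, 10) and [20, 30), a lookup of
 * block 12 caches the gap as lblock 10, len 10, ec_start 0, so the
 * next lookup of any block in [10, 20) is answered as a hole without
 * walking the tree again.
 */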

/*
 * ext4_ext_check_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * cache extent pointer.  If the cached extent is a hole,
 * this routine should be used instead of
 * ext4_ext_in_cache if the calling function needs to
 * know the size of the hole.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
				struct ext4_ext_cache *ex)
{
	struct ext4_ext_cache *cex;
	struct ext4_sb_info *sbi;
	int ret = 0;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	sbi = EXT4_SB(inode->i_sb);

	/* has cache valid data? */
	if (cex->ec_len == 0)
		goto errout;

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		memcpy(ex, cex, sizeof(struct ext4_ext_cache));
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = 1;
	}
errout:
	if (!ret)
		sbi->extent_cache_misses++;
	else
		sbi->extent_cache_hits++;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * extent pointer.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache cex;
	int ret = 0;

	if (ext4_ext_check_cache(inode, block, &cex)) {
		ex->ee_block = cpu_to_le32(cex.ec_block);
		ext4_ext_store_pblock(ex, cex.ec_start);
		ex->ee_len = cpu_to_le16(cex.ec_len);
		ret = 1;
	}

	return ret;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * the last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When passing in the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, so no
			 * need to account for the leaf block credit.
			 *
			 * bitmaps and block group descriptor blocks
			 * and other metadata blocks still need to be
			 * accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to change/be allocated to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the
 * worst case each tree level index/leaf needs to be changed; if the
 * tree splits due to inserting a new extent, then the old tree
 * index/leaf need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
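
/*
 * Example: for a tree of depth 2, a single contiguous chunk is budgeted
 * at 2 * 2 = 4 index/leaf block modifications (one change plus one
 * possible split per level), while discontiguous blocks are budgeted
 * at 2 * 3 = 6 to allow for an additional split.
 */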

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* head removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = to - from;
		start = ext4_ext_pblock(ex);

		ext_debug("free first %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);

	} else {
		printk(KERN_INFO "strange request: removal(2) "
			"%u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
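
/*
 * Example: for an extent covering logical blocks [100, 110) at physical
 * block 500, a call with from == 105 and to == 109 (the extent's last
 * block) is a tail removal: num = 110 - 105 = 5, so physical blocks
 * 505..509 are freed.
 */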

/*
 * ext4_ext_rm_leaf() removes the extents associated with the
 * blocks appearing between "start" and "end", and splits the extents
 * if "start" and "end" appear in the same extent.
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @start:  The first block to remove
 * @end:    The last block to remove
 */
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start,
		ext4_lblk_t end)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;
	struct ext4_map_blocks map;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end <= ex_ee_block) {
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (a != ex_ee_block &&
			b != ex_ee_block + ex_ee_len - 1) {
			/*
			 * If this is a truncate, then this condition should
			 * never happen because at least one of the end points
			 * needs to be on the edge of the extent.
			 */
			if (end == EXT_MAX_BLOCK) {
				ext_debug("  bad truncate %u:%u\n",
						start, end);
				block = 0;
				num = 0;
				err = -EIO;
				goto out;
			}
			/*
			 * else this is a hole punch, so the extent needs to
			 * be split since neither edge of the hole is on the
			 * extent edge
			 */
			else {
				map.m_pblk = ext4_ext_pblock(ex);
				map.m_lblk = ex_ee_block;
				map.m_len = b - ex_ee_block;

				err = ext4_split_extent(handle,
					inode, path, &map, 0,
					EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
					EXT4_GET_BLOCKS_PRE_IO);

				if (err < 0)
					goto out;

				ex_ee_len = ext4_ext_get_actual_len(ex);

				b = ex_ee_block+ex_ee_len - 1 < end ?
					ex_ee_block+ex_ee_len - 1 : end;

				/* Then remove tail of this extent */
				block = ex_ee_block;
				num = a - block;
			}
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = b;
			num = ex_ee_block + ex_ee_len - b;

			/*
			 * If this is a truncate, this condition
			 * should never happen
			 */
			if (end == EXT_MAX_BLOCK) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			if (a != ex_ee_block) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}

			if (b != ex_ee_block + ex_ee_len - 1) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
		} else if (block != ex_ee_block) {
			/*
			 * If this was a head removal, then we need to update
			 * the physical block since it is now at a different
			 * location
			 */
			ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCK) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if the current index has to be freed (even partially)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on a deeper level happened, it wasn't partial,
	 * so we have to consider the current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
				ext4_lblk_t end)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);

	/* probably the first extent we're gonna free will be last in the block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is a leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					start, end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is an index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save the actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

/*
 * ext4_split_extent_at() splits an extent at a given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates if the extent could be zeroed out if the split
 *		fails, and the states (init or uninit) of the new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> the split is not needed, and just mark the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	ext_debug("ext4_split_extent_at: inode %lu, logical "
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with,
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			ext4_ext_mark_uninitialized(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNINIT1)
		ext4_ext_mark_uninitialized(ex);

	/*
	 * path may lead to a new leaf, not to the original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
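
/*
 * Example: splitting an extent covering [100, 120) at physical block 500
 * at logical block 108 shrinks the original extent to [100, 108) and
 * inserts a new extent [108, 120) at physical block 508; with
 * EXT4_EXT_MARK_UNINIT2 set, only the second half is marked
 * uninitialized.
 */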

/*
 * ext4_split_extent() splits an extent and marks the extent covered
 * by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *  a> There is no split required
 *  b> Splits into two extents: the split happens at either end of the extent
 *  c> Splits into three extents: someone is splitting in the middle of the extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path *path,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int uninitialized;
	int split_flag1, flags1;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
				       EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	}

	ext4_ext_drop_refs(path);
	path = ext4_ext_find_extent(inode, map->m_lblk, path);
	if (IS_ERR(path))
		return PTR_ERR(path);

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : map->m_len;
}
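
/*
 * Note on ordering: the tail split (at m_lblk + m_len) is done first,
 * while the original path is still valid; the leaf may move during that
 * insert, so the path is dropped and looked up again before the head
 * split at m_lblk.  Both halves of an uninitialized extent stay
 * uninitialized across the first split (MARK_UNINIT1 | MARK_UNINIT2);
 * the final state of the mapped range is applied by the second split.
 */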

#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *  a> There is no split required: Entire extent should be initialized
 *  b> Splits into two extents: Write is happening at either end of the extent
 *  c> Splits into three extents: Someone is writing in the middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int allocated, ee_len, depth;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	/* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (allocated > map->m_len) {
		if (allocated <= EXT4_EXT_ZERO_LEN &&
		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if ((map->m_lblk - ee_block + map->m_len <
			   EXT4_EXT_ZERO_LEN) &&
			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	allocated = ext4_split_extent(handle, inode, path,
				       &split_map, split_flag, 0);
	if (allocated < 0)
		err = allocated;

out:
	return err ? err : allocated;
}
3038 | ||
0031462b | 3039 | /* |
e35fd660 | 3040 | * This function is called by ext4_ext_map_blocks() from |
0031462b MC |
3041 | * ext4_get_blocks_dio_write() when DIO to write |
3042 | * to an uninitialized extent. | |
3043 | * | |
fd018fe8 | 3044 | * Writing to an uninitialized extent may result in splitting the uninitialized | |
b595076a | 3045 | * extent into multiple initialized/uninitialized extents (up to three). | |
0031462b MC |
3046 | * There are three possibilities: | |
3047 | * a> No split required: the entire extent should stay uninitialized | |
3048 | * b> Split into two extents: the write is at either end of the extent | |
3049 | * c> Split into three extents: someone is writing in the middle of the extent | |
3050 | * | |
3051 | * One or more index blocks may be needed if the extent tree grows after | |
b595076a | 3052 | * the uninitialized extent is split. To prevent ENOSPC from occurring at IO | |
0031462b | 3053 | * completion, we split the uninitialized extent before the DIO is | |
421f91d2 | 3054 | * submitted. The uninitialized extent covered by this IO will be split | |
0031462b MC |
3055 | * into (at most) three uninitialized extents. After the IO completes, the | |
3056 | * part that was filled is converted to initialized by the end_io callback | |
3057 | * via ext4_convert_unwritten_extents(). | |
ba230c3f M |
3058 | * |
3059 | * Returns the size of uninitialized extent to be written on success. | |
0031462b MC |
3060 | */ |
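/*
 * Illustration (hypothetical numbers): a DIO write to blocks [102, 105]
 * of an uninitialized extent [100, 107] is split here, before submission,
 * into uninitialized extents [100, 101], [102, 105] and [106, 107]; only
 * the middle one is converted to written by the end_io callback later.
 */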
3061 | static int ext4_split_unwritten_extents(handle_t *handle, | |
3062 | struct inode *inode, | |
e35fd660 | 3063 | struct ext4_map_blocks *map, |
0031462b | 3064 | struct ext4_ext_path *path, |
0031462b MC |
3065 | int flags) |
3066 | { | |
667eff35 YY |
3067 | ext4_lblk_t eof_block; |
3068 | ext4_lblk_t ee_block; | |
3069 | struct ext4_extent *ex; | |
3070 | unsigned int ee_len; | |
3071 | int split_flag = 0, depth; | |
21ca087a DM |
3072 | |
3073 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical " | |
3074 | "block %llu, max_blocks %u\n", inode->i_ino, | |
e35fd660 | 3075 | (unsigned long long)map->m_lblk, map->m_len); |
21ca087a DM |
3076 | |
3077 | eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> | |
3078 | inode->i_sb->s_blocksize_bits; | |
e35fd660 TT |
3079 | if (eof_block < map->m_lblk + map->m_len) |
3080 | eof_block = map->m_lblk + map->m_len; | |
21ca087a DM |
3081 | /* |
3082 | * It is safe to convert extent to initialized via explicit | |
3083 | * zeroout only if the extent is fully inside i_size or new_size. | |
3084 | */ | |
667eff35 YY |
3085 | depth = ext_depth(inode); |
3086 | ex = path[depth].p_ext; | |
3087 | ee_block = le32_to_cpu(ex->ee_block); | |
3088 | ee_len = ext4_ext_get_actual_len(ex); | |
0031462b | 3089 | |
667eff35 YY |
3090 | split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
3091 | split_flag |= EXT4_EXT_MARK_UNINIT2; | |
0031462b | 3092 | |
667eff35 YY |
3093 | flags |= EXT4_GET_BLOCKS_PRE_IO; |
3094 | return ext4_split_extent(handle, inode, path, map, split_flag, flags); | |
0031462b | 3095 | } |
197217a5 | 3096 | |
c7064ef1 | 3097 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
0031462b MC |
3098 | struct inode *inode, |
3099 | struct ext4_ext_path *path) | |
3100 | { | |
3101 | struct ext4_extent *ex; | |
3102 | struct ext4_extent_header *eh; | |
3103 | int depth; | |
3104 | int err = 0; | |
0031462b MC |
3105 | |
3106 | depth = ext_depth(inode); | |
3107 | eh = path[depth].p_hdr; | |
3108 | ex = path[depth].p_ext; | |
3109 | ||
197217a5 YY |
3110 | ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical " | |
3111 | "block %llu, max_blocks %u\n", inode->i_ino, | |
3112 | (unsigned long long)le32_to_cpu(ex->ee_block), | |
3113 | ext4_ext_get_actual_len(ex)); | |
3114 | ||
0031462b MC |
3115 | err = ext4_ext_get_access(handle, inode, path + depth); |
3116 | if (err) | |
3117 | goto out; | |
3118 | /* first mark the extent as initialized */ | |
3119 | ext4_ext_mark_initialized(ex); | |
3120 | ||
197217a5 YY |
3121 | /* note: ext4_ext_correct_indexes() isn't needed here because |
3122 | * borders are not changed | |
0031462b | 3123 | */ |
197217a5 YY |
3124 | ext4_ext_try_to_merge(inode, path, ex); |
3125 | ||
0031462b MC |
3126 | /* Mark modified extent as dirty */ |
3127 | err = ext4_ext_dirty(handle, inode, path + depth); | |
3128 | out: | |
3129 | ext4_ext_show_leaf(inode, path); | |
3130 | return err; | |
3131 | } | |
3132 | ||
515f41c3 AK |
3133 | static void unmap_underlying_metadata_blocks(struct block_device *bdev, |
3134 | sector_t block, int count) | |
3135 | { | |
3136 | int i; | |
3137 | for (i = 0; i < count; i++) | |
3138 | unmap_underlying_metadata(bdev, block + i); | |
3139 | } | |
3140 | ||
58590b06 TT |
3141 | /* |
3142 | * Handle EOFBLOCKS_FL flag, clearing it if necessary | |
3143 | */ | |
3144 | static int check_eofblocks_fl(handle_t *handle, struct inode *inode, | |
d002ebf1 | 3145 | ext4_lblk_t lblk, |
58590b06 TT |
3146 | struct ext4_ext_path *path, |
3147 | unsigned int len) | |
3148 | { | |
3149 | int i, depth; | |
3150 | struct ext4_extent_header *eh; | |
65922cb5 | 3151 | struct ext4_extent *last_ex; |
58590b06 TT |
3152 | |
3153 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) | |
3154 | return 0; | |
3155 | ||
3156 | depth = ext_depth(inode); | |
3157 | eh = path[depth].p_hdr; | |
58590b06 TT |
3158 | |
3159 | if (unlikely(!eh->eh_entries)) { | |
3160 | EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " | |
3161 | "EOFBLOCKS_FL set"); | |
3162 | return -EIO; | |
3163 | } | |
3164 | last_ex = EXT_LAST_EXTENT(eh); | |
3165 | /* | |
3166 | * We should clear the EOFBLOCKS_FL flag if we are writing the | |
3167 | * last block in the last extent in the file. We test this by | |
3168 | * first checking to see if the caller to | |
3169 | * ext4_ext_get_blocks() was interested in the last block (or | |
3170 | * a block beyond the last block) in the current extent. If | |
3171 | * this turns out to be false, we can bail out from this | |
3172 | * function immediately. | |
3173 | */ | |
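/*
 * For example (hypothetical numbers): if last_ex covers blocks
 * [100, 107], a write with lblk = 100, len = 4 ends at block 103 and
 * we return 0 here; a write with lblk = 104, len = 4 reaches block 107,
 * so we fall through to the rightmost-path check below.
 */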
d002ebf1 | 3174 | if (lblk + len < le32_to_cpu(last_ex->ee_block) + |
58590b06 TT |
3175 | ext4_ext_get_actual_len(last_ex)) |
3176 | return 0; | |
3177 | /* | |
3178 | * If the caller does appear to be planning to write at or | |
3179 | * beyond the end of the current extent, we then test to see | |
3180 | * if the current extent is the last extent in the file, by | |
3181 | * checking to make sure it was reached via the rightmost node | |
3182 | * at each level of the tree. | |
3183 | */ | |
3184 | for (i = depth-1; i >= 0; i--) | |
3185 | if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) | |
3186 | return 0; | |
3187 | ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); | |
3188 | return ext4_mark_inode_dirty(handle, inode); | |
3189 | } | |
3190 | ||
0031462b MC |
3191 | static int |
3192 | ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |
e35fd660 | 3193 | struct ext4_map_blocks *map, |
0031462b | 3194 | struct ext4_ext_path *path, int flags, |
e35fd660 | 3195 | unsigned int allocated, ext4_fsblk_t newblock) |
0031462b MC |
3196 | { |
3197 | int ret = 0; | |
3198 | int err = 0; | |
8d5d02e6 | 3199 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
0031462b MC |
3200 | |
3201 | ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " | |
3202 | "block %llu, max_blocks %u, flags %d, allocated %u", | |
e35fd660 | 3203 | inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, |
0031462b MC |
3204 | flags, allocated); |
3205 | ext4_ext_show_leaf(inode, path); | |
3206 | ||
c7064ef1 | 3207 | /* get_block() called before the IO is submitted: split the extent */ | |
744692dc | 3208 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
e35fd660 TT |
3209 | ret = ext4_split_unwritten_extents(handle, inode, map, |
3210 | path, flags); | |
5f524950 M |
3211 | /* |
3212 | * Flag the inode (non-AIO case) or the end_io struct (AIO case) | |
25985edc | 3213 | * so that this IO gets converted to written when the IO is | |
5f524950 M |
3214 | * completed | |
3215 | */ | |
e9e3bcec | 3216 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
bd2d0210 | 3217 | io->flag = EXT4_IO_END_UNWRITTEN; |
e9e3bcec ES |
3218 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3219 | } else | |
19f5fb7a | 3220 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
744692dc | 3221 | if (ext4_should_dioread_nolock(inode)) |
e35fd660 | 3222 | map->m_flags |= EXT4_MAP_UNINIT; |
0031462b MC |
3223 | goto out; |
3224 | } | |
c7064ef1 | 3225 | /* IO end_io completed: convert the filled extent to written */ | |
744692dc | 3226 | if ((flags & EXT4_GET_BLOCKS_CONVERT)) { |
c7064ef1 | 3227 | ret = ext4_convert_unwritten_extents_endio(handle, inode, |
0031462b | 3228 | path); |
58590b06 | 3229 | if (ret >= 0) { |
b436b9be | 3230 | ext4_update_inode_fsync_trans(handle, inode, 1); |
d002ebf1 ES |
3231 | err = check_eofblocks_fl(handle, inode, map->m_lblk, |
3232 | path, map->m_len); | |
58590b06 TT |
3233 | } else |
3234 | err = ret; | |
0031462b MC |
3235 | goto out2; |
3236 | } | |
3237 | /* buffered IO case */ | |
3238 | /* | |
3239 | * a repeated fallocate creation request: | |
3240 | * we already have an unwritten extent | |
3241 | */ | |
3242 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) | |
3243 | goto map_out; | |
3244 | ||
3245 | /* buffered READ or buffered write_begin() lookup */ | |
3246 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | |
3247 | /* | |
3248 | * We have blocks reserved already. We | |
3249 | * return allocated blocks so that delalloc | |
3250 | * won't do block reservation for us. But | |
3251 | * the buffer head will be unmapped so that | |
3252 | * a read from the block returns 0s. | |
3253 | */ | |
e35fd660 | 3254 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
0031462b MC |
3255 | goto out1; |
3256 | } | |
3257 | ||
3258 | /* buffered write, writepage time, convert */ | |
e35fd660 | 3259 | ret = ext4_ext_convert_to_initialized(handle, inode, map, path); |
58590b06 | 3260 | if (ret >= 0) { |
b436b9be | 3261 | ext4_update_inode_fsync_trans(handle, inode, 1); |
d002ebf1 ES |
3262 | err = check_eofblocks_fl(handle, inode, map->m_lblk, path, |
3263 | map->m_len); | |
58590b06 TT |
3264 | if (err < 0) |
3265 | goto out2; | |
3266 | } | |
3267 | ||
0031462b MC |
3268 | out: |
3269 | if (ret <= 0) { | |
3270 | err = ret; | |
3271 | goto out2; | |
3272 | } else | |
3273 | allocated = ret; | |
e35fd660 | 3274 | map->m_flags |= EXT4_MAP_NEW; |
515f41c3 AK |
3275 | /* |
3276 | * if we allocated more blocks than requested | |
3277 | * we need to make sure we unmap the extra block | |
3278 | * allocated. The actual needed block will get | |
3279 | * unmapped later when we find the buffer_head marked | |
3280 | * new. | |
3281 | */ | |
e35fd660 | 3282 | if (allocated > map->m_len) { |
515f41c3 | 3283 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, |
e35fd660 TT |
3284 | newblock + map->m_len, |
3285 | allocated - map->m_len); | |
3286 | allocated = map->m_len; | |
515f41c3 | 3287 | } |
5f634d06 AK |
3288 | |
3289 | /* | |
3290 | * If we have done fallocate with the offset that is already | |
3291 | * delayed allocated, we would have block reservation | |
3292 | * and quota reservation done in the delayed write path. | |
3293 | * But fallocate would have already updated quota and block | |
3295 | * count for this offset. So cancel these reservations. | |
3295 | */ | |
1296cc85 | 3296 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
5f634d06 AK |
3297 | ext4_da_update_reserve_space(inode, allocated, 0); |
3298 | ||
0031462b | 3299 | map_out: |
e35fd660 | 3300 | map->m_flags |= EXT4_MAP_MAPPED; |
0031462b | 3301 | out1: |
e35fd660 TT |
3302 | if (allocated > map->m_len) |
3303 | allocated = map->m_len; | |
0031462b | 3304 | ext4_ext_show_leaf(inode, path); |
e35fd660 TT |
3305 | map->m_pblk = newblock; |
3306 | map->m_len = allocated; | |
0031462b MC |
3307 | out2: |
3308 | if (path) { | |
3309 | ext4_ext_drop_refs(path); | |
3310 | kfree(path); | |
3311 | } | |
3312 | return err ? err : allocated; | |
3313 | } | |
58590b06 | 3314 | |
c278bfec | 3315 | /* |
f5ab0d1f MC |
3316 | * Block allocation/map/preallocation routine for extents based files |
3317 | * | |
3318 | * | |
c278bfec | 3319 | * Need to be called with |
0e855ac8 AK |
3320 | * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block |
3321 | * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) | |
f5ab0d1f MC |
3322 | * |
3323 | * return > 0, number of of blocks already mapped/allocated | |
3324 | * if create == 0 and these are pre-allocated blocks | |
3325 | * buffer head is unmapped | |
3326 | * otherwise blocks are mapped | |
3327 | * | |
3328 | * return = 0, if plain lookup failed (blocks have not been allocated) | |
3329 | * buffer head is unmapped | |
3330 | * | |
3331 | * return < 0, error case. | |
c278bfec | 3332 | */ |
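/*
 * A minimal caller sketch (illustrative only; journalling, locking and
 * error handling elided) showing how this convention is consumed:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *
 *	ret > 0:  ret blocks are mapped, starting at physical block
 *	          map.m_pblk
 *	ret == 0: a hole; nothing is mapped or allocated
 *	ret < 0:  a negative errno
 */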
e35fd660 TT |
3333 | int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
3334 | struct ext4_map_blocks *map, int flags) | |
a86c6181 AT |
3335 | { |
3336 | struct ext4_ext_path *path = NULL; | |
58590b06 | 3337 | struct ext4_extent newex, *ex; |
0562e0ba | 3338 | ext4_fsblk_t newblock = 0; |
b05e6ae5 | 3339 | int err = 0, depth, ret; |
498e5f24 | 3340 | unsigned int allocated = 0; |
e861304b AH |
3341 | unsigned int punched_out = 0; |
3342 | unsigned int result = 0; | |
c9de560d | 3343 | struct ext4_allocation_request ar; |
8d5d02e6 | 3344 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
e861304b | 3345 | struct ext4_map_blocks punch_map; |
a86c6181 | 3346 | |
84fe3bef | 3347 | ext_debug("blocks %u/%u requested for inode %lu\n", |
e35fd660 | 3348 | map->m_lblk, map->m_len, inode->i_ino); |
0562e0ba | 3349 | trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); |
a86c6181 AT |
3350 | |
3351 | /* check in cache */ | |
e861304b AH |
3352 | if (ext4_ext_in_cache(inode, map->m_lblk, &newex) && |
3353 | ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) { | |
b05e6ae5 | 3354 | if (!newex.ee_start_lo && !newex.ee_start_hi) { |
c2177057 | 3355 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
56055d3a AA |
3356 | /* |
3357 | * block isn't allocated yet and | |
3358 | * user doesn't want to allocate it | |
3359 | */ | |
a86c6181 AT |
3360 | goto out2; |
3361 | } | |
3362 | /* we should allocate requested block */ | |
b05e6ae5 | 3363 | } else { |
a86c6181 | 3364 | /* block is already allocated */ |
e35fd660 | 3365 | newblock = map->m_lblk |
8c55e204 | 3366 | - le32_to_cpu(newex.ee_block) |
bf89d16f | 3367 | + ext4_ext_pblock(&newex); |
d0d856e8 | 3368 | /* number of remaining blocks in the extent */ |
b939e376 | 3369 | allocated = ext4_ext_get_actual_len(&newex) - |
e35fd660 | 3370 | (map->m_lblk - le32_to_cpu(newex.ee_block)); |
a86c6181 | 3371 | goto out; |
a86c6181 AT |
3372 | } |
3373 | } | |
3374 | ||
3375 | /* find extent for this block */ | |
e35fd660 | 3376 | path = ext4_ext_find_extent(inode, map->m_lblk, NULL); |
a86c6181 AT |
3377 | if (IS_ERR(path)) { |
3378 | err = PTR_ERR(path); | |
3379 | path = NULL; | |
3380 | goto out2; | |
3381 | } | |
3382 | ||
3383 | depth = ext_depth(inode); | |
3384 | ||
3385 | /* | |
d0d856e8 RD |
3386 | * consistent leaf must not be empty; |
3387 | * this situation is possible, though, _during_ tree modification; | |
a86c6181 AT |
3388 | * this is why assert can't be put in ext4_ext_find_extent() |
3389 | */ | |
273df556 FM |
3390 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
3391 | EXT4_ERROR_INODE(inode, "bad extent address " | |
f70f362b TT |
3392 | "lblock: %lu, depth: %d pblock %lld", |
3393 | (unsigned long) map->m_lblk, depth, | |
3394 | path[depth].p_block); | |
034fb4c9 SP |
3395 | err = -EIO; |
3396 | goto out2; | |
3397 | } | |
a86c6181 | 3398 | |
7e028976 AM |
3399 | ex = path[depth].p_ext; |
3400 | if (ex) { | |
725d26d3 | 3401 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
bf89d16f | 3402 | ext4_fsblk_t ee_start = ext4_ext_pblock(ex); |
a2df2a63 | 3403 | unsigned short ee_len; |
471d4011 SB |
3404 | |
3405 | /* | |
471d4011 | 3406 | * Uninitialized extents are treated as holes, except that |
56055d3a | 3407 | * we split out initialized portions during a write. |
471d4011 | 3408 | */ |
a2df2a63 | 3409 | ee_len = ext4_ext_get_actual_len(ex); |
d0d856e8 | 3410 | /* if found extent covers block, simply return it */ |
e35fd660 TT |
3411 | if (in_range(map->m_lblk, ee_block, ee_len)) { |
3412 | newblock = map->m_lblk - ee_block + ee_start; | |
d0d856e8 | 3413 | /* number of remaining blocks in the extent */ |
e35fd660 TT |
3414 | allocated = ee_len - (map->m_lblk - ee_block); |
3415 | ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, | |
3416 | ee_block, ee_len, newblock); | |
56055d3a | 3417 | |
e861304b AH |
3418 | if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) { |
3419 | /* | |
3420 | * Do not put uninitialized extent | |
3421 | * in the cache | |
3422 | */ | |
3423 | if (!ext4_ext_is_uninitialized(ex)) { | |
3424 | ext4_ext_put_in_cache(inode, ee_block, | |
3425 | ee_len, ee_start); | |
3426 | goto out; | |
3427 | } | |
3428 | ret = ext4_ext_handle_uninitialized_extents( | |
3429 | handle, inode, map, path, flags, | |
3430 | allocated, newblock); | |
3431 | return ret; | |
56055d3a | 3432 | } |
e861304b AH |
3433 | |
3434 | /* | |
3435 | * Punch out the map length, but only to the | |
3436 | * end of the extent | |
3437 | */ | |
3438 | punched_out = allocated < map->m_len ? | |
3439 | allocated : map->m_len; | |
3440 | ||
3441 | /* | |
3442 | * Since extents need to be converted to | |
3443 | * uninitialized, they must fit in an | |
3444 | * uninitialized extent | |
3445 | */ | |
3446 | if (punched_out > EXT_UNINIT_MAX_LEN) | |
3447 | punched_out = EXT_UNINIT_MAX_LEN; | |
3448 | ||
3449 | punch_map.m_lblk = map->m_lblk; | |
3450 | punch_map.m_pblk = newblock; | |
3451 | punch_map.m_len = punched_out; | |
3452 | punch_map.m_flags = 0; | |
3453 | ||
3454 | /* Check to see if the extent needs to be split */ | |
3455 | if (punch_map.m_len != ee_len || | |
3456 | punch_map.m_lblk != ee_block) { | |
3457 | ||
3458 | ret = ext4_split_extent(handle, inode, | |
3459 | path, &punch_map, 0, | |
3460 | EXT4_GET_BLOCKS_PUNCH_OUT_EXT | | |
3461 | EXT4_GET_BLOCKS_PRE_IO); | |
3462 | ||
3463 | if (ret < 0) { | |
3464 | err = ret; | |
3465 | goto out2; | |
3466 | } | |
3467 | /* | |
3468 | * find extent for the block at | |
3469 | * the start of the hole | |
3470 | */ | |
3471 | ext4_ext_drop_refs(path); | |
3472 | kfree(path); | |
3473 | ||
3474 | path = ext4_ext_find_extent(inode, | |
3475 | map->m_lblk, NULL); | |
3476 | if (IS_ERR(path)) { | |
3477 | err = PTR_ERR(path); | |
3478 | path = NULL; | |
3479 | goto out2; | |
3480 | } | |
3481 | ||
3482 | depth = ext_depth(inode); | |
3483 | ex = path[depth].p_ext; | |
3484 | ee_len = ext4_ext_get_actual_len(ex); | |
3485 | ee_block = le32_to_cpu(ex->ee_block); | |
3486 | ee_start = ext4_ext_pblock(ex); | |
3487 | ||
3488 | } | |
3489 | ||
3490 | ext4_ext_mark_uninitialized(ex); | |
3491 | ||
3492 | err = ext4_ext_remove_space(inode, map->m_lblk, | |
3493 | map->m_lblk + punched_out); | |
3494 | ||
3495 | goto out2; | |
a86c6181 AT |
3496 | } |
3497 | } | |
3498 | ||
3499 | /* | |
d0d856e8 | 3500 | * requested block isn't allocated yet; |
a86c6181 AT |
3501 | * we must not create it if the create flag is zero | |
3502 | */ | |
c2177057 | 3503 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
56055d3a AA |
3504 | /* |
3505 | * put just found gap into cache to speed up | |
3506 | * subsequent requests | |
3507 | */ | |
e35fd660 | 3508 | ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); |
a86c6181 AT |
3509 | goto out2; |
3510 | } | |
3511 | /* | |
c2ea3fde | 3512 | * Okay, we need to do block allocation. |
63f57933 | 3513 | */ |
a86c6181 | 3514 | |
c9de560d | 3515 | /* find neighbour allocated blocks */ |
e35fd660 | 3516 | ar.lleft = map->m_lblk; |
c9de560d AT |
3517 | err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); |
3518 | if (err) | |
3519 | goto out2; | |
e35fd660 | 3520 | ar.lright = map->m_lblk; |
c9de560d AT |
3521 | err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); |
3522 | if (err) | |
3523 | goto out2; | |
25d14f98 | 3524 | |
749269fa AA |
3525 | /* |
3526 | * See if request is beyond maximum number of blocks we can have in | |
3527 | * a single extent. For an initialized extent this limit is | |
3528 | * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is | |
3529 | * EXT_UNINIT_MAX_LEN. | |
3530 | */ | |
e35fd660 | 3531 | if (map->m_len > EXT_INIT_MAX_LEN && |
c2177057 | 3532 | !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
e35fd660 TT |
3533 | map->m_len = EXT_INIT_MAX_LEN; |
3534 | else if (map->m_len > EXT_UNINIT_MAX_LEN && | |
c2177057 | 3535 | (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) |
e35fd660 | 3536 | map->m_len = EXT_UNINIT_MAX_LEN; |
749269fa | 3537 | |
e35fd660 TT |
3538 | /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ |
3539 | newex.ee_block = cpu_to_le32(map->m_lblk); | |
3540 | newex.ee_len = cpu_to_le16(map->m_len); | |
25d14f98 AA |
3541 | err = ext4_ext_check_overlap(inode, &newex, path); |
3542 | if (err) | |
b939e376 | 3543 | allocated = ext4_ext_get_actual_len(&newex); |
25d14f98 | 3544 | else |
e35fd660 | 3545 | allocated = map->m_len; |
c9de560d AT |
3546 | |
3547 | /* allocate new block */ | |
3548 | ar.inode = inode; | |
e35fd660 TT |
3549 | ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); |
3550 | ar.logical = map->m_lblk; | |
c9de560d AT |
3551 | ar.len = allocated; |
3552 | if (S_ISREG(inode->i_mode)) | |
3553 | ar.flags = EXT4_MB_HINT_DATA; | |
3554 | else | |
3555 | /* disable in-core preallocation for non-regular files */ | |
3556 | ar.flags = 0; | |
3557 | newblock = ext4_mb_new_blocks(handle, &ar, &err); | |
a86c6181 AT |
3558 | if (!newblock) |
3559 | goto out2; | |
84fe3bef | 3560 | ext_debug("allocate new block: goal %llu, found %llu/%u\n", |
498e5f24 | 3561 | ar.goal, newblock, allocated); |
a86c6181 AT |
3562 | |
3563 | /* try to insert new extent into found leaf and return */ | |
f65e6fba | 3564 | ext4_ext_store_pblock(&newex, newblock); |
c9de560d | 3565 | newex.ee_len = cpu_to_le16(ar.len); |
8d5d02e6 MC |
3566 | /* Mark uninitialized */ |
3567 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ | |
a2df2a63 | 3568 | ext4_ext_mark_uninitialized(&newex); |
8d5d02e6 | 3569 | /* |
744692dc | 3570 | * An io_end structure is created for every IO write to an | |
25985edc | 3571 | * uninitialized extent. To avoid unnecessary conversion, | |
744692dc | 3572 | * here we flag only the IO that really needs the conversion. | |
5f524950 | 3573 | * For the non-async direct IO case, flag the inode state | |
25985edc | 3574 | * so that the conversion is performed when the IO is done. | |
8d5d02e6 | 3575 | */ |
744692dc | 3576 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
e9e3bcec | 3577 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
bd2d0210 | 3578 | io->flag = EXT4_IO_END_UNWRITTEN; |
e9e3bcec ES |
3579 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3580 | } else | |
19f5fb7a TT |
3581 | ext4_set_inode_state(inode, |
3582 | EXT4_STATE_DIO_UNWRITTEN); | |
5f524950 | 3583 | } |
744692dc | 3584 | if (ext4_should_dioread_nolock(inode)) |
e35fd660 | 3585 | map->m_flags |= EXT4_MAP_UNINIT; |
8d5d02e6 | 3586 | } |
c8d46e41 | 3587 | |
d002ebf1 | 3588 | err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); |
58590b06 TT |
3589 | if (err) |
3590 | goto out2; | |
3591 | ||
0031462b | 3592 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
315054f0 AT |
3593 | if (err) { |
3594 | /* free data blocks we just allocated */ | |
c9de560d AT |
3595 | /* not a good idea to call discard here directly, |
3596 | * but otherwise we'd need to call it every free() */ | |
c2ea3fde | 3597 | ext4_discard_preallocations(inode); |
7dc57615 | 3598 | ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), |
e6362609 | 3599 | ext4_ext_get_actual_len(&newex), 0); |
a86c6181 | 3600 | goto out2; |
315054f0 | 3601 | } |
a86c6181 | 3602 | |
a86c6181 | 3603 | /* previous routine could use block we allocated */ |
bf89d16f | 3604 | newblock = ext4_ext_pblock(&newex); |
b939e376 | 3605 | allocated = ext4_ext_get_actual_len(&newex); |
e35fd660 TT |
3606 | if (allocated > map->m_len) |
3607 | allocated = map->m_len; | |
3608 | map->m_flags |= EXT4_MAP_NEW; | |
a86c6181 | 3609 | |
5f634d06 AK |
3610 | /* |
3611 | * Update reserved blocks/metadata blocks after successful | |
3612 | * block allocation which had been deferred till now. | |
3613 | */ | |
1296cc85 | 3614 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
5f634d06 AK |
3615 | ext4_da_update_reserve_space(inode, allocated, 1); |
3616 | ||
b436b9be JK |
3617 | /* |
3618 | * Cache the extent and update transaction to commit on fdatasync only | |
3619 | * when it is _not_ an uninitialized extent. | |
3620 | */ | |
3621 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { | |
b05e6ae5 | 3622 | ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); |
b436b9be JK |
3623 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3624 | } else | |
3625 | ext4_update_inode_fsync_trans(handle, inode, 0); | |
a86c6181 | 3626 | out: |
e35fd660 TT |
3627 | if (allocated > map->m_len) |
3628 | allocated = map->m_len; | |
a86c6181 | 3629 | ext4_ext_show_leaf(inode, path); |
e35fd660 TT |
3630 | map->m_flags |= EXT4_MAP_MAPPED; |
3631 | map->m_pblk = newblock; | |
3632 | map->m_len = allocated; | |
a86c6181 AT |
3633 | out2: |
3634 | if (path) { | |
3635 | ext4_ext_drop_refs(path); | |
3636 | kfree(path); | |
3637 | } | |
0562e0ba JZ |
3638 | trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, |
3639 | newblock, map->m_len, err ? err : allocated); | |
e861304b AH |
3640 | |
3641 | result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ? | |
3642 | punched_out : allocated; | |
3643 | ||
3644 | return err ? err : result; | |
a86c6181 AT |
3645 | } |
3646 | ||
cf108bca | 3647 | void ext4_ext_truncate(struct inode *inode) |
a86c6181 AT |
3648 | { |
3649 | struct address_space *mapping = inode->i_mapping; | |
3650 | struct super_block *sb = inode->i_sb; | |
725d26d3 | 3651 | ext4_lblk_t last_block; |
a86c6181 AT |
3652 | handle_t *handle; |
3653 | int err = 0; | |
3654 | ||
3889fd57 JZ |
3655 | /* |
3656 | * finish any pending end_io work so we won't run the risk of | |
3657 | * converting any truncated blocks to initialized later | |
3658 | */ | |
3659 | ext4_flush_completed_IO(inode); | |
3660 | ||
a86c6181 AT |
3661 | /* |
3662 | * the first extent we free will probably be the last in its block | |
3663 | */ | |
f3bd1f3f | 3664 | err = ext4_writepage_trans_blocks(inode); |
a86c6181 | 3665 | handle = ext4_journal_start(inode, err); |
cf108bca | 3666 | if (IS_ERR(handle)) |
a86c6181 | 3667 | return; |
a86c6181 | 3668 | |
cf108bca JK |
3669 | if (inode->i_size & (sb->s_blocksize - 1)) |
3670 | ext4_block_truncate_page(handle, mapping, inode->i_size); | |
a86c6181 | 3671 | |
9ddfc3dc JK |
3672 | if (ext4_orphan_add(handle, inode)) |
3673 | goto out_stop; | |
3674 | ||
0e855ac8 | 3675 | down_write(&EXT4_I(inode)->i_data_sem); |
a86c6181 AT |
3676 | ext4_ext_invalidate_cache(inode); |
3677 | ||
c2ea3fde | 3678 | ext4_discard_preallocations(inode); |
c9de560d | 3679 | |
a86c6181 | 3680 | /* |
d0d856e8 RD |
3681 | * TODO: optimization is possible here. |
3682 | * Probably we need not scan at all, | |
3683 | * because page truncation is enough. | |
a86c6181 | 3684 | */ |
a86c6181 AT |
3685 | |
3686 | /* we have to know where to truncate from in crash case */ | |
3687 | EXT4_I(inode)->i_disksize = inode->i_size; | |
3688 | ext4_mark_inode_dirty(handle, inode); | |
3689 | ||
3690 | last_block = (inode->i_size + sb->s_blocksize - 1) | |
3691 | >> EXT4_BLOCK_SIZE_BITS(sb); | |
d583fb87 | 3692 | err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); |
a86c6181 AT |
3693 | |
3694 | /* In a multi-transaction truncate, we only make the final | |
56055d3a AA |
3695 | * transaction synchronous. |
3696 | */ | |
a86c6181 | 3697 | if (IS_SYNC(inode)) |
0390131b | 3698 | ext4_handle_sync(handle); |
a86c6181 | 3699 | |
9ddfc3dc | 3700 | up_write(&EXT4_I(inode)->i_data_sem); |
f6d2f6b3 EG |
3701 | |
3702 | out_stop: | |
a86c6181 | 3703 | /* |
d0d856e8 | 3704 | * If this was a simple ftruncate() and the file will remain alive, |
a86c6181 AT |
3705 | * then we need to clear up the orphan record which we created above. |
3706 | * However, if this was a real unlink then we were called by | |
3707 | * ext4_delete_inode(), and we allow that function to clean up the | |
3708 | * orphan info for us. | |
3709 | */ | |
3710 | if (inode->i_nlink) | |
3711 | ext4_orphan_del(handle, inode); | |
3712 | ||
ef737728 SR |
3713 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); |
3714 | ext4_mark_inode_dirty(handle, inode); | |
a86c6181 AT |
3715 | ext4_journal_stop(handle); |
3716 | } | |
3717 | ||
fd28784a AK |
3718 | static void ext4_falloc_update_inode(struct inode *inode, |
3719 | int mode, loff_t new_size, int update_ctime) | |
3720 | { | |
3721 | struct timespec now; | |
3722 | ||
3723 | if (update_ctime) { | |
3724 | now = current_fs_time(inode->i_sb); | |
3725 | if (!timespec_equal(&inode->i_ctime, &now)) | |
3726 | inode->i_ctime = now; | |
3727 | } | |
3728 | /* | |
3729 | * Update only when preallocation was requested beyond | |
3730 | * the file size. | |
3731 | */ | |
cf17fea6 AK |
3732 | if (!(mode & FALLOC_FL_KEEP_SIZE)) { |
3733 | if (new_size > i_size_read(inode)) | |
3734 | i_size_write(inode, new_size); | |
3735 | if (new_size > EXT4_I(inode)->i_disksize) | |
3736 | ext4_update_i_disksize(inode, new_size); | |
c8d46e41 JZ |
3737 | } else { |
3738 | /* | |
3739 | * Mark that we allocate beyond EOF so the subsequent truncate | |
3740 | * can proceed even if the new size is the same as i_size. | |
3741 | */ | |
3742 | if (new_size > i_size_read(inode)) | |
12e9b892 | 3743 | ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); |
fd28784a AK |
3744 | } |
3745 | ||
3746 | } | |
3747 | ||
a2df2a63 | 3748 | /* |
2fe17c10 | 3749 | * preallocate space for a file. This implements ext4's fallocate file |
a2df2a63 AA |
3750 | * operation, which gets called from sys_fallocate system call. |
3751 | * For block-mapped files, posix_fallocate should fall back to the method | |
3752 | * of writing zeroes to the required new blocks (the same behavior which is | |
3753 | * expected for file systems which do not support fallocate() system call). | |
3754 | */ | |
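/*
 * A userspace sketch of what reaches this function (illustrative only;
 * the path and sizes are hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/ext4/file", O_RDWR | O_CREAT, 0644);
 *	int err = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * This preallocates 16MiB as uninitialized extents without changing
 * i_size; reads of the preallocated range return zeroes until it is
 * actually written.
 */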
2fe17c10 | 3755 | long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) |
a2df2a63 | 3756 | { |
2fe17c10 | 3757 | struct inode *inode = file->f_path.dentry->d_inode; |
a2df2a63 | 3758 | handle_t *handle; |
fd28784a | 3759 | loff_t new_size; |
498e5f24 | 3760 | unsigned int max_blocks; |
a2df2a63 AA |
3761 | int ret = 0; |
3762 | int ret2 = 0; | |
3763 | int retries = 0; | |
2ed88685 | 3764 | struct ext4_map_blocks map; |
a2df2a63 AA |
3765 | unsigned int credits, blkbits = inode->i_blkbits; |
3766 | ||
3767 | /* | |
3768 | * currently supporting (pre)allocate mode for extent-based | |
3769 | * files _only_ | |
3770 | */ | |
12e9b892 | 3771 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
a2df2a63 AA |
3772 | return -EOPNOTSUPP; |
3773 | ||
a4bb6b64 AH |
3774 | /* Return error if mode is not supported */ |
3775 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | |
3776 | return -EOPNOTSUPP; | |
3777 | ||
3778 | if (mode & FALLOC_FL_PUNCH_HOLE) | |
3779 | return ext4_punch_hole(file, offset, len); | |
3780 | ||
0562e0ba | 3781 | trace_ext4_fallocate_enter(inode, offset, len, mode); |
2ed88685 | 3782 | map.m_lblk = offset >> blkbits; |
fd28784a AK |
3783 | /* |
3784 | * We can't just convert len to max_blocks because offset may not | |
3785 | * be block-aligned, e.g. blocksize = 4096, offset = 3072, len = 2048 | |
3786 | */ | |
a2df2a63 | 3787 | max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) |
2ed88685 | 3788 | - map.m_lblk; |
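/*
 * Worked example of the computation above (assuming blocksize 4096,
 * blkbits 12): offset = 3072, len = 2048 gives map.m_lblk = 0 and
 * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 2, so max_blocks = 2, whereas
 * len >> blkbits alone would wrongly yield 0.
 */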
a2df2a63 | 3789 | /* |
f3bd1f3f | 3790 | * credits to insert 1 extent into extent tree |
a2df2a63 | 3791 | */ |
f3bd1f3f | 3792 | credits = ext4_chunk_trans_blocks(inode, max_blocks); |
55bd725a | 3793 | mutex_lock(&inode->i_mutex); |
6d19c42b NK |
3794 | ret = inode_newsize_ok(inode, (len + offset)); |
3795 | if (ret) { | |
3796 | mutex_unlock(&inode->i_mutex); | |
0562e0ba | 3797 | trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); |
6d19c42b NK |
3798 | return ret; |
3799 | } | |
a2df2a63 AA |
3800 | retry: |
3801 | while (ret >= 0 && ret < max_blocks) { | |
2ed88685 TT |
3802 | map.m_lblk = map.m_lblk + ret; |
3803 | map.m_len = max_blocks = max_blocks - ret; | |
a2df2a63 AA |
3804 | handle = ext4_journal_start(inode, credits); |
3805 | if (IS_ERR(handle)) { | |
3806 | ret = PTR_ERR(handle); | |
3807 | break; | |
3808 | } | |
2ed88685 | 3809 | ret = ext4_map_blocks(handle, inode, &map, |
c2177057 | 3810 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); |
221879c9 | 3811 | if (ret <= 0) { |
2c98615d AK |
3812 | #ifdef EXT4FS_DEBUG |
3813 | WARN_ON(ret <= 0); | |
e35fd660 | 3814 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
2c98615d | 3815 | "returned error inode#%lu, block=%u, " |
9fd9784c | 3816 | "max_blocks=%u", __func__, |
a6371b63 | 3817 | inode->i_ino, map.m_lblk, max_blocks); |
2c98615d | 3818 | #endif |
a2df2a63 AA |
3819 | ext4_mark_inode_dirty(handle, inode); |
3820 | ret2 = ext4_journal_stop(handle); | |
3821 | break; | |
3822 | } | |
2ed88685 | 3823 | if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, |
fd28784a AK |
3824 | blkbits) >> blkbits)) |
3825 | new_size = offset + len; | |
3826 | else | |
2ed88685 | 3827 | new_size = (map.m_lblk + ret) << blkbits; |
a2df2a63 | 3828 | |
fd28784a | 3829 | ext4_falloc_update_inode(inode, mode, new_size, |
2ed88685 | 3830 | (map.m_flags & EXT4_MAP_NEW)); |
a2df2a63 AA |
3831 | ext4_mark_inode_dirty(handle, inode); |
3832 | ret2 = ext4_journal_stop(handle); | |
3833 | if (ret2) | |
3834 | break; | |
3835 | } | |
fd28784a AK |
3836 | if (ret == -ENOSPC && |
3837 | ext4_should_retry_alloc(inode->i_sb, &retries)) { | |
3838 | ret = 0; | |
a2df2a63 | 3839 | goto retry; |
a2df2a63 | 3840 | } |
55bd725a | 3841 | mutex_unlock(&inode->i_mutex); |
0562e0ba JZ |
3842 | trace_ext4_fallocate_exit(inode, offset, max_blocks, |
3843 | ret > 0 ? ret2 : ret); | |
a2df2a63 AA |
3844 | return ret > 0 ? ret2 : ret; |
3845 | } | |
6873fa0d | 3846 | |
0031462b MC |
3847 | /* |
3848 | * This function converts a range of blocks to written extents. | |
3849 | * The caller passes the start offset and the size; | |
3850 | * all unwritten extents within this range will be converted to | |
3851 | * written extents. | |
3852 | * | |
3853 | * This function is called from the direct IO end io call back | |
3854 | * function, to convert the fallocated extents after IO is completed. | |
109f5565 | 3855 | * Returns 0 on success. |
0031462b MC |
3856 | */ |
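/*
 * For example (hypothetical numbers, blocksize 4096): when an AIO DIO
 * write of 8192 bytes at offset 4096 completes, end_io calls this with
 * (4096, 8192) and the loop below converts logical blocks 1 and 2 from
 * unwritten to written.
 */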
3857 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |
a1de02dc | 3858 | ssize_t len) |
0031462b MC |
3859 | { |
3860 | handle_t *handle; | |
0031462b MC |
3861 | unsigned int max_blocks; |
3862 | int ret = 0; | |
3863 | int ret2 = 0; | |
2ed88685 | 3864 | struct ext4_map_blocks map; |
0031462b MC |
3865 | unsigned int credits, blkbits = inode->i_blkbits; |
3866 | ||
2ed88685 | 3867 | map.m_lblk = offset >> blkbits; |
0031462b MC |
3868 | /* |
3869 | * We can't just convert len to max_blocks because offset may not | |
3870 | * be block-aligned, e.g. blocksize = 4096, offset = 3072, len = 2048 | |
3871 | */ | |
2ed88685 TT |
3872 | max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - |
3873 | map.m_lblk); | |
0031462b MC |
3874 | /* |
3875 | * credits to insert 1 extent into extent tree | |
3876 | */ | |
3877 | credits = ext4_chunk_trans_blocks(inode, max_blocks); | |
3878 | while (ret >= 0 && ret < max_blocks) { | |
2ed88685 TT |
3879 | map.m_lblk += ret; |
3880 | map.m_len = (max_blocks -= ret); | |
0031462b MC |
3881 | handle = ext4_journal_start(inode, credits); |
3882 | if (IS_ERR(handle)) { | |
3883 | ret = PTR_ERR(handle); | |
3884 | break; | |
3885 | } | |
2ed88685 | 3886 | ret = ext4_map_blocks(handle, inode, &map, |
c7064ef1 | 3887 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
0031462b MC |
3888 | if (ret <= 0) { |
3889 | WARN_ON(ret <= 0); | |
e35fd660 | 3890 | printk(KERN_ERR "%s: ext4_ext_map_blocks " |
0031462b MC |
3891 | "returned error inode#%lu, block=%u, " |
3892 | "max_blocks=%u", __func__, | |
2ed88685 | 3893 | inode->i_ino, map.m_lblk, map.m_len); |
0031462b MC |
3894 | } |
3895 | ext4_mark_inode_dirty(handle, inode); | |
3896 | ret2 = ext4_journal_stop(handle); | |
3897 | if (ret <= 0 || ret2 ) | |
3898 | break; | |
3899 | } | |
3900 | return ret > 0 ? ret2 : ret; | |
3901 | } | |
6d9c85eb | 3902 | |
6873fa0d ES |
3903 | /* |
3904 | * Callback function called for each extent to gather FIEMAP information. | |
3905 | */ | |
3a06d778 | 3906 | static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, |
6873fa0d ES |
3907 | struct ext4_ext_cache *newex, struct ext4_extent *ex, |
3908 | void *data) | |
3909 | { | |
6873fa0d ES |
3910 | __u64 logical; |
3911 | __u64 physical; | |
3912 | __u64 length; | |
6d9c85eb | 3913 | loff_t size; |
6873fa0d | 3914 | __u32 flags = 0; |
6d9c85eb YY |
3915 | int ret = 0; |
3916 | struct fiemap_extent_info *fieinfo = data; | |
3917 | unsigned char blksize_bits; | |
6873fa0d | 3918 | |
6d9c85eb YY |
3919 | blksize_bits = inode->i_sb->s_blocksize_bits; |
3920 | logical = (__u64)newex->ec_block << blksize_bits; | |
6873fa0d | 3921 | |
b05e6ae5 | 3922 | if (newex->ec_start == 0) { |
6d9c85eb YY |
3923 | /* |
3924 | * If no extent in the extent tree contains block @newex->ec_start, | |
3925 | * the block may lie in 1) a hole or 2) a delayed extent. | |
3926 | * | |
3927 | * Holes or delayed-extents are processed as follows. | |
3929 | * 1. look up dirty pages within the specified range in the pagecache. | |
3930 | * If no page is found, there is no delayed extent and we | |
3931 | * return EXT_CONTINUE. | |
3932 | * 2. find the first mapped buffer, | |
3933 | * 3. check whether the mapped buffer is both in the request range | |
3934 | * and a delayed buffer. If not, there is no delayed extent, | |
3935 | * so return. | |
3936 | * 4. a delayed extent is found; the extent will be collected. | |
3936 | */ | |
3937 | ext4_lblk_t end = 0; | |
3938 | pgoff_t last_offset; | |
3939 | pgoff_t offset; | |
3940 | pgoff_t index; | |
b221349f | 3941 | pgoff_t start_index = 0; |
6d9c85eb | 3942 | struct page **pages = NULL; |
6873fa0d | 3943 | struct buffer_head *bh = NULL; |
6d9c85eb YY |
3944 | struct buffer_head *head = NULL; |
3945 | unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); | |
3946 | ||
3947 | pages = kmalloc(PAGE_SIZE, GFP_KERNEL); | |
3948 | if (pages == NULL) | |
3949 | return -ENOMEM; | |
6873fa0d ES |
3950 | |
3951 | offset = logical >> PAGE_SHIFT; | |
6d9c85eb YY |
3952 | repeat: |
3953 | last_offset = offset; | |
3954 | head = NULL; | |
3955 | ret = find_get_pages_tag(inode->i_mapping, &offset, | |
3956 | PAGECACHE_TAG_DIRTY, nr_pages, pages); | |
3957 | ||
3958 | if (!(flags & FIEMAP_EXTENT_DELALLOC)) { | |
3959 | /* First time, try to find a mapped buffer. */ | |
3960 | if (ret == 0) { | |
3961 | out: | |
3962 | for (index = 0; index < ret; index++) | |
3963 | page_cache_release(pages[index]); | |
3964 | /* just a hole. */ | |
3965 | kfree(pages); | |
3966 | return EXT_CONTINUE; | |
3967 | } | |
b221349f | 3968 | index = 0; |
6873fa0d | 3969 | |
b221349f | 3970 | next_page: |
6d9c85eb | 3971 | /* Try to find the 1st mapped buffer. */ |
b221349f | 3972 | end = ((__u64)pages[index]->index << PAGE_SHIFT) >> |
6d9c85eb | 3973 | blksize_bits; |
b221349f | 3974 | if (!page_has_buffers(pages[index])) |
6d9c85eb | 3975 | goto out; |
b221349f | 3976 | head = page_buffers(pages[index]); |
6d9c85eb YY |
3977 | if (!head) |
3978 | goto out; | |
6873fa0d | 3979 | |
b221349f | 3980 | index++; |
6d9c85eb YY |
3981 | bh = head; |
3982 | do { | |
b221349f YY |
3983 | if (end >= newex->ec_block + |
3984 | newex->ec_len) | |
3985 | /* The buffer is out of | |
3986 | * the request range. | |
3987 | */ | |
3988 | goto out; | |
3989 | ||
3990 | if (buffer_mapped(bh) && | |
3991 | end >= newex->ec_block) { | |
3992 | start_index = index - 1; | |
6d9c85eb | 3993 | /* get the 1st mapped buffer. */ |
6d9c85eb YY |
3994 | goto found_mapped_buffer; |
3995 | } | |
b221349f | 3996 | |
6d9c85eb YY |
3997 | bh = bh->b_this_page; |
3998 | end++; | |
3999 | } while (bh != head); | |
6873fa0d | 4000 | |
b221349f YY |
4001 | /* No mapped buffer in the range was found in this page, | |
4002 | * so we need to look up the next page. | |
4003 | */ | |
4004 | if (index >= ret) { | |
4005 | /* There is no page left, but we need to limit | |
4006 | * newex->ec_len. | |
4007 | */ | |
4008 | newex->ec_len = end - newex->ec_block; | |
4009 | goto out; | |
4010 | } | |
4011 | goto next_page; | |
6873fa0d | 4012 | } else { |
6d9c85eb YY |
4013 | /* Find contiguous delayed buffers. */ | |
4014 | if (ret > 0 && pages[0]->index == last_offset) | |
4015 | head = page_buffers(pages[0]); | |
4016 | bh = head; | |
b221349f YY |
4017 | index = 1; |
4018 | start_index = 0; | |
6873fa0d | 4019 | } |
6d9c85eb YY |
4020 | |
4021 | found_mapped_buffer: | |
4022 | if (bh != NULL && buffer_delay(bh)) { | |
4023 | /* 1st or contiguous delayed buffer found. */ | |
4024 | if (!(flags & FIEMAP_EXTENT_DELALLOC)) { | |
4025 | /* | |
4026 | * 1st delayed buffer found, record | |
4027 | * the start of extent. | |
4028 | */ | |
4029 | flags |= FIEMAP_EXTENT_DELALLOC; | |
4030 | newex->ec_block = end; | |
4031 | logical = (__u64)end << blksize_bits; | |
4032 | } | |
4033 | /* Find contiguous delayed buffers. */ | |
4034 | do { | |
4035 | if (!buffer_delay(bh)) | |
4036 | goto found_delayed_extent; | |
4037 | bh = bh->b_this_page; | |
4038 | end++; | |
4039 | } while (bh != head); | |
4040 | ||
b221349f | 4041 | for (; index < ret; index++) { |
6d9c85eb YY |
4042 | if (!page_has_buffers(pages[index])) { |
4043 | bh = NULL; | |
4044 | break; | |
4045 | } | |
4046 | head = page_buffers(pages[index]); | |
4047 | if (!head) { | |
4048 | bh = NULL; | |
4049 | break; | |
4050 | } | |
b221349f | 4051 | |
6d9c85eb | 4052 | if (pages[index]->index != |
b221349f YY |
4053 | pages[start_index]->index + index |
4054 | - start_index) { | |
6d9c85eb YY |
4055 | /* Blocks are not contiguous. */ |
4056 | bh = NULL; | |
4057 | break; | |
4058 | } | |
4059 | bh = head; | |
4060 | do { | |
4061 | if (!buffer_delay(bh)) | |
4062 | /* Delayed-extent ends. */ | |
4063 | goto found_delayed_extent; | |
4064 | bh = bh->b_this_page; | |
4065 | end++; | |
4066 | } while (bh != head); | |
4067 | } | |
4068 | } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) | |
4069 | /* a hole found. */ | |
4070 | goto out; | |
4071 | ||
4072 | found_delayed_extent: | |
4073 | newex->ec_len = min(end - newex->ec_block, | |
4074 | (ext4_lblk_t)EXT_INIT_MAX_LEN); | |
4075 | if (ret == nr_pages && bh != NULL && | |
4076 | newex->ec_len < EXT_INIT_MAX_LEN && | |
4077 | buffer_delay(bh)) { | |
4078 | /* The extent has not been fully collected yet; continue. */ | |
4079 | for (index = 0; index < ret; index++) | |
4080 | page_cache_release(pages[index]); | |
4081 | goto repeat; | |
6873fa0d | 4082 | } |
6d9c85eb YY |
4083 | |
4084 | for (index = 0; index < ret; index++) | |
4085 | page_cache_release(pages[index]); | |
4086 | kfree(pages); | |
6873fa0d ES |
4087 | } |
4088 | ||
4089 | physical = (__u64)newex->ec_start << blksize_bits; | |
4090 | length = (__u64)newex->ec_len << blksize_bits; | |
4091 | ||
4092 | if (ex && ext4_ext_is_uninitialized(ex)) | |
4093 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
4094 | ||
6d9c85eb YY |
4095 | size = i_size_read(inode); |
4096 | if (logical + length >= size) | |
6873fa0d ES |
4097 | flags |= FIEMAP_EXTENT_LAST; |
4098 | ||
6d9c85eb | 4099 | ret = fiemap_fill_next_extent(fieinfo, logical, physical, |
6873fa0d | 4100 | length, flags); |
6d9c85eb YY |
4101 | if (ret < 0) |
4102 | return ret; | |
4103 | if (ret == 1) | |
6873fa0d | 4104 | return EXT_BREAK; |
6873fa0d ES |
4105 | return EXT_CONTINUE; |
4106 | } | |
4107 | ||
4108 | /* fiemap flags we can handle specified here */ | |
4109 | #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) | |
4110 | ||
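/*
 * A userspace sketch of driving this path through the FIEMAP ioctl
 * (illustrative only; buffer sizing and error handling elided):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	char buf[sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent)];
 *	struct fiemap *fm = (struct fiemap *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	fm->fm_extent_count = 32;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * Each fiemap_extent returned is filled in by ext4_ext_fiemap_cb()
 * through fiemap_fill_next_extent().
 */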
3a06d778 AK |
4111 | static int ext4_xattr_fiemap(struct inode *inode, |
4112 | struct fiemap_extent_info *fieinfo) | |
6873fa0d ES |
4113 | { |
4114 | __u64 physical = 0; | |
4115 | __u64 length; | |
4116 | __u32 flags = FIEMAP_EXTENT_LAST; | |
4117 | int blockbits = inode->i_sb->s_blocksize_bits; | |
4118 | int error = 0; | |
4119 | ||
4120 | /* in-inode? */ | |
19f5fb7a | 4121 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
6873fa0d ES |
4122 | struct ext4_iloc iloc; |
4123 | int offset; /* offset of xattr in inode */ | |
4124 | ||
4125 | error = ext4_get_inode_loc(inode, &iloc); | |
4126 | if (error) | |
4127 | return error; | |
4128 | physical = iloc.bh->b_blocknr << blockbits; | |
4129 | offset = EXT4_GOOD_OLD_INODE_SIZE + | |
4130 | EXT4_I(inode)->i_extra_isize; | |
4131 | physical += offset; | |
4132 | length = EXT4_SB(inode->i_sb)->s_inode_size - offset; | |
4133 | flags |= FIEMAP_EXTENT_DATA_INLINE; | |
fd2dd9fb | 4134 | brelse(iloc.bh); |
6873fa0d ES |
4135 | } else { /* external block */ |
4136 | physical = EXT4_I(inode)->i_file_acl << blockbits; | |
4137 | length = inode->i_sb->s_blocksize; | |
4138 | } | |
4139 | ||
4140 | if (physical) | |
4141 | error = fiemap_fill_next_extent(fieinfo, 0, physical, | |
4142 | length, flags); | |
4143 | return (error < 0 ? error : 0); | |
4144 | } | |
4145 | ||
a4bb6b64 AH |
4146 | /* |
4147 | * ext4_ext_punch_hole | |
4148 | * | |
4149 | * Punches a hole of "length" bytes in a file starting | |
4150 | * at byte "offset" | |
4151 | * | |
4152 | * @inode: The inode of the file to punch a hole in | |
4153 | * @offset: The starting byte offset of the hole | |
4154 | * @length: The length of the hole | |
4155 | * | |
4156 | * Returns the number of blocks removed, or negative on error | |
4157 | */ | |
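/*
 * A userspace sketch of what reaches this function (illustrative only;
 * offset and length are hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4 << 20, 1 << 20);
 *
 * This deallocates 1MiB at offset 4MiB without changing i_size; the VFS
 * requires FALLOC_FL_KEEP_SIZE together with FALLOC_FL_PUNCH_HOLE, and
 * ext4_fallocate() above forwards such requests to ext4_punch_hole(),
 * which is expected to land here for extent-mapped files.
 */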
4158 | int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) | |
4159 | { | |
4160 | struct inode *inode = file->f_path.dentry->d_inode; | |
4161 | struct super_block *sb = inode->i_sb; | |
4162 | struct ext4_ext_cache cache_ex; | |
4163 | ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks; | |
4164 | struct address_space *mapping = inode->i_mapping; | |
4165 | struct ext4_map_blocks map; | |
4166 | handle_t *handle; | |
4167 | loff_t first_block_offset, last_block_offset, block_len; | |
4168 | loff_t first_page, last_page, first_page_offset, last_page_offset; | |
4169 | int ret, credits, blocks_released, err = 0; | |
4170 | ||
4171 | first_block = (offset + sb->s_blocksize - 1) >> | |
4172 | EXT4_BLOCK_SIZE_BITS(sb); | |
4173 | last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); | |
4174 | ||
4175 | first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb); | |
4176 | last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb); | |
4177 | ||
4178 | first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | |
4179 | last_page = (offset + length) >> PAGE_CACHE_SHIFT; | |
4180 | ||
4181 | first_page_offset = first_page << PAGE_CACHE_SHIFT; | |
4182 | last_page_offset = last_page << PAGE_CACHE_SHIFT; | |
4183 | ||
4184 | /* | |
4185 | * Write out all dirty pages to avoid race conditions, | |
4186 | * then release them. | |
4187 | */ | |
4188 | if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { | |
4189 | err = filemap_write_and_wait_range(mapping, | |
4190 | first_page_offset == 0 ? 0 : first_page_offset-1, | |
4191 | last_page_offset); | |
4192 | ||
4193 | if (err) | |
4194 | return err; | |
4195 | } | |
4196 | ||
4197 | /* Now release the pages */ | |
4198 | if (last_page_offset > first_page_offset) { | |
4199 | truncate_inode_pages_range(mapping, first_page_offset, | |
4200 | last_page_offset-1); | |
4201 | } | |
4202 | ||
4203 | /* finish any pending end_io work */ | |
4204 | ext4_flush_completed_IO(inode); | |
4205 | ||
4206 | credits = ext4_writepage_trans_blocks(inode); | |
4207 | handle = ext4_journal_start(inode, credits); | |
4208 | if (IS_ERR(handle)) | |
4209 | return PTR_ERR(handle); | |
4210 | ||
4211 | err = ext4_orphan_add(handle, inode); | |
4212 | if (err) | |
4213 | goto out; | |
4214 | ||
4215 | /* | |
4216 | * Now we need to zero out the non-block-aligned data. | |
4217 | * If the hole starts and ends within the same block, just | |
4218 | * zero out that middle portion | |
4219 | */ | |
4220 | if (first_block > last_block) | |
4221 | ext4_block_zero_page_range(handle, mapping, offset, length); | |
4222 | else { | |
4223 | /* zero out the head of the hole before the first block */ | |
4224 | block_len = first_block_offset - offset; | |
4225 | if (block_len > 0) | |
4226 | ext4_block_zero_page_range(handle, mapping, | |
4227 | offset, block_len); | |
4228 | ||
4229 | /* zero out the tail of the hole after the last block */ | |
4230 | block_len = offset + length - last_block_offset; | |
4231 | if (block_len > 0) { | |
4232 | ext4_block_zero_page_range(handle, mapping, | |
4233 | last_block_offset, block_len); | |
4234 | } | |
4235 | } | |
4236 | ||
4237 | /* If there are no blocks to remove, return now */ | |
4238 | if (first_block >= last_block) | |
4239 | goto out; | |
4240 | ||
4241 | down_write(&EXT4_I(inode)->i_data_sem); | |
4242 | ext4_ext_invalidate_cache(inode); | |
4243 | ext4_discard_preallocations(inode); | |
4244 | ||
4245 | /* | |
4246 | * Loop over all the blocks and identify blocks | |
4247 | * that need to be punched out | |
4248 | */ | |
4249 | iblock = first_block; | |
4250 | blocks_released = 0; | |
4251 | while (iblock < last_block) { | |
4252 | max_blocks = last_block - iblock; | |
4253 | num_blocks = 1; | |
4254 | memset(&map, 0, sizeof(map)); | |
4255 | map.m_lblk = iblock; | |
4256 | map.m_len = max_blocks; | |
4257 | ret = ext4_ext_map_blocks(handle, inode, &map, | |
4258 | EXT4_GET_BLOCKS_PUNCH_OUT_EXT); | |
4259 | ||
4260 | if (ret > 0) { | |
4261 | blocks_released += ret; | |
4262 | num_blocks = ret; | |
4263 | } else if (ret == 0) { | |
4264 | /* | |
4265 | * If map_blocks could not find the block, | |
4266 | * then it lies in a hole. If the hole was | |
4267 | * not already cached, map_blocks will have | |
4268 | * put it in the cache, so we can read the | |
4269 | * hole back out of the cache | |
4270 | */ | |
4271 | memset(&cache_ex, 0, sizeof(cache_ex)); | |
4272 | if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) && | |
4273 | !cache_ex.ec_start) { | |
4274 | ||
4275 | /* The hole is cached */ | |
4276 | num_blocks = cache_ex.ec_block + | |
4277 | cache_ex.ec_len - iblock; | |
4278 | ||
4279 | } else { | |
4280 | /* The block could not be identified */ | |
4281 | err = -EIO; | |
4282 | break; | |
4283 | } | |
4284 | } else { | |
4285 | /* Map blocks error */ | |
4286 | err = ret; | |
4287 | break; | |
4288 | } | |
4289 | ||
4290 | if (num_blocks == 0) { | |
4291 | /* This condition should never happen */ | |
4292 | ext_debug("Block lookup failed"); | |
4293 | err = -EIO; | |
4294 | break; | |
4295 | } | |
4296 | ||
4297 | iblock += num_blocks; | |
4298 | } | |
4299 | ||
4300 | if (blocks_released > 0) { | |
4301 | ext4_ext_invalidate_cache(inode); | |
4302 | ext4_discard_preallocations(inode); | |
4303 | } | |
4304 | ||
4305 | if (IS_SYNC(inode)) | |
4306 | ext4_handle_sync(handle); | |
4307 | ||
4308 | up_write(&EXT4_I(inode)->i_data_sem); | |
4309 | ||
4310 | out: | |
4311 | ext4_orphan_del(handle, inode); | |
4312 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); | |
4313 | ext4_mark_inode_dirty(handle, inode); | |
4314 | ext4_journal_stop(handle); | |
4315 | return err; | |
4316 | } | |
6873fa0d ES |
4317 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
4318 | __u64 start, __u64 len) | |
4319 | { | |
4320 | ext4_lblk_t start_blk; | |
6873fa0d ES |
4321 | int error = 0; |
4322 | ||
4323 | /* fallback to generic here if not in extents fmt */ | |
12e9b892 | 4324 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
6873fa0d ES |
4325 | return generic_block_fiemap(inode, fieinfo, start, len, |
4326 | ext4_get_block); | |
4327 | ||
4328 | if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) | |
4329 | return -EBADR; | |
4330 | ||
4331 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { | |
4332 | error = ext4_xattr_fiemap(inode, fieinfo); | |
4333 | } else { | |
aca92ff6 LM |
4334 | ext4_lblk_t len_blks; |
4335 | __u64 last_blk; | |
4336 | ||
6873fa0d | 4337 | start_blk = start >> inode->i_sb->s_blocksize_bits; |
aca92ff6 LM |
4338 | last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; |
4339 | if (last_blk >= EXT_MAX_BLOCK) | |
4340 | last_blk = EXT_MAX_BLOCK-1; | |
4341 | len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; | |
6873fa0d ES |
4342 | |
4343 | /* | |
4344 | * Walk the extent tree gathering extent information. | |
4345 | * ext4_ext_fiemap_cb will push extents back to user. | |
4346 | */ | |
6873fa0d ES |
4347 | error = ext4_ext_walk_space(inode, start_blk, len_blks, |
4348 | ext4_ext_fiemap_cb, fieinfo); | |
6873fa0d ES |
4349 | } |
4350 | ||
4351 | return error; | |
4352 | } |