fs/ext4/extents.c (mirror_ubuntu-bionic-kernel.git, git.proxmox.com)
ext4: fix kernel BUG on large-scale rm -rf commands
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

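/*
 * Checksum helpers for extent tree blocks.  The checksum covers the
 * node from the start of its header up to (but not including) the
 * ext4_extent_tail, and is seeded with the per-inode checksum seed.
 * The verify/set helpers are no-ops unless the metadata_csum
 * RO_COMPAT feature is enabled on the filesystem.
 */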
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

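/*
 * Make sure @handle carries at least @needed journal credits before we
 * modify the extent tree during truncate.  Returns 0 if nothing needs
 * to be done, a negative error on failure, and -EAGAIN after a
 * successful transaction restart, so the caller knows it must repeat
 * its tree lookup.
 */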
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
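		/*
		 * Illustrative example (numbers are made up): if the
		 * nearest extent maps logical block 90 to physical
		 * block 1000, a request for logical block 100 gets
		 * the goal 1000 + (100 - 90) = 1010, keeping the
		 * file's blocks physically contiguous when possible.
		 */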
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

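/*
 * How many extents / index entries fit in a tree node.  The first two
 * helpers size an on-disk block; the last two size the root node that
 * lives inside the inode's i_data.  With AGGRESSIVE_TEST defined, the
 * capacities are artificially capped so that deep trees can be
 * exercised even on small files.
 */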
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
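	/*
	 * Worked example (illustrative, for a 4KiB block size):
	 * idxs = (4096 - 12) / 12 = 340, so a new leaf index block is
	 * charged once every 340 contiguous blocks, a second-level
	 * index once every 340^2 blocks, and so on.
	 */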
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

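/*
 * Sanity-check an extent tree node before it is trusted: verify the
 * magic, the expected depth, the entry-count bounds against what can
 * actually fit at this level, the validity of every block number
 * referenced, and (for non-root nodes) the block checksum.  Any
 * failure is reported via ext4_error_inode() and returned as -EIO.
 */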
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			 "bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 error_msg, le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
				  le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx),
				  newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

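/* Release the buffer heads pinned while walking the extent tree. */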
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block;
 * the header must be checked before calling this.
 * Starting the search at the second index keeps the invariant that
 * l - 1 always points at a valid entry when the loop terminates.
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
			  m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
				    <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for the closest extent of the given block;
 * the header must be checked before calling this.
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
			  m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_uninitialized(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
				    <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

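/*
 * Walk the extent tree from the root down to the leaf covering
 * @block, recording the index chosen at each level in @path.  A NULL
 * @path allocates a fresh array sized for depth + 2 levels, so a
 * later tree growth can reuse it; each on-disk node is read and
 * verified with ext4_ext_check_block() before it is descended into.
 */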
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
		     struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			       GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						   path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_block(inode, eh, i, bh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
			  "move %d indices from 0x%p to 0x%p\n",
			  logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The new index won't be inserted and
	 * the tree will stay consistent.  The next mount will repair
	 * the buffers too.
	 */

	/*
	 * Get an array to track all the allocated blocks.
	 * We need it to free those blocks on error.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
					   newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it grows the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys;
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys;
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check_block(inode, eh,
					 path->p_depth - depth, bh)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns the allocated block in the subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers the block number from an index entry as an
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
			    path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* a zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				       struct ext4_ext_path *path,
				       struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function tries to merge the @ex extent to its neighbours in the tree.
 * It tries the left neighbour first and, failing that, tries to merge @ex
 * to the right.  Returns 1 if the merge to the right succeeded, else 0.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;
	int ret = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		ret = ext4_ext_try_to_merge_right(inode, path, ex);

	return ret;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 &= ~(sbi->s_cluster_ratio - 1);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into an existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
			  le32_to_cpu(newext->ee_block),
			  ext4_ext_pblock(newext),
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d after: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d before: "
				  "nearest %p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
				  "move %d extents from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_pblock(newext),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);


	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

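/*
 * Iterate over the logical range [block, block + num) of the inode,
 * invoking @func once per region with an ext4_ext_cache describing
 * either an existing extent or the hole in front of one (ec_start ==
 * 0).  The callback may return EXT_BREAK to stop the walk or
 * EXT_REPEAT to revisit the same region; the path is reallocated
 * whenever a callback changes the tree depth.
 */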
1915 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1916 ext4_lblk_t num, ext_prepare_callback func,
1917 void *cbdata)
1918 {
1919 struct ext4_ext_path *path = NULL;
1920 struct ext4_ext_cache cbex;
1921 struct ext4_extent *ex;
1922 ext4_lblk_t next, start = 0, end = 0;
1923 ext4_lblk_t last = block + num;
1924 int depth, exists, err = 0;
1925
1926 BUG_ON(func == NULL);
1927 BUG_ON(inode == NULL);
1928
1929 while (block < last && block != EXT_MAX_BLOCKS) {
1930 num = last - block;
1931 /* find extent for this block */
1932 down_read(&EXT4_I(inode)->i_data_sem);
1933 path = ext4_ext_find_extent(inode, block, path);
1934 up_read(&EXT4_I(inode)->i_data_sem);
1935 if (IS_ERR(path)) {
1936 err = PTR_ERR(path);
1937 path = NULL;
1938 break;
1939 }
1940
1941 depth = ext_depth(inode);
1942 if (unlikely(path[depth].p_hdr == NULL)) {
1943 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1944 err = -EIO;
1945 break;
1946 }
1947 ex = path[depth].p_ext;
1948 next = ext4_ext_next_allocated_block(path);
1949
1950 exists = 0;
1951 if (!ex) {
1952 /* there is no extent yet, so try to allocate
1953 * all requested space */
1954 start = block;
1955 end = block + num;
1956 } else if (le32_to_cpu(ex->ee_block) > block) {
1957 /* need to allocate space before found extent */
1958 start = block;
1959 end = le32_to_cpu(ex->ee_block);
1960 if (block + num < end)
1961 end = block + num;
1962 } else if (block >= le32_to_cpu(ex->ee_block)
1963 + ext4_ext_get_actual_len(ex)) {
1964 /* need to allocate space after found extent */
1965 start = block;
1966 end = block + num;
1967 if (end >= next)
1968 end = next;
1969 } else if (block >= le32_to_cpu(ex->ee_block)) {
1970 /*
1971 * some part of requested space is covered
1972 * by found extent
1973 */
1974 start = block;
1975 end = le32_to_cpu(ex->ee_block)
1976 + ext4_ext_get_actual_len(ex);
1977 if (block + num < end)
1978 end = block + num;
1979 exists = 1;
1980 } else {
1981 BUG();
1982 }
1983 BUG_ON(end <= start);
1984
1985 if (!exists) {
1986 cbex.ec_block = start;
1987 cbex.ec_len = end - start;
1988 cbex.ec_start = 0;
1989 } else {
1990 cbex.ec_block = le32_to_cpu(ex->ee_block);
1991 cbex.ec_len = ext4_ext_get_actual_len(ex);
1992 cbex.ec_start = ext4_ext_pblock(ex);
1993 }
1994
1995 if (unlikely(cbex.ec_len == 0)) {
1996 EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1997 err = -EIO;
1998 break;
1999 }
2000 err = func(inode, next, &cbex, ex, cbdata);
2001 ext4_ext_drop_refs(path);
2002
2003 if (err < 0)
2004 break;
2005
2006 if (err == EXT_REPEAT)
2007 continue;
2008 else if (err == EXT_BREAK) {
2009 err = 0;
2010 break;
2011 }
2012
2013 if (ext_depth(inode) != depth) {
2014 /* depth was changed. we have to realloc path */
2015 kfree(path);
2016 path = NULL;
2017 }
2018
2019 block = cbex.ec_block + cbex.ec_len;
2020 }
2021
2022 if (path) {
2023 ext4_ext_drop_refs(path);
2024 kfree(path);
2025 }
2026
2027 return err;
2028 }
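/*
 * Worked example (editor's illustration, not part of the original
 * source): a minimal ext_prepare_callback for ext4_ext_walk_space().
 * It counts the regions reported as real extents; the walker marks
 * holes with ec_start == 0 (see the "!exists" branch above).  The
 * EXT_CONTINUE/EXT_BREAK/EXT_REPEAT return codes are the ones the
 * walk loop above dispatches on; the callback name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int ext4_count_mapped_cb(struct inode *inode, ext4_lblk_t next,
				struct ext4_ext_cache *cbex,
				struct ext4_extent *ex, void *cbdata)
{
	unsigned long *nr_mapped = cbdata;

	if (cbex->ec_start != 0)	/* a real extent, not a hole */
		(*nr_mapped)++;
	return EXT_CONTINUE;		/* keep walking the rest of the range */
}
#endif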
2029
2030 static void
2031 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
2032 __u32 len, ext4_fsblk_t start)
2033 {
2034 struct ext4_ext_cache *cex;
2035 BUG_ON(len == 0);
2036 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2037 trace_ext4_ext_put_in_cache(inode, block, len, start);
2038 cex = &EXT4_I(inode)->i_cached_extent;
2039 cex->ec_block = block;
2040 cex->ec_len = len;
2041 cex->ec_start = start;
2042 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2043 }
2044
2045 /*
2046 * ext4_ext_put_gap_in_cache:
2047 * calculate boundaries of the gap that the requested block fits into
2048 * and cache this gap
2049 */
2050 static void
2051 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2052 ext4_lblk_t block)
2053 {
2054 int depth = ext_depth(inode);
2055 unsigned long len;
2056 ext4_lblk_t lblock;
2057 struct ext4_extent *ex;
2058
2059 ex = path[depth].p_ext;
2060 if (ex == NULL) {
2061 /* there is no extent yet, so the gap covers the whole file */
2062 lblock = 0;
2063 len = EXT_MAX_BLOCKS;
2064 ext_debug("cache gap(whole file):");
2065 } else if (block < le32_to_cpu(ex->ee_block)) {
2066 lblock = block;
2067 len = le32_to_cpu(ex->ee_block) - block;
2068 ext_debug("cache gap(before): %u [%u:%u]",
2069 block,
2070 le32_to_cpu(ex->ee_block),
2071 ext4_ext_get_actual_len(ex));
2072 } else if (block >= le32_to_cpu(ex->ee_block)
2073 + ext4_ext_get_actual_len(ex)) {
2074 ext4_lblk_t next;
2075 lblock = le32_to_cpu(ex->ee_block)
2076 + ext4_ext_get_actual_len(ex);
2077
2078 next = ext4_ext_next_allocated_block(path);
2079 ext_debug("cache gap(after): [%u:%u] %u",
2080 le32_to_cpu(ex->ee_block),
2081 ext4_ext_get_actual_len(ex),
2082 block);
2083 BUG_ON(next == lblock);
2084 len = next - lblock;
2085 } else {
2086 lblock = len = 0;
2087 BUG();
2088 }
2089
2090 ext_debug(" -> %u:%lu\n", lblock, len);
2091 ext4_ext_put_in_cache(inode, lblock, len, 0);
2092 }
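/*
 * Worked example (editor's illustration, not part of the original
 * source): with a single extent covering logical blocks [100, 107]
 * (ee_block = 100, actual length 8), a lookup of block 50 takes the
 * "before" branch above and caches the gap as lblock = 50,
 * len = 100 - 50 = 50; a lookup of block 200 takes the "after" branch
 * and caches [108, next), where "next" comes from
 * ext4_ext_next_allocated_block().  Either way the gap is cached with
 * start == 0, which the cache lookup code reports as a hole.
 */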
2093
2094 /*
2095 * ext4_ext_check_cache()
2096 * Checks to see if the given block is in the cache.
2097 * If it is, the cached extent is stored in the given
2098 * cache extent pointer. If the cached extent is a hole,
2099 * this routine should be used instead of
2100 * ext4_ext_in_cache if the calling function needs to
2101 * know the size of the hole.
2102 *
2103 * @inode: The file's inode
2104 * @block: The block to look for in the cache
2105 * @ex: Pointer where the cached extent will be stored
2106 * if it contains block
2107 *
2108 * Return 0 if cache is invalid; 1 if the cache is valid
2109 */
2110 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2111 struct ext4_ext_cache *ex){
2112 struct ext4_ext_cache *cex;
2113 struct ext4_sb_info *sbi;
2114 int ret = 0;
2115
2116 /*
2117 * We borrow i_block_reservation_lock to protect i_cached_extent
2118 */
2119 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2120 cex = &EXT4_I(inode)->i_cached_extent;
2121 sbi = EXT4_SB(inode->i_sb);
2122
2123 /* does the cache hold valid data? */
2124 if (cex->ec_len == 0)
2125 goto errout;
2126
2127 if (in_range(block, cex->ec_block, cex->ec_len)) {
2128 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2129 ext_debug("%u cached by %u:%u:%llu\n",
2130 block,
2131 cex->ec_block, cex->ec_len, cex->ec_start);
2132 ret = 1;
2133 }
2134 errout:
2135 trace_ext4_ext_in_cache(inode, block, ret);
2136 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2137 return ret;
2138 }
2139
2140 /*
2141 * ext4_ext_in_cache()
2142 * Checks to see if the given block is in the cache.
2143 * If it is, the cached extent is stored in the given
2144 * extent pointer.
2145 *
2146 * @inode: The file's inode
2147 * @block: The block to look for in the cache
2148 * @ex: Pointer where the cached extent will be stored
2149 * if it contains block
2150 *
2151 * Return 0 if cache is invalid; 1 if the cache is valid
2152 */
2153 static int
2154 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2155 struct ext4_extent *ex)
2156 {
2157 struct ext4_ext_cache cex;
2158 int ret = 0;
2159
2160 if (ext4_ext_check_cache(inode, block, &cex)) {
2161 ex->ee_block = cpu_to_le32(cex.ec_block);
2162 ext4_ext_store_pblock(ex, cex.ec_start);
2163 ex->ee_len = cpu_to_le16(cex.ec_len);
2164 ret = 1;
2165 }
2166
2167 return ret;
2168 }
2169
2170
2171 /*
2172 * ext4_ext_rm_idx:
2173 * removes index from the index block.
2174 */
2175 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2176 struct ext4_ext_path *path)
2177 {
2178 int err;
2179 ext4_fsblk_t leaf;
2180
2181 /* free index block */
2182 path--;
2183 leaf = ext4_idx_pblock(path->p_idx);
2184 if (unlikely(path->p_hdr->eh_entries == 0)) {
2185 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2186 return -EIO;
2187 }
2188 err = ext4_ext_get_access(handle, inode, path);
2189 if (err)
2190 return err;
2191
2192 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2193 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2194 len *= sizeof(struct ext4_extent_idx);
2195 memmove(path->p_idx, path->p_idx + 1, len);
2196 }
2197
2198 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2199 err = ext4_ext_dirty(handle, inode, path);
2200 if (err)
2201 return err;
2202 ext_debug("index is empty, remove it, free block %llu\n", leaf);
2203 trace_ext4_ext_rm_idx(inode, leaf);
2204
2205 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2206 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2207 return err;
2208 }
2209
2210 /*
2211 * ext4_ext_calc_credits_for_single_extent:
2212 * This routine returns the maximum number of credits needed to insert
2213 * an extent into the extent tree.
2214 * When passing the actual path, the caller should calculate credits
2215 * under i_data_sem.
2216 */
2217 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2218 struct ext4_ext_path *path)
2219 {
2220 if (path) {
2221 int depth = ext_depth(inode);
2222 int ret = 0;
2223
2224 /* probably there is space in leaf? */
2225 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2226 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2227
2228 /*
2229 * There is some space in the leaf node, so
2230 * there is no need to account for a leaf block credit.
2231 *
2232 * Bitmaps, block group descriptor blocks, and
2233 * other metadata blocks still need to be
2234 * accounted for.
2235 */
2236 /* 1 bitmap, 1 block group descriptor */
2237 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2238 return ret;
2239 }
2240 }
2241
2242 return ext4_chunk_trans_blocks(inode, nrblocks);
2243 }
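/*
 * Worked example (editor's illustration, not part of the original
 * source): if the leaf pointed to by @path still has a free slot
 * (eh_entries < eh_max), the estimate above is
 * 2 + EXT4_META_TRANS_BLOCKS(sb) credits -- one block bitmap, one
 * group descriptor, plus the usual metadata overhead.  Only when the
 * leaf is full, or no path is supplied, is the (much larger) cost of
 * a possible tree split delegated to ext4_chunk_trans_blocks().
 */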
2244
2245 /*
2246 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2247 *
2248 * If nrblocks fit in a single extent (chunk flag is 1), then in the
2249 * worst case each tree level's index/leaf needs to be changed; if the
2250 * tree splits due to inserting a new extent, then the old tree's
2251 * index/leaf need to be updated too.
2252 *
2253 * If the nrblocks are discontiguous, they could cause
2254 * the whole tree to split more than once, but this is really rare.
2255 */
2256 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2257 {
2258 int index;
2259 int depth = ext_depth(inode);
2260
2261 if (chunk)
2262 index = depth * 2;
2263 else
2264 index = depth * 3;
2265
2266 return index;
2267 }
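/*
 * Worked example (editor's illustration, not part of the original
 * source): for a tree of depth 2, a single-extent modification
 * (chunk == 1) is charged 2 * 2 = 4 index/leaf blocks -- each level
 * changed once, plus the old index/leaf updated on a split -- while a
 * potentially discontiguous range (chunk == 0) is charged 3 * 2 = 6,
 * since it may split the tree more than once.
 */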
2268
2269 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2270 struct ext4_extent *ex,
2271 ext4_fsblk_t *partial_cluster,
2272 ext4_lblk_t from, ext4_lblk_t to)
2273 {
2274 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2275 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2276 ext4_fsblk_t pblk;
2277 int flags = EXT4_FREE_BLOCKS_FORGET;
2278
2279 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2280 flags |= EXT4_FREE_BLOCKS_METADATA;
2281 /*
2282 * For bigalloc file systems, we never free a partial cluster
2283 * at the beginning of the extent. Instead, we make a note
2284 * that we tried freeing the cluster, and check to see if we
2285 * need to free it on a subsequent call to ext4_remove_blocks,
2286 * or at the end of the ext4_truncate() operation.
2287 */
2288 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2289
2290 trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2291 /*
2292 * If we have a partial cluster, and it's different from the
2293 * cluster of the last block, we need to explicitly free the
2294 * partial cluster here.
2295 */
2296 pblk = ext4_ext_pblock(ex) + ee_len - 1;
2297 if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2298 ext4_free_blocks(handle, inode, NULL,
2299 EXT4_C2B(sbi, *partial_cluster),
2300 sbi->s_cluster_ratio, flags);
2301 *partial_cluster = 0;
2302 }
2303
2304 #ifdef EXTENTS_STATS
2305 {
2306 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2307 spin_lock(&sbi->s_ext_stats_lock);
2308 sbi->s_ext_blocks += ee_len;
2309 sbi->s_ext_extents++;
2310 if (ee_len < sbi->s_ext_min)
2311 sbi->s_ext_min = ee_len;
2312 if (ee_len > sbi->s_ext_max)
2313 sbi->s_ext_max = ee_len;
2314 if (ext_depth(inode) > sbi->s_depth_max)
2315 sbi->s_depth_max = ext_depth(inode);
2316 spin_unlock(&sbi->s_ext_stats_lock);
2317 }
2318 #endif
2319 if (from >= le32_to_cpu(ex->ee_block)
2320 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2321 /* tail removal */
2322 ext4_lblk_t num;
2323
2324 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2325 pblk = ext4_ext_pblock(ex) + ee_len - num;
2326 ext_debug("free last %u blocks starting %llu\n", num, pblk);
2327 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2328 /*
2329 * If the block range to be freed didn't start at the
2330 * beginning of a cluster, and we removed the entire
2331 * extent, save the partial cluster here, since we
2332 * might need to delete if we determine that the
2333 * truncate operation has removed all of the blocks in
2334 * the cluster.
2335 */
2336 if (pblk & (sbi->s_cluster_ratio - 1) &&
2337 (ee_len == num))
2338 *partial_cluster = EXT4_B2C(sbi, pblk);
2339 else
2340 *partial_cluster = 0;
2341 } else if (from == le32_to_cpu(ex->ee_block)
2342 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2343 /* head removal */
2344 ext4_lblk_t num;
2345 ext4_fsblk_t start;
2346
2347 num = to - from;
2348 start = ext4_ext_pblock(ex);
2349
2350 ext_debug("free first %u blocks starting %llu\n", num, start);
2351 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2352
2353 } else {
2354 printk(KERN_INFO "strange request: removal(2) "
2355 "%u-%u from %u:%u\n",
2356 from, to, le32_to_cpu(ex->ee_block), ee_len);
2357 }
2358 return 0;
2359 }
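/*
 * Worked example (editor's illustration, not part of the original
 * source): on a bigalloc filesystem with s_cluster_ratio = 16,
 * removing a whole extent whose first physical block is 100 sets
 * *partial_cluster = EXT4_B2C(sbi, 100) = 6, because 100 is not
 * cluster-aligned (100 & 15 != 0).  A later call whose extent ends in
 * a different cluster then takes the branch at the top of this
 * function and frees all 16 blocks of cluster 6 (physical blocks
 * 96..111) with a single ext4_free_blocks() call.
 */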
2360
2361
2362 /*
2363 * ext4_ext_rm_leaf() Removes the extents associated with the
2364 * blocks appearing between "start" and "end", and splits the extents
2365 * if "start" and "end" appear in the same extent
2366 *
2367 * @handle: The journal handle
2368 * @inode: The file's inode
2369 * @path: The path to the leaf
2370 * @start: The first block to remove
2371 * @end: The last block to remove
2372 */
2373 static int
2374 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2375 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2376 ext4_lblk_t start, ext4_lblk_t end)
2377 {
2378 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2379 int err = 0, correct_index = 0;
2380 int depth = ext_depth(inode), credits;
2381 struct ext4_extent_header *eh;
2382 ext4_lblk_t a, b;
2383 unsigned num;
2384 ext4_lblk_t ex_ee_block;
2385 unsigned short ex_ee_len;
2386 unsigned uninitialized = 0;
2387 struct ext4_extent *ex;
2388
2389 /* the header must be checked already in ext4_ext_remove_space() */
2390 ext_debug("truncate since %u in leaf to %u\n", start, end);
2391 if (!path[depth].p_hdr)
2392 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2393 eh = path[depth].p_hdr;
2394 if (unlikely(path[depth].p_hdr == NULL)) {
2395 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2396 return -EIO;
2397 }
2398 /* find where to start removing */
2399 ex = EXT_LAST_EXTENT(eh);
2400
2401 ex_ee_block = le32_to_cpu(ex->ee_block);
2402 ex_ee_len = ext4_ext_get_actual_len(ex);
2403
2404 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2405
2406 while (ex >= EXT_FIRST_EXTENT(eh) &&
2407 ex_ee_block + ex_ee_len > start) {
2408
2409 if (ext4_ext_is_uninitialized(ex))
2410 uninitialized = 1;
2411 else
2412 uninitialized = 0;
2413
2414 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2415 uninitialized, ex_ee_len);
2416 path[depth].p_ext = ex;
2417
2418 a = ex_ee_block > start ? ex_ee_block : start;
2419 b = ex_ee_block+ex_ee_len - 1 < end ?
2420 ex_ee_block+ex_ee_len - 1 : end;
2421
2422 ext_debug(" border %u:%u\n", a, b);
2423
2424 /* If this extent is beyond the end of the hole, skip it */
2425 if (end < ex_ee_block) {
2426 ex--;
2427 ex_ee_block = le32_to_cpu(ex->ee_block);
2428 ex_ee_len = ext4_ext_get_actual_len(ex);
2429 continue;
2430 } else if (b != ex_ee_block + ex_ee_len - 1) {
2431 EXT4_ERROR_INODE(inode,
2432 "can not handle truncate %u:%u "
2433 "on extent %u:%u",
2434 start, end, ex_ee_block,
2435 ex_ee_block + ex_ee_len - 1);
2436 err = -EIO;
2437 goto out;
2438 } else if (a != ex_ee_block) {
2439 /* remove tail of the extent */
2440 num = a - ex_ee_block;
2441 } else {
2442 /* remove whole extent: excellent! */
2443 num = 0;
2444 }
2445 /*
2446 * 3 for leaf, sb, and inode plus 2 (bmap and group
2447 * descriptor) for each block group; assume two block
2448 * groups plus ex_ee_len/blocks_per_block_group for
2449 * the worst case
2450 */
2451 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2452 if (ex == EXT_FIRST_EXTENT(eh)) {
2453 correct_index = 1;
2454 credits += (ext_depth(inode)) + 1;
2455 }
2456 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2457
2458 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2459 if (err)
2460 goto out;
2461
2462 err = ext4_ext_get_access(handle, inode, path + depth);
2463 if (err)
2464 goto out;
2465
2466 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2467 a, b);
2468 if (err)
2469 goto out;
2470
2471 if (num == 0)
2472 /* this extent is removed; mark slot entirely unused */
2473 ext4_ext_store_pblock(ex, 0);
2474
2475 ex->ee_len = cpu_to_le16(num);
2476 /*
2477 * Do not mark uninitialized if all the blocks in the
2478 * extent have been removed.
2479 */
2480 if (uninitialized && num)
2481 ext4_ext_mark_uninitialized(ex);
2482 /*
2483 * If the extent was completely released,
2484 * we need to remove it from the leaf
2485 */
2486 if (num == 0) {
2487 if (end != EXT_MAX_BLOCKS - 1) {
2488 /*
2489 * For hole punching, we need to scoot all the
2490 * extents up when an extent is removed so that
2491 * we don't have blank extents in the middle
2492 */
2493 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2494 sizeof(struct ext4_extent));
2495
2496 /* Now get rid of the one at the end */
2497 memset(EXT_LAST_EXTENT(eh), 0,
2498 sizeof(struct ext4_extent));
2499 }
2500 le16_add_cpu(&eh->eh_entries, -1);
2501 } else
2502 *partial_cluster = 0;
2503
2504 err = ext4_ext_dirty(handle, inode, path + depth);
2505 if (err)
2506 goto out;
2507
2508 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2509 ext4_ext_pblock(ex));
2510 ex--;
2511 ex_ee_block = le32_to_cpu(ex->ee_block);
2512 ex_ee_len = ext4_ext_get_actual_len(ex);
2513 }
2514
2515 if (correct_index && eh->eh_entries)
2516 err = ext4_ext_correct_indexes(handle, inode, path);
2517
2518 /*
2519 * If there is still an entry in the leaf node, check to see if
2520 * it references the partial cluster. This is the only place
2521 * where it could; if it doesn't, we can free the cluster.
2522 */
2523 if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2524 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2525 *partial_cluster)) {
2526 int flags = EXT4_FREE_BLOCKS_FORGET;
2527
2528 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2529 flags |= EXT4_FREE_BLOCKS_METADATA;
2530
2531 ext4_free_blocks(handle, inode, NULL,
2532 EXT4_C2B(sbi, *partial_cluster),
2533 sbi->s_cluster_ratio, flags);
2534 *partial_cluster = 0;
2535 }
2536
2537 /* if this leaf is free, then we should
2538 * remove it from the index block above */
2539 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2540 err = ext4_ext_rm_idx(handle, inode, path + depth);
2541
2542 out:
2543 return err;
2544 }
2545
2546 /*
2547 * ext4_ext_more_to_rm:
2548 * returns 1 if current index has to be freed (even partial)
2549 */
2550 static int
2551 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2552 {
2553 BUG_ON(path->p_idx == NULL);
2554
2555 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2556 return 0;
2557
2558 /*
2559 * if truncation at a deeper level happened, it wasn't partial,
2560 * so we have to consider the current index for truncation
2561 */
2562 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2563 return 0;
2564 return 1;
2565 }
2566
2567 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2568 ext4_lblk_t end)
2569 {
2570 struct super_block *sb = inode->i_sb;
2571 int depth = ext_depth(inode);
2572 struct ext4_ext_path *path = NULL;
2573 ext4_fsblk_t partial_cluster = 0;
2574 handle_t *handle;
2575 int i = 0, err;
2576
2577 ext_debug("truncate since %u to %u\n", start, end);
2578
2579 /* probably the first extent we're going to free will be the last in the block */
2580 handle = ext4_journal_start(inode, depth + 1);
2581 if (IS_ERR(handle))
2582 return PTR_ERR(handle);
2583
2584 again:
2585 ext4_ext_invalidate_cache(inode);
2586
2587 trace_ext4_ext_remove_space(inode, start, depth);
2588
2589 /*
2590 * Check if we are removing extents inside the extent tree. If that
2591 * is the case, we are going to punch a hole inside the extent tree
2592 * so we have to check whether we need to split the extent covering
2593 * the last block to remove, so that we can easily remove that part
2594 * in ext4_ext_rm_leaf().
2595 */
2596 if (end < EXT_MAX_BLOCKS - 1) {
2597 struct ext4_extent *ex;
2598 ext4_lblk_t ee_block;
2599
2600 /* find extent for this block */
2601 path = ext4_ext_find_extent(inode, end, NULL);
2602 if (IS_ERR(path)) {
2603 ext4_journal_stop(handle);
2604 return PTR_ERR(path);
2605 }
2606 depth = ext_depth(inode);
2607 ex = path[depth].p_ext;
2608 if (!ex) {
2609 ext4_ext_drop_refs(path);
2610 kfree(path);
2611 path = NULL;
2612 goto cont;
2613 }
2614
2615 ee_block = le32_to_cpu(ex->ee_block);
2616
2617 /*
2618 * See if the last block is inside the extent, if so split
2619 * the extent at 'end' block so we can easily remove the
2620 * tail of the first part of the split extent in
2621 * ext4_ext_rm_leaf().
2622 */
2623 if (end >= ee_block &&
2624 end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2625 int split_flag = 0;
2626
2627 if (ext4_ext_is_uninitialized(ex))
2628 split_flag = EXT4_EXT_MARK_UNINIT1 |
2629 EXT4_EXT_MARK_UNINIT2;
2630
2631 /*
2632 * Split the extent in two so that 'end' is the last
2633 * block in the first new extent
2634 */
2635 err = ext4_split_extent_at(handle, inode, path,
2636 end + 1, split_flag,
2637 EXT4_GET_BLOCKS_PRE_IO |
2638 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2639
2640 if (err < 0)
2641 goto out;
2642 }
2643 }
2644 cont:
2645
2646 /*
2647 * We start scanning from the right side, freeing all the blocks
2648 * after i_size and walking into the tree depth-wise.
2649 */
2650 depth = ext_depth(inode);
2651 if (path) {
2652 int k = i = depth;
2653 while (--k > 0)
2654 path[k].p_block =
2655 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2656 } else {
2657 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2658 GFP_NOFS);
2659 if (path == NULL) {
2660 ext4_journal_stop(handle);
2661 return -ENOMEM;
2662 }
2663 path[0].p_depth = depth;
2664 path[0].p_hdr = ext_inode_hdr(inode);
2665 i = 0;
2666
2667 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2668 err = -EIO;
2669 goto out;
2670 }
2671 }
2672 err = 0;
2673
2674 while (i >= 0 && err == 0) {
2675 if (i == depth) {
2676 /* this is leaf block */
2677 err = ext4_ext_rm_leaf(handle, inode, path,
2678 &partial_cluster, start,
2679 end);
2680 /* root level has p_bh == NULL, brelse() eats this */
2681 brelse(path[i].p_bh);
2682 path[i].p_bh = NULL;
2683 i--;
2684 continue;
2685 }
2686
2687 /* this is index block */
2688 if (!path[i].p_hdr) {
2689 ext_debug("initialize header\n");
2690 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2691 }
2692
2693 if (!path[i].p_idx) {
2694 /* this level hasn't been touched yet */
2695 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2696 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2697 ext_debug("init index ptr: hdr 0x%p, num %d\n",
2698 path[i].p_hdr,
2699 le16_to_cpu(path[i].p_hdr->eh_entries));
2700 } else {
2701 /* we were already here, look at the next index */
2702 path[i].p_idx--;
2703 }
2704
2705 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2706 i, EXT_FIRST_INDEX(path[i].p_hdr),
2707 path[i].p_idx);
2708 if (ext4_ext_more_to_rm(path + i)) {
2709 struct buffer_head *bh;
2710 /* go to the next level */
2711 ext_debug("move to level %d (block %llu)\n",
2712 i + 1, ext4_idx_pblock(path[i].p_idx));
2713 memset(path + i + 1, 0, sizeof(*path));
2714 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2715 if (!bh) {
2716 /* should we reset i_size? */
2717 err = -EIO;
2718 break;
2719 }
2720 if (WARN_ON(i + 1 > depth)) {
2721 err = -EIO;
2722 break;
2723 }
2724 if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2725 depth - i - 1, bh)) {
2726 err = -EIO;
2727 break;
2728 }
2729 path[i + 1].p_bh = bh;
2730
2731 /* save actual number of indexes since this
2732 * number is changed at the next iteration */
2733 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2734 i++;
2735 } else {
2736 /* we finished processing this index, go up */
2737 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2738 /* index is empty, remove it;
2739 * the handle must already have been prepared
2740 * when truncating the leaf */
2741 err = ext4_ext_rm_idx(handle, inode, path + i);
2742 }
2743 /* root level has p_bh == NULL, brelse() eats this */
2744 brelse(path[i].p_bh);
2745 path[i].p_bh = NULL;
2746 i--;
2747 ext_debug("return to level %d\n", i);
2748 }
2749 }
2750
2751 trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2752 path->p_hdr->eh_entries);
2753
2754 /* If we still have something in the partial cluster and we have removed
2755 * even the first extent, then we should free the blocks in the partial
2756 * cluster as well. */
2757 if (partial_cluster && path->p_hdr->eh_entries == 0) {
2758 int flags = EXT4_FREE_BLOCKS_FORGET;
2759
2760 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2761 flags |= EXT4_FREE_BLOCKS_METADATA;
2762
2763 ext4_free_blocks(handle, inode, NULL,
2764 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2765 EXT4_SB(sb)->s_cluster_ratio, flags);
2766 partial_cluster = 0;
2767 }
2768
2769 /* TODO: flexible tree reduction should be here */
2770 if (path->p_hdr->eh_entries == 0) {
2771 /*
2772 * truncating to zero freed the whole tree,
2773 * so we need to correct eh_depth
2774 */
2775 err = ext4_ext_get_access(handle, inode, path);
2776 if (err == 0) {
2777 ext_inode_hdr(inode)->eh_depth = 0;
2778 ext_inode_hdr(inode)->eh_max =
2779 cpu_to_le16(ext4_ext_space_root(inode, 0));
2780 err = ext4_ext_dirty(handle, inode, path);
2781 }
2782 }
2783 out:
2784 ext4_ext_drop_refs(path);
2785 kfree(path);
2786 if (err == -EAGAIN) {
2787 path = NULL;
2788 goto again;
2789 }
2790 ext4_journal_stop(handle);
2791
2792 return err;
2793 }
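/*
 * Worked example (editor's illustration, not part of the original
 * source): for a depth-1 tree the loop above starts at i = 0 on the
 * root index block and visits its indexes from right to left
 * (ext4_ext_more_to_rm() returns 1 while unvisited entries remain).
 * For each index it descends (i becomes 1 == depth), runs
 * ext4_ext_rm_leaf() on that leaf, then pops back up; empty index
 * blocks are themselves removed via ext4_ext_rm_idx() on the way up.
 * If the root itself ends up with eh_entries == 0, the code above
 * resets eh_depth to 0 so the inode is left with a valid, empty
 * extent tree.
 */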
2794
2795 /*
2796 * called at mount time
2797 */
2798 void ext4_ext_init(struct super_block *sb)
2799 {
2800 /*
2801 * possible initialization would be here
2802 */
2803
2804 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2805 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2806 printk(KERN_INFO "EXT4-fs: file extents enabled"
2807 #ifdef AGGRESSIVE_TEST
2808 ", aggressive tests"
2809 #endif
2810 #ifdef CHECK_BINSEARCH
2811 ", check binsearch"
2812 #endif
2813 #ifdef EXTENTS_STATS
2814 ", stats"
2815 #endif
2816 "\n");
2817 #endif
2818 #ifdef EXTENTS_STATS
2819 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2820 EXT4_SB(sb)->s_ext_min = 1 << 30;
2821 EXT4_SB(sb)->s_ext_max = 0;
2822 #endif
2823 }
2824 }
2825
2826 /*
2827 * called at umount time
2828 */
2829 void ext4_ext_release(struct super_block *sb)
2830 {
2831 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2832 return;
2833
2834 #ifdef EXTENTS_STATS
2835 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2836 struct ext4_sb_info *sbi = EXT4_SB(sb);
2837 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2838 sbi->s_ext_blocks, sbi->s_ext_extents,
2839 sbi->s_ext_blocks / sbi->s_ext_extents);
2840 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2841 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2842 }
2843 #endif
2844 }
2845
2846 /* FIXME!! we need to try to merge to left or right after zero-out */
2847 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2848 {
2849 ext4_fsblk_t ee_pblock;
2850 unsigned int ee_len;
2851 int ret;
2852
2853 ee_len = ext4_ext_get_actual_len(ex);
2854 ee_pblock = ext4_ext_pblock(ex);
2855
2856 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2857 if (ret > 0)
2858 ret = 0;
2859
2860 return ret;
2861 }
2862
2863 /*
2864 * ext4_split_extent_at() splits an extent at given block.
2865 *
2866 * @handle: the journal handle
2867 * @inode: the file inode
2868 * @path: the path to the extent
2869 * @split: the logical block where the extent is split.
2870 * @split_flag: indicates whether the extent can be zeroed out if the split
2871 * fails, and the states (init or uninit) of the new extents.
2872 * @flags: flags used to insert the new extent into the extent tree.
2873 *
2874 *
2875 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
2876 * states are determined by split_flag.
2877 *
2878 * There are two cases:
2879 * a> the extent is split into two extents.
2880 * b> no split is needed, and the extent is just marked.
2881 *
2882 * return 0 on success.
2883 */
2884 static int ext4_split_extent_at(handle_t *handle,
2885 struct inode *inode,
2886 struct ext4_ext_path *path,
2887 ext4_lblk_t split,
2888 int split_flag,
2889 int flags)
2890 {
2891 ext4_fsblk_t newblock;
2892 ext4_lblk_t ee_block;
2893 struct ext4_extent *ex, newex, orig_ex;
2894 struct ext4_extent *ex2 = NULL;
2895 unsigned int ee_len, depth;
2896 int err = 0;
2897
2898 ext_debug("ext4_split_extent_at: inode %lu, logical "
2899 "block %llu\n", inode->i_ino, (unsigned long long)split);
2900
2901 ext4_ext_show_leaf(inode, path);
2902
2903 depth = ext_depth(inode);
2904 ex = path[depth].p_ext;
2905 ee_block = le32_to_cpu(ex->ee_block);
2906 ee_len = ext4_ext_get_actual_len(ex);
2907 newblock = split - ee_block + ext4_ext_pblock(ex);
2908
2909 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2910
2911 err = ext4_ext_get_access(handle, inode, path + depth);
2912 if (err)
2913 goto out;
2914
2915 if (split == ee_block) {
2916 /*
2917 * case b: block @split is the block that the extent begins with,
2918 * so we just change the state of the extent, and splitting
2919 * is not needed.
2920 */
2921 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2922 ext4_ext_mark_uninitialized(ex);
2923 else
2924 ext4_ext_mark_initialized(ex);
2925
2926 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2927 ext4_ext_try_to_merge(inode, path, ex);
2928
2929 err = ext4_ext_dirty(handle, inode, path + depth);
2930 goto out;
2931 }
2932
2933 /* case a */
2934 memcpy(&orig_ex, ex, sizeof(orig_ex));
2935 ex->ee_len = cpu_to_le16(split - ee_block);
2936 if (split_flag & EXT4_EXT_MARK_UNINIT1)
2937 ext4_ext_mark_uninitialized(ex);
2938
2939 /*
2940 * the path may lead to a new leaf, not to the original leaf any more,
2941 * after ext4_ext_insert_extent() returns
2942 */
2943 err = ext4_ext_dirty(handle, inode, path + depth);
2944 if (err)
2945 goto fix_extent_len;
2946
2947 ex2 = &newex;
2948 ex2->ee_block = cpu_to_le32(split);
2949 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
2950 ext4_ext_store_pblock(ex2, newblock);
2951 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2952 ext4_ext_mark_uninitialized(ex2);
2953
2954 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2955 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2956 err = ext4_ext_zeroout(inode, &orig_ex);
2957 if (err)
2958 goto fix_extent_len;
2959 /* update the extent length and mark as initialized */
2960 ex->ee_len = cpu_to_le16(ee_len);
2961 ext4_ext_try_to_merge(inode, path, ex);
2962 err = ext4_ext_dirty(handle, inode, path + depth);
2963 goto out;
2964 } else if (err)
2965 goto fix_extent_len;
2966
2967 out:
2968 ext4_ext_show_leaf(inode, path);
2969 return err;
2970
2971 fix_extent_len:
2972 ex->ee_len = orig_ex.ee_len;
2973 ext4_ext_dirty(handle, inode, path + depth);
2974 return err;
2975 }
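/*
 * Worked example (editor's illustration, not part of the original
 * source): splitting an initialized extent covering logical blocks
 * [100, 119] at physical block 5000 with split = 110 shrinks the
 * original extent to [100, 109] (ee_len = 10) and inserts a new
 * extent [110, 119] at newblock = 110 - 100 + 5000 = 5010.  If the
 * insert fails with -ENOSPC and EXT4_EXT_MAY_ZEROOUT was passed, the
 * original range is zeroed out and its full length restored instead,
 * so the caller still ends up with initialized data.
 */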
2976
2977 /*
2978 * ext4_split_extent() splits an extent and marks the extent which is covered
2979 * by @map as @split_flag indicates
2980 *
2981 * It may result in splitting the extent into multiple extents (up to three).
2982 * There are three possibilities:
2983 * a> There is no split required
2984 * b> Splits into two extents: Split happens at either end of the extent
2985 * c> Splits into three extents: Someone is splitting in the middle of the extent
2986 *
2987 */
2988 static int ext4_split_extent(handle_t *handle,
2989 struct inode *inode,
2990 struct ext4_ext_path *path,
2991 struct ext4_map_blocks *map,
2992 int split_flag,
2993 int flags)
2994 {
2995 ext4_lblk_t ee_block;
2996 struct ext4_extent *ex;
2997 unsigned int ee_len, depth;
2998 int err = 0;
2999 int uninitialized;
3000 int split_flag1, flags1;
3001
3002 depth = ext_depth(inode);
3003 ex = path[depth].p_ext;
3004 ee_block = le32_to_cpu(ex->ee_block);
3005 ee_len = ext4_ext_get_actual_len(ex);
3006 uninitialized = ext4_ext_is_uninitialized(ex);
3007
3008 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3009 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
3010 EXT4_EXT_MAY_ZEROOUT : 0;
3011 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3012 if (uninitialized)
3013 split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3014 EXT4_EXT_MARK_UNINIT2;
3015 err = ext4_split_extent_at(handle, inode, path,
3016 map->m_lblk + map->m_len, split_flag1, flags1);
3017 if (err)
3018 goto out;
3019 }
3020
3021 ext4_ext_drop_refs(path);
3022 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3023 if (IS_ERR(path))
3024 return PTR_ERR(path);
3025
3026 if (map->m_lblk >= ee_block) {
3027 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
3028 EXT4_EXT_MAY_ZEROOUT : 0;
3029 if (uninitialized)
3030 split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3031 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3032 split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3033 err = ext4_split_extent_at(handle, inode, path,
3034 map->m_lblk, split_flag1, flags);
3035 if (err)
3036 goto out;
3037 }
3038
3039 ext4_ext_show_leaf(inode, path);
3040 out:
3041 return err ? err : map->m_len;
3042 }
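/*
 * Worked example (editor's illustration, not part of the original
 * source): for an uninitialized extent [100, 119] and a map covering
 * blocks 105..109 (m_lblk = 105, m_len = 5), the first
 * ext4_split_extent_at() above splits at 105 + 5 = 110 and the second
 * splits at 105, leaving three extents: [100, 104], [105, 109] and
 * [110, 119] (case c).  Which pieces stay uninitialized is steered by
 * the EXT4_EXT_MARK_UNINIT1/2 bits assembled into split_flag1.
 */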
3043
3044 #define EXT4_EXT_ZERO_LEN 7
3045 /*
3046 * This function is called by ext4_ext_map_blocks() if someone tries to write
3047 * to an uninitialized extent. It may result in splitting the uninitialized
3048 * extent into multiple extents (up to three - one initialized and two
3049 * uninitialized).
3050 * There are three possibilities:
3051 * a> There is no split required: Entire extent should be initialized
3052 * b> Splits into two extents: Write is happening at either end of the extent
3053 * c> Splits into three extents: Someone is writing in the middle of the extent
3054 *
3055 * Pre-conditions:
3056 * - The extent pointed to by 'path' is uninitialized.
3057 * - The extent pointed to by 'path' contains a superset
3058 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3059 *
3060 * Post-conditions on success:
3061 * - the returned value is the number of blocks beyond map->m_lblk
3062 * that are allocated and initialized.
3063 * It is guaranteed to be >= map->m_len.
3064 */
3065 static int ext4_ext_convert_to_initialized(handle_t *handle,
3066 struct inode *inode,
3067 struct ext4_map_blocks *map,
3068 struct ext4_ext_path *path)
3069 {
3070 struct ext4_extent_header *eh;
3071 struct ext4_map_blocks split_map;
3072 struct ext4_extent zero_ex;
3073 struct ext4_extent *ex;
3074 ext4_lblk_t ee_block, eof_block;
3075 unsigned int ee_len, depth;
3076 int allocated;
3077 int err = 0;
3078 int split_flag = 0;
3079
3080 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3081 "block %llu, max_blocks %u\n", inode->i_ino,
3082 (unsigned long long)map->m_lblk, map->m_len);
3083
3084 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3085 inode->i_sb->s_blocksize_bits;
3086 if (eof_block < map->m_lblk + map->m_len)
3087 eof_block = map->m_lblk + map->m_len;
3088
3089 depth = ext_depth(inode);
3090 eh = path[depth].p_hdr;
3091 ex = path[depth].p_ext;
3092 ee_block = le32_to_cpu(ex->ee_block);
3093 ee_len = ext4_ext_get_actual_len(ex);
3094 allocated = ee_len - (map->m_lblk - ee_block);
3095
3096 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3097
3098 /* Pre-conditions */
3099 BUG_ON(!ext4_ext_is_uninitialized(ex));
3100 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3101
3102 /*
3103 * Attempt to transfer newly initialized blocks from the currently
3104 * uninitialized extent to its left neighbor. This is much cheaper
3105 * than an insertion followed by a merge as those involve costly
3106 * memmove() calls. This is the common case in steady state for
3107 * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3108 * writes.
3109 *
3110 * Limitations of the current logic:
3111 * - L1: we only deal with writes at the start of the extent.
3112 * The approach could be extended to writes at the end
3113 * of the extent but this scenario was deemed less common.
3114 * - L2: we do not deal with writes covering the whole extent.
3115 * This would require removing the extent if the transfer
3116 * is possible.
3117 * - L3: we only attempt to merge with an extent stored in the
3118 * same extent tree node.
3119 */
3120 if ((map->m_lblk == ee_block) && /*L1*/
3121 (map->m_len < ee_len) && /*L2*/
3122 (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/
3123 struct ext4_extent *prev_ex;
3124 ext4_lblk_t prev_lblk;
3125 ext4_fsblk_t prev_pblk, ee_pblk;
3126 unsigned int prev_len, write_len;
3127
3128 prev_ex = ex - 1;
3129 prev_lblk = le32_to_cpu(prev_ex->ee_block);
3130 prev_len = ext4_ext_get_actual_len(prev_ex);
3131 prev_pblk = ext4_ext_pblock(prev_ex);
3132 ee_pblk = ext4_ext_pblock(ex);
3133 write_len = map->m_len;
3134
3135 /*
3136 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3137 * upon those conditions:
3138 * - C1: prev_ex is initialized,
3139 * - C2: prev_ex is logically abutting ex,
3140 * - C3: prev_ex is physically abutting ex,
3141 * - C4: prev_ex can receive the additional blocks without
3142 * overflowing the (initialized) length limit.
3143 */
3144 if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/
3145 ((prev_lblk + prev_len) == ee_block) && /*C2*/
3146 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3147 (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/
3148 err = ext4_ext_get_access(handle, inode, path + depth);
3149 if (err)
3150 goto out;
3151
3152 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3153 map, ex, prev_ex);
3154
3155 /* Shift the start of ex by 'write_len' blocks */
3156 ex->ee_block = cpu_to_le32(ee_block + write_len);
3157 ext4_ext_store_pblock(ex, ee_pblk + write_len);
3158 ex->ee_len = cpu_to_le16(ee_len - write_len);
3159 ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3160
3161 /* Extend prev_ex by 'write_len' blocks */
3162 prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3163
3164 /* Mark the block containing both extents as dirty */
3165 ext4_ext_dirty(handle, inode, path + depth);
3166
3167 /* Update path to point to the right extent */
3168 path[depth].p_ext = prev_ex;
3169
3170 /* Result: number of initialized blocks past m_lblk */
3171 allocated = write_len;
3172 goto out;
3173 }
3174 }
3175
3176 WARN_ON(map->m_lblk < ee_block);
3177 /*
3178 * It is safe to convert extent to initialized via explicit
3179 * zeroout only if the extent is fully inside i_size or new_size.
3180 */
3181 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3182
3183 /* If the extent has at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3184 if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3185 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3186 err = ext4_ext_zeroout(inode, ex);
3187 if (err)
3188 goto out;
3189
3190 err = ext4_ext_get_access(handle, inode, path + depth);
3191 if (err)
3192 goto out;
3193 ext4_ext_mark_initialized(ex);
3194 ext4_ext_try_to_merge(inode, path, ex);
3195 err = ext4_ext_dirty(handle, inode, path + depth);
3196 goto out;
3197 }
3198
3199 /*
3200 * four cases:
3201 * 1. split the extent into three extents.
3202 * 2. split the extent into two extents, zeroout the first half.
3203 * 3. split the extent into two extents, zeroout the second half.
3204 * 4. split the extent into two extents without zeroout.
3205 */
3206 split_map.m_lblk = map->m_lblk;
3207 split_map.m_len = map->m_len;
3208
3209 if (allocated > map->m_len) {
3210 if (allocated <= EXT4_EXT_ZERO_LEN &&
3211 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3212 /* case 3 */
3213 zero_ex.ee_block =
3214 cpu_to_le32(map->m_lblk);
3215 zero_ex.ee_len = cpu_to_le16(allocated);
3216 ext4_ext_store_pblock(&zero_ex,
3217 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3218 err = ext4_ext_zeroout(inode, &zero_ex);
3219 if (err)
3220 goto out;
3221 split_map.m_lblk = map->m_lblk;
3222 split_map.m_len = allocated;
3223 } else if ((map->m_lblk - ee_block + map->m_len <
3224 EXT4_EXT_ZERO_LEN) &&
3225 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3226 /* case 2 */
3227 if (map->m_lblk != ee_block) {
3228 zero_ex.ee_block = ex->ee_block;
3229 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3230 ee_block);
3231 ext4_ext_store_pblock(&zero_ex,
3232 ext4_ext_pblock(ex));
3233 err = ext4_ext_zeroout(inode, &zero_ex);
3234 if (err)
3235 goto out;
3236 }
3237
3238 split_map.m_lblk = ee_block;
3239 split_map.m_len = map->m_lblk - ee_block + map->m_len;
3240 allocated = map->m_len;
3241 }
3242 }
3243
3244 allocated = ext4_split_extent(handle, inode, path,
3245 &split_map, split_flag, 0);
3246 if (allocated < 0)
3247 err = allocated;
3248
3249 out:
3250 return err ? err : allocated;
3251 }
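/*
 * Worked example (editor's illustration, not part of the original
 * source): with EXT4_EXT_ZERO_LEN = 7, a write into an uninitialized
 * extent of length 12 satisfies ee_len <= 2 * EXT4_EXT_ZERO_LEN, so
 * (when EXT4_EXT_MAY_ZEROOUT is set) the whole extent is zeroed out
 * and marked initialized with no split at all.  A write into the
 * middle of a length-40 extent instead falls through to
 * ext4_split_extent() as case 1: three extents, with only the
 * written span initialized.
 */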
3252
3253 /*
3254 * This function is called by ext4_ext_map_blocks() from
3255 * ext4_get_blocks_dio_write() when DIO writes
3256 * to an uninitialized extent.
3257 *
3258 * Writing to an uninitialized extent may result in splitting the uninitialized
3259 * extent into multiple initialized/uninitialized extents (up to three).
3260 * There are three possibilities:
3261 * a> There is no split required: Entire extent should be uninitialized
3262 * b> Splits into two extents: Write is happening at either end of the extent
3263 * c> Splits into three extents: Someone is writing in the middle of the extent
3264 *
3265 * One or more index blocks may be needed if the extent tree grows after
3266 * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
3267 * completion, we need to split the uninitialized extent before DIO submits
3268 * the IO. The uninitialized extent handled at this time will be split
3269 * into (at most) three uninitialized extents. After IO completes, the part
3270 * being filled will be converted to initialized by the end_io callback
3271 * via ext4_convert_unwritten_extents().
3272 *
3273 * Returns the size of uninitialized extent to be written on success.
3274 */
3275 static int ext4_split_unwritten_extents(handle_t *handle,
3276 struct inode *inode,
3277 struct ext4_map_blocks *map,
3278 struct ext4_ext_path *path,
3279 int flags)
3280 {
3281 ext4_lblk_t eof_block;
3282 ext4_lblk_t ee_block;
3283 struct ext4_extent *ex;
3284 unsigned int ee_len;
3285 int split_flag = 0, depth;
3286
3287 ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3288 "block %llu, max_blocks %u\n", inode->i_ino,
3289 (unsigned long long)map->m_lblk, map->m_len);
3290
3291 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3292 inode->i_sb->s_blocksize_bits;
3293 if (eof_block < map->m_lblk + map->m_len)
3294 eof_block = map->m_lblk + map->m_len;
3295 /*
3296 * It is safe to convert extent to initialized via explicit
3297 * zeroout only if the extent is fully inside i_size or new_size.
3298 */
3299 depth = ext_depth(inode);
3300 ex = path[depth].p_ext;
3301 ee_block = le32_to_cpu(ex->ee_block);
3302 ee_len = ext4_ext_get_actual_len(ex);
3303
3304 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3305 split_flag |= EXT4_EXT_MARK_UNINIT2;
3306
3307 flags |= EXT4_GET_BLOCKS_PRE_IO;
3308 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3309 }
3310
3311 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3312 struct inode *inode,
3313 struct ext4_ext_path *path)
3314 {
3315 struct ext4_extent *ex;
3316 int depth;
3317 int err = 0;
3318
3319 depth = ext_depth(inode);
3320 ex = path[depth].p_ext;
3321
3322 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3323 "block %llu, max_blocks %u\n", inode->i_ino,
3324 (unsigned long long)le32_to_cpu(ex->ee_block),
3325 ext4_ext_get_actual_len(ex));
3326
3327 err = ext4_ext_get_access(handle, inode, path + depth);
3328 if (err)
3329 goto out;
3330 /* first mark the extent as initialized */
3331 ext4_ext_mark_initialized(ex);
3332
3333 /* note: ext4_ext_correct_indexes() isn't needed here because
3334 * borders are not changed
3335 */
3336 ext4_ext_try_to_merge(inode, path, ex);
3337
3338 /* Mark modified extent as dirty */
3339 err = ext4_ext_dirty(handle, inode, path + depth);
3340 out:
3341 ext4_ext_show_leaf(inode, path);
3342 return err;
3343 }
3344
3345 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3346 sector_t block, int count)
3347 {
3348 int i;
3349 for (i = 0; i < count; i++)
3350 unmap_underlying_metadata(bdev, block + i);
3351 }
3352
3353 /*
3354 * Handle EOFBLOCKS_FL flag, clearing it if necessary
3355 */
3356 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3357 ext4_lblk_t lblk,
3358 struct ext4_ext_path *path,
3359 unsigned int len)
3360 {
3361 int i, depth;
3362 struct ext4_extent_header *eh;
3363 struct ext4_extent *last_ex;
3364
3365 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3366 return 0;
3367
3368 depth = ext_depth(inode);
3369 eh = path[depth].p_hdr;
3370
3371 /*
3372 * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3373 * do not care about this case anymore. Simply remove the flag
3374 * if there are no extents.
3375 */
3376 if (unlikely(!eh->eh_entries))
3377 goto out;
3378 last_ex = EXT_LAST_EXTENT(eh);
3379 /*
3380 * We should clear the EOFBLOCKS_FL flag if we are writing the
3381 * last block in the last extent in the file. We test this by
3382 * first checking to see if the caller to
3383 * ext4_ext_get_blocks() was interested in the last block (or
3384 * a block beyond the last block) in the current extent. If
3385 * this turns out to be false, we can bail out from this
3386 * function immediately.
3387 */
3388 if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3389 ext4_ext_get_actual_len(last_ex))
3390 return 0;
3391 /*
3392 * If the caller does appear to be planning to write at or
3393 * beyond the end of the current extent, we then test to see
3394 * if the current extent is the last extent in the file, by
3395 * checking to make sure it was reached via the rightmost node
3396 * at each level of the tree.
3397 */
3398 for (i = depth-1; i >= 0; i--)
3399 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3400 return 0;
3401 out:
3402 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3403 return ext4_mark_inode_dirty(handle, inode);
3404 }
3405
3406 /**
3407 * ext4_find_delalloc_range: find delayed allocated block in the given range.
3408 *
3409 * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3410 * whether there are any buffers marked for delayed allocation. It returns '1'
3411 * on the first delalloc'ed buffer head found. If no buffer head in the given
3412 * range is marked for delalloc, it returns 0.
3413 * lblk_start should always be <= lblk_end.
3414 * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3415 * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3416 * block sooner). This is useful when blocks are truncated sequentially from
3417 * lblk_start towards lblk_end.
3418 */
3419 static int ext4_find_delalloc_range(struct inode *inode,
3420 ext4_lblk_t lblk_start,
3421 ext4_lblk_t lblk_end,
3422 int search_hint_reverse)
3423 {
3424 struct address_space *mapping = inode->i_mapping;
3425 struct buffer_head *head, *bh = NULL;
3426 struct page *page;
3427 ext4_lblk_t i, pg_lblk;
3428 pgoff_t index;
3429
3430 if (!test_opt(inode->i_sb, DELALLOC))
3431 return 0;
3432
3433 /* reverse search won't work if fs block size is less than page size */
3434 if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3435 search_hint_reverse = 0;
3436
3437 if (search_hint_reverse)
3438 i = lblk_end;
3439 else
3440 i = lblk_start;
3441
3442 index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3443
3444 while ((i >= lblk_start) && (i <= lblk_end)) {
3445 page = find_get_page(mapping, index);
3446 if (!page)
3447 goto nextpage;
3448
3449 if (!page_has_buffers(page))
3450 goto nextpage;
3451
3452 head = page_buffers(page);
3453 if (!head)
3454 goto nextpage;
3455
3456 bh = head;
3457 pg_lblk = index << (PAGE_CACHE_SHIFT -
3458 inode->i_blkbits);
3459 do {
3460 if (unlikely(pg_lblk < lblk_start)) {
3461 /*
3462 * This is possible when fs block size is less
3463 * than page size and our cluster starts/ends in the
3464 * middle of the page. So we need to skip the
3465 * initial few blocks until we reach the 'lblk'
3466 */
3467 pg_lblk++;
3468 continue;
3469 }
3470
3471 /* Check if the buffer is delayed allocated and that it
3472 * is not yet mapped. (when da-buffers are mapped during
3473 * their writeout, their da_mapped bit is set.)
3474 */
3475 if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3476 page_cache_release(page);
3477 trace_ext4_find_delalloc_range(inode,
3478 lblk_start, lblk_end,
3479 search_hint_reverse,
3480 1, i);
3481 return 1;
3482 }
3483 if (search_hint_reverse)
3484 i--;
3485 else
3486 i++;
3487 } while ((i >= lblk_start) && (i <= lblk_end) &&
3488 ((bh = bh->b_this_page) != head));
3489 nextpage:
3490 if (page)
3491 page_cache_release(page);
3492 /*
3493 * Move to next page. 'i' will be the first lblk in the next
3494 * page.
3495 */
3496 if (search_hint_reverse)
3497 index--;
3498 else
3499 index++;
3500 i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3501 }
3502
3503 trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3504 search_hint_reverse, 0, 0);
3505 return 0;
3506 }
3507
3508 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3509 int search_hint_reverse)
3510 {
3511 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3512 ext4_lblk_t lblk_start, lblk_end;
3513 lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3514 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3515
3516 return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3517 search_hint_reverse);
3518 }
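/*
 * Worked example (editor's illustration, not part of the original
 * source): with s_cluster_ratio = 16 and lblk = 100, the masking
 * above yields lblk_start = 100 & ~15 = 96 and
 * lblk_end = 96 + 15 = 111, so the whole cluster containing block
 * 100 is scanned for delayed-allocation buffers.
 */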
3519
3520 /**
3521 * Determines how many complete clusters (out of those specified by the 'map')
3522 * are under delalloc and for which quota was reserved.
3523 * This function is called when we are writing out the blocks that were
3524 * originally written with their allocation delayed, but then the space was
3525 * allocated using fallocate() before the delayed allocation could be resolved.
3526 * The cases to look for are:
3527 * ('=' indicates delayed allocated blocks
3528 * '-' indicates non-delayed allocated blocks)
3529 * (a) partial clusters towards beginning and/or end outside of allocated range
3530 * are not delalloc'ed.
3531 * Ex:
3532 * |----c---=|====c====|====c====|===-c----|
3533 * |++++++ allocated ++++++|
3534 * ==> 4 complete clusters in above example
3535 *
3536 * (b) partial cluster (outside of allocated range) towards either end is
3537 * marked for delayed allocation. In this case, we will exclude that
3538 * cluster.
3539 * Ex:
3540 * |----====c========|========c========|
3541 * |++++++ allocated ++++++|
3542 * ==> 1 complete cluster in above example
3543 *
3544 * Ex:
3545 * |================c================|
3546 * |++++++ allocated ++++++|
3547 * ==> 0 complete clusters in above example
3548 *
3549 * ext4_da_update_reserve_space() will be called only if we
3550 * determine here that there were some "entire" clusters that span
3551 * this 'allocated' range.
3552 * In the non-bigalloc case, this function will just end up returning num_blks
3553 * without ever calling ext4_find_delalloc_range.
3554 */
3555 static unsigned int
3556 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3557 unsigned int num_blks)
3558 {
3559 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3560 ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3561 ext4_lblk_t lblk_from, lblk_to, c_offset;
3562 unsigned int allocated_clusters = 0;
3563
3564 alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3565 alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3566
3567 /* max possible clusters for this allocation */
3568 allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3569
3570 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3571
3572 /* Check towards left side */
3573 c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3574 if (c_offset) {
3575 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3576 lblk_to = lblk_from + c_offset - 1;
3577
3578 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3579 allocated_clusters--;
3580 }
3581
3582 /* Now check towards right. */
3583 c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3584 if (allocated_clusters && c_offset) {
3585 lblk_from = lblk_start + num_blks;
3586 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3587
3588 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3589 allocated_clusters--;
3590 }
3591
3592 return allocated_clusters;
3593 }
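/*
 * Worked example (editor's illustration, not part of the original
 * source): with s_cluster_ratio = 16, lblk_start = 40 and
 * num_blks = 45, the allocation spans clusters EXT4_B2C(sbi, 40) = 2
 * through EXT4_B2C(sbi, 84) = 5, so allocated_clusters starts at 4.
 * The left check scans the partial cluster [32, 39] and the right
 * check scans [85, 95]; each side found to be delalloc'ed (diagram
 * (b) above) subtracts one cluster from the count.
 */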
3594
3595 static int
3596 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3597 struct ext4_map_blocks *map,
3598 struct ext4_ext_path *path, int flags,
3599 unsigned int allocated, ext4_fsblk_t newblock)
3600 {
3601 int ret = 0;
3602 int err = 0;
3603 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3604
3605 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3606 "block %llu, max_blocks %u, flags %x, allocated %u\n",
3607 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3608 flags, allocated);
3609 ext4_ext_show_leaf(inode, path);
3610
3611 trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3612 newblock);
3613
3614 /* get_block() called before submitting the IO: split the extent */
3615 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3616 ret = ext4_split_unwritten_extents(handle, inode, map,
3617 path, flags);
3618 /*
3619 * Flag the inode (non-AIO case) or the end_io struct (AIO case)
3620 * so that this IO gets converted to written when the IO is
3621 * completed
3622 */
3623 if (io)
3624 ext4_set_io_unwritten_flag(inode, io);
3625 else
3626 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3627 if (ext4_should_dioread_nolock(inode))
3628 map->m_flags |= EXT4_MAP_UNINIT;
3629 goto out;
3630 }
3631 /* IO end_io complete, convert the filled extent to written */
3632 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3633 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3634 path);
3635 if (ret >= 0) {
3636 ext4_update_inode_fsync_trans(handle, inode, 1);
3637 err = check_eofblocks_fl(handle, inode, map->m_lblk,
3638 path, map->m_len);
3639 } else
3640 err = ret;
3641 goto out2;
3642 }
3643 /* buffered IO case */
3644 /*
3645 * repeated fallocate creation request;
3646 * we already have an unwritten extent
3647 */
3648 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3649 goto map_out;
3650
3651 /* buffered READ or buffered write_begin() lookup */
3652 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3653 /*
3654 * We have blocks reserved already. We
3655 * return allocated blocks so that delalloc
3656 * won't do block reservation for us. But
3657 * the buffer head will be unmapped so that
3658 * a read from the block returns 0s.
3659 */
3660 map->m_flags |= EXT4_MAP_UNWRITTEN;
3661 goto out1;
3662 }
3663
3664 /* buffered write, writepage time, convert */
3665 ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3666 if (ret >= 0)
3667 ext4_update_inode_fsync_trans(handle, inode, 1);
3668 out:
3669 if (ret <= 0) {
3670 err = ret;
3671 goto out2;
3672 } else
3673 allocated = ret;
3674 map->m_flags |= EXT4_MAP_NEW;
3675 /*
3676 * if we allocated more blocks than requested,
3677 * we need to make sure we unmap the extra blocks
3678 * allocated. The actually needed blocks will get
3679 * unmapped later when we find the buffer_head marked
3680 * new.
3681 */
3682 if (allocated > map->m_len) {
3683 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3684 newblock + map->m_len,
3685 allocated - map->m_len);
3686 allocated = map->m_len;
3687 }
3688
3689 /*
3690 * If we have done fallocate with the offset that is already
3691 * delayed allocated, we would have block reservation
3692 * and quota reservation done in the delayed write path.
3693 * But fallocate would have already updated quota and block
3694 * count for this offset. So cancel these reservations.
3695 */
3696 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3697 unsigned int reserved_clusters;
3698 reserved_clusters = get_reserved_cluster_alloc(inode,
3699 map->m_lblk, map->m_len);
3700 if (reserved_clusters)
3701 ext4_da_update_reserve_space(inode,
3702 reserved_clusters,
3703 0);
3704 }
3705
3706 map_out:
3707 map->m_flags |= EXT4_MAP_MAPPED;
3708 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3709 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3710 map->m_len);
3711 if (err < 0)
3712 goto out2;
3713 }
3714 out1:
3715 if (allocated > map->m_len)
3716 allocated = map->m_len;
3717 ext4_ext_show_leaf(inode, path);
3718 map->m_pblk = newblock;
3719 map->m_len = allocated;
3720 out2:
3721 if (path) {
3722 ext4_ext_drop_refs(path);
3723 kfree(path);
3724 }
3725 return err ? err : allocated;
3726 }
3727
3728 /*
3729 * get_implied_cluster_alloc - check to see if the requested
3730 * allocation (in the map structure) overlaps with a cluster already
3731 * allocated in an extent.
3732 * @sb The filesystem superblock structure
3733 * @map The requested lblk->pblk mapping
3734 * @ex The extent structure which might contain an implied
3735 * cluster allocation
3736 *
3737 * This function is called by ext4_ext_map_blocks() after we failed to
3738 * find blocks that were already in the inode's extent tree. Hence,
3739 * we know that the beginning of the requested region cannot overlap
3740 * the extent from the inode's extent tree. There are three cases we
3741 * want to catch. The first is this case:
3742 *
3743 * |--- cluster # N--|
3744 * |--- extent ---| |---- requested region ---|
3745 * |==========|
3746 *
3747 * The second case that we need to test for is this one:
3748 *
3749 * |--------- cluster # N ----------------|
3750 * |--- requested region --| |------- extent ----|
3751 * |=======================|
3752 *
3753 * The third case is when the requested region lies between two extents
3754 * within the same cluster:
3755 * |------------- cluster # N-------------|
3756 * |----- ex -----| |---- ex_right ----|
3757 * |------ requested region ------|
3758 * |================|
3759 *
3760 * In each of the above cases, we need to set the map->m_pblk and
3761 * map->m_len so they correspond to the extent labelled as
3762 * "|====|" from cluster #N, since it is already in use for data in
3763 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
3764 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3765 * as a new "allocated" block region. Otherwise, we will return 0 and
3766 * ext4_ext_map_blocks() will then allocate one or more new clusters
3767 * by calling ext4_mb_new_blocks().
3768 */
3769 static int get_implied_cluster_alloc(struct super_block *sb,
3770 struct ext4_map_blocks *map,
3771 struct ext4_extent *ex,
3772 struct ext4_ext_path *path)
3773 {
3774 struct ext4_sb_info *sbi = EXT4_SB(sb);
3775 ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3776 ext4_lblk_t ex_cluster_start, ex_cluster_end;
3777 ext4_lblk_t rr_cluster_start;
3778 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3779 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3780 unsigned short ee_len = ext4_ext_get_actual_len(ex);
3781
3782 /* The extent passed in that we are trying to match */
3783 ex_cluster_start = EXT4_B2C(sbi, ee_block);
3784 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3785
3786 /* The requested region passed into ext4_map_blocks() */
3787 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3788
3789 if ((rr_cluster_start == ex_cluster_end) ||
3790 (rr_cluster_start == ex_cluster_start)) {
3791 if (rr_cluster_start == ex_cluster_end)
3792 ee_start += ee_len - 1;
3793 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3794 c_offset;
3795 map->m_len = min(map->m_len,
3796 (unsigned) sbi->s_cluster_ratio - c_offset);
3797 /*
3798 * Check for and handle this case:
3799 *
3800 * |--------- cluster # N-------------|
3801 * |------- extent ----|
3802 * |--- requested region ---|
3803 * |===========|
3804 */
3805
3806 if (map->m_lblk < ee_block)
3807 map->m_len = min(map->m_len, ee_block - map->m_lblk);
3808
3809 /*
3810 * Check for the case where there is already another allocated
3811 * block to the right of 'ex' but before the end of the cluster.
3812 *
3813 * |------------- cluster # N-------------|
3814 * |----- ex -----| |---- ex_right ----|
3815 * |------ requested region ------|
3816 * |================|
3817 */
3818 if (map->m_lblk > ee_block) {
3819 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3820 map->m_len = min(map->m_len, next - map->m_lblk);
3821 }
3822
3823 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3824 return 1;
3825 }
3826
3827 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3828 return 0;
3829 }
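/*
 * Editor's illustrative sketch, not part of the original source and
 * guarded with #if 0 so it can never build into the kernel: a minimal
 * user-space model of the cluster arithmetic above, assuming a
 * bigalloc cluster ratio of 4 blocks per cluster. b2c() stands in for
 * EXT4_B2C(), and the mask mirrors the c_offset computation.
 */
#if 0
#include <stdio.h>

#define CLUSTER_RATIO 4u	/* assumed s_cluster_ratio */

static unsigned int b2c(unsigned int block)
{
	return block / CLUSTER_RATIO;	/* models EXT4_B2C() */
}

int main(void)
{
	unsigned int ee_block = 2, ee_len = 4;	/* extent covers blocks 2..5 */
	unsigned int m_lblk = 6;		/* requested logical block */
	unsigned int c_offset = m_lblk & (CLUSTER_RATIO - 1);

	/* extent spans clusters 0..1; the request starts in cluster 1 */
	printf("ex clusters %u..%u, rr cluster %u, c_offset %u\n",
	       b2c(ee_block), b2c(ee_block + ee_len - 1),
	       b2c(m_lblk), c_offset);
	/*
	 * rr_cluster_start == ex_cluster_end here, so the first case in
	 * the comment above applies: cluster 1 is already allocated and
	 * can be reused without calling ext4_mb_new_blocks().
	 */
	return 0;
}
#endif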
3830
3831
3832 /*
3833 * Block allocation/map/preallocation routine for extents based files
3834 *
3835 *
3836 * Need to be called with
3837 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3838 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3839 *
3840 * return > 0, number of blocks already mapped/allocated
3841 * if create == 0 and these are pre-allocated blocks
3842 * buffer head is unmapped
3843 * otherwise blocks are mapped
3844 *
3845 * return = 0, if plain lookup failed (blocks have not been allocated)
3846 * buffer head is unmapped
3847 *
3848 * return < 0, error case (an illustrative caller sketch follows the function).
3849 */
3850 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3851 struct ext4_map_blocks *map, int flags)
3852 {
3853 struct ext4_ext_path *path = NULL;
3854 struct ext4_extent newex, *ex, *ex2;
3855 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3856 ext4_fsblk_t newblock = 0;
3857 int free_on_err = 0, err = 0, depth, ret;
3858 unsigned int allocated = 0, offset = 0;
3859 unsigned int allocated_clusters = 0;
3860 struct ext4_allocation_request ar;
3861 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3862 ext4_lblk_t cluster_offset;
3863
3864 ext_debug("blocks %u/%u requested for inode %lu\n",
3865 map->m_lblk, map->m_len, inode->i_ino);
3866 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3867
3868 /* check in cache */
3869 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3870 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3871 if ((sbi->s_cluster_ratio > 1) &&
3872 ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3873 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3874
3875 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3876 /*
3877 * block isn't allocated yet and
3878 * user doesn't want to allocate it
3879 */
3880 goto out2;
3881 }
3882 /* we should allocate requested block */
3883 } else {
3884 /* block is already allocated */
3885 if (sbi->s_cluster_ratio > 1)
3886 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3887 newblock = map->m_lblk
3888 - le32_to_cpu(newex.ee_block)
3889 + ext4_ext_pblock(&newex);
3890 /* number of remaining blocks in the extent */
3891 allocated = ext4_ext_get_actual_len(&newex) -
3892 (map->m_lblk - le32_to_cpu(newex.ee_block));
3893 goto out;
3894 }
3895 }
3896
3897 /* find extent for this block */
3898 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3899 if (IS_ERR(path)) {
3900 err = PTR_ERR(path);
3901 path = NULL;
3902 goto out2;
3903 }
3904
3905 depth = ext_depth(inode);
3906
3907 /*
3908 * a consistent leaf must not be empty;
3909 * this situation is possible, though, _during_ tree modification;
3910 * this is why assert can't be put in ext4_ext_find_extent()
3911 */
3912 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3913 EXT4_ERROR_INODE(inode, "bad extent address "
3914 "lblock: %lu, depth: %d pblock %lld",
3915 (unsigned long) map->m_lblk, depth,
3916 path[depth].p_block);
3917 err = -EIO;
3918 goto out2;
3919 }
3920
3921 ex = path[depth].p_ext;
3922 if (ex) {
3923 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3924 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3925 unsigned short ee_len;
3926
3927 /*
3928 * Uninitialized extents are treated as holes, except that
3929 * we split out initialized portions during a write.
3930 */
3931 ee_len = ext4_ext_get_actual_len(ex);
3932
3933 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3934
3935 /* if found extent covers block, simply return it */
3936 if (in_range(map->m_lblk, ee_block, ee_len)) {
3937 newblock = map->m_lblk - ee_block + ee_start;
3938 /* number of remaining blocks in the extent */
3939 allocated = ee_len - (map->m_lblk - ee_block);
3940 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3941 ee_block, ee_len, newblock);
3942
3943 /*
3944 * Do not put uninitialized extent
3945 * in the cache
3946 */
3947 if (!ext4_ext_is_uninitialized(ex)) {
3948 ext4_ext_put_in_cache(inode, ee_block,
3949 ee_len, ee_start);
3950 goto out;
3951 }
3952 ret = ext4_ext_handle_uninitialized_extents(
3953 handle, inode, map, path, flags,
3954 allocated, newblock);
3955 return ret;
3956 }
3957 }
3958
3959 if ((sbi->s_cluster_ratio > 1) &&
3960 ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3961 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3962
3963 /*
3964 * requested block isn't allocated yet;
3965 * we cannot try to create blocks if the create flag is zero
3966 */
3967 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3968 /*
3969 * put just found gap into cache to speed up
3970 * subsequent requests
3971 */
3972 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3973 goto out2;
3974 }
3975
3976 /*
3977 * Okay, we need to do block allocation.
3978 */
3979 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
3980 newex.ee_block = cpu_to_le32(map->m_lblk);
3981 cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3982
3983 /*
3984 * If we are doing bigalloc, check to see if the extent returned
3985 * by ext4_ext_find_extent() implies a cluster we can use.
3986 */
3987 if (cluster_offset && ex &&
3988 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
3989 ar.len = allocated = map->m_len;
3990 newblock = map->m_pblk;
3991 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3992 goto got_allocated_blocks;
3993 }
3994
3995 /* find neighbour allocated blocks */
3996 ar.lleft = map->m_lblk;
3997 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3998 if (err)
3999 goto out2;
4000 ar.lright = map->m_lblk;
4001 ex2 = NULL;
4002 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4003 if (err)
4004 goto out2;
4005
4006 /* Check if the extent after searching to the right implies a
4007 * cluster we can use. */
4008 if ((sbi->s_cluster_ratio > 1) && ex2 &&
4009 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4010 ar.len = allocated = map->m_len;
4011 newblock = map->m_pblk;
4012 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4013 goto got_allocated_blocks;
4014 }
4015
4016 /*
4017 * See if request is beyond maximum number of blocks we can have in
4018 * a single extent. For an initialized extent this limit is
4019 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4020 * EXT_UNINIT_MAX_LEN.
4021 */
4022 if (map->m_len > EXT_INIT_MAX_LEN &&
4023 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4024 map->m_len = EXT_INIT_MAX_LEN;
4025 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4026 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4027 map->m_len = EXT_UNINIT_MAX_LEN;
4028
4029 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4030 newex.ee_len = cpu_to_le16(map->m_len);
4031 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4032 if (err)
4033 allocated = ext4_ext_get_actual_len(&newex);
4034 else
4035 allocated = map->m_len;
4036
4037 /* allocate new block */
4038 ar.inode = inode;
4039 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4040 ar.logical = map->m_lblk;
4041 /*
4042 * We calculate the offset from the beginning of the cluster
4043 * for the logical block number, since when we allocate a
4044 * physical cluster, the physical block should start at the
4045 * same offset from the beginning of the cluster. This is
4046 * needed so that future calls to get_implied_cluster_alloc()
4047 * work correctly.
4048 */
4049 offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4050 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4051 ar.goal -= offset;
4052 ar.logical -= offset;
4053 if (S_ISREG(inode->i_mode))
4054 ar.flags = EXT4_MB_HINT_DATA;
4055 else
4056 /* disable in-core preallocation for non-regular files */
4057 ar.flags = 0;
4058 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4059 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4060 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4061 if (!newblock)
4062 goto out2;
4063 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4064 ar.goal, newblock, allocated);
4065 free_on_err = 1;
4066 allocated_clusters = ar.len;
4067 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4068 if (ar.len > allocated)
4069 ar.len = allocated;
4070
4071 got_allocated_blocks:
4072 /* try to insert new extent into found leaf and return */
4073 ext4_ext_store_pblock(&newex, newblock + offset);
4074 newex.ee_len = cpu_to_le16(ar.len);
4075 /* Mark uninitialized */
4076 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4077 ext4_ext_mark_uninitialized(&newex);
4078 /*
4079 * io_end structure was created for every IO write to an
4080 * uninitialized extent. To avoid unnecessary conversion,
4081 * here we flag the IO that really needs the conversion.
4082 * For the non-async direct IO case, flag the inode state
4083 * so that we perform the conversion when IO is done.
4084 */
4085 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4086 if (io)
4087 ext4_set_io_unwritten_flag(inode, io);
4088 else
4089 ext4_set_inode_state(inode,
4090 EXT4_STATE_DIO_UNWRITTEN);
4091 }
4092 if (ext4_should_dioread_nolock(inode))
4093 map->m_flags |= EXT4_MAP_UNINIT;
4094 }
4095
4096 err = 0;
4097 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4098 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4099 path, ar.len);
4100 if (!err)
4101 err = ext4_ext_insert_extent(handle, inode, path,
4102 &newex, flags);
4103 if (err && free_on_err) {
4104 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4105 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4106 /* free data blocks we just allocated */
4107 /* not a good idea to call discard here directly,
4108 * but otherwise we'd need to call it every free() */
4109 ext4_discard_preallocations(inode);
4110 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4111 ext4_ext_get_actual_len(&newex), fb_flags);
4112 goto out2;
4113 }
4114
4115 /* previous routine could use block we allocated */
4116 newblock = ext4_ext_pblock(&newex);
4117 allocated = ext4_ext_get_actual_len(&newex);
4118 if (allocated > map->m_len)
4119 allocated = map->m_len;
4120 map->m_flags |= EXT4_MAP_NEW;
4121
4122 /*
4123 * Update reserved blocks/metadata blocks after successful
4124 * block allocation which had been deferred till now.
4125 */
4126 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4127 unsigned int reserved_clusters;
4128 /*
4129 * Check how many clusters we had reserved for this allocated range
4130 */
4131 reserved_clusters = get_reserved_cluster_alloc(inode,
4132 map->m_lblk, allocated);
4133 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4134 if (reserved_clusters) {
4135 /*
4136 * We have clusters reserved for this range.
4137 * But since we are not doing actual allocation
4138 * and are simply using blocks from previously
4139 * allocated cluster, we should release the
4140 * reservation and not claim quota.
4141 */
4142 ext4_da_update_reserve_space(inode,
4143 reserved_clusters, 0);
4144 }
4145 } else {
4146 BUG_ON(allocated_clusters < reserved_clusters);
4147 /* We will claim quota for all newly allocated blocks. */
4148 ext4_da_update_reserve_space(inode, allocated_clusters,
4149 1);
4150 if (reserved_clusters < allocated_clusters) {
4151 struct ext4_inode_info *ei = EXT4_I(inode);
4152 int reservation = allocated_clusters -
4153 reserved_clusters;
4154 /*
4155 * It seems we claimed few clusters outside of
4156 * the range of this allocation. We should give
4157 * it back to the reservation pool. This can
4158 * happen in the following case:
4159 *
4160 * * Suppose s_cluster_ratio is 4 (i.e., each
4161 * cluster has 4 blocks). Thus, the clusters
4162 * are [0-3],[4-7],[8-11]...
4163 * * First comes delayed allocation write for
4164 * logical blocks 10 & 11. Since there were no
4165 * previous delayed allocated blocks in the
4166 * range [8-11], we would reserve 1 cluster
4167 * for this write.
4168 * * Next comes write for logical blocks 3 to 8.
4169 * In this case, we will reserve 2 clusters
4170 * (for [0-3] and [4-7]; and not for [8-11] as
4171 * that range already has delayed allocated blocks).
4172 * Thus total reserved clusters now becomes 3.
4173 * * Now, during the delayed allocation writeout
4174 * time, we will first write blocks [3-8] and
4175 * allocate 3 clusters for writing these
4176 * blocks. Also, we would claim all these
4177 * three clusters above.
4178 * * Now when we come here to writeout the
4179 * blocks [10-11], we would expect to claim
4180 * the reservation of 1 cluster we had made
4181 * (and we would claim it since there are no
4182 * more delayed allocated blocks in the range
4183 * [8-11]). But our reserved cluster count had
4184 * already gone to 0.
4185 *
4186 * Thus, at the last step above, when we determine
4187 * that there are still some unwritten delayed
4188 * allocated blocks outside of our current
4189 * block range, we should increment the
4190 * reserved clusters count so that when the
4191 * remaining blocks finally get written, we
4192 * could claim them. (A worked sketch follows this function.)
4193 */
4194 dquot_reserve_block(inode,
4195 EXT4_C2B(sbi, reservation));
4196 spin_lock(&ei->i_block_reservation_lock);
4197 ei->i_reserved_data_blocks += reservation;
4198 spin_unlock(&ei->i_block_reservation_lock);
4199 }
4200 }
4201 }
4202
4203 /*
4204 * Cache the extent and update transaction to commit on fdatasync only
4205 * when it is _not_ an uninitialized extent.
4206 */
4207 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4208 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4209 ext4_update_inode_fsync_trans(handle, inode, 1);
4210 } else
4211 ext4_update_inode_fsync_trans(handle, inode, 0);
4212 out:
4213 if (allocated > map->m_len)
4214 allocated = map->m_len;
4215 ext4_ext_show_leaf(inode, path);
4216 map->m_flags |= EXT4_MAP_MAPPED;
4217 map->m_pblk = newblock;
4218 map->m_len = allocated;
4219 out2:
4220 if (path) {
4221 ext4_ext_drop_refs(path);
4222 kfree(path);
4223 }
4224
4225 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4226 newblock, map->m_len, err ? err : allocated);
4227
4228 return err ? err : allocated;
4229 }
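/*
 * Editor's sketch (hypothetical caller, #if 0 guarded so it never
 * builds): how the return convention documented above would be
 * consumed. > 0 is the number of blocks mapped, 0 means a hole with
 * no allocation requested, < 0 is an error.
 */
#if 0
static int example_map_one_range(handle_t *handle, struct inode *inode,
				 struct ext4_map_blocks *map)
{
	int ret = ext4_ext_map_blocks(handle, inode, map, 0);

	if (ret < 0)
		return ret;	/* error case */
	if (ret == 0)
		return 0;	/* lookup failed: a hole, nothing mapped */
	/* ret blocks starting at map->m_pblk are now mapped */
	return ret;
}
#endif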
4230
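/*
 * Editor's worked sketch for the s_cluster_ratio == 4 scenario in the
 * reservation comment above (user-space arithmetic only, #if 0
 * guarded): it shows why one cluster reservation is handed back to
 * the pool during the first writeout.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int reserved_total = 0;
	unsigned int allocated_clusters, reserved_clusters;

	reserved_total += 1;	/* delayed write of blocks 10-11: cluster [8-11] */
	reserved_total += 2;	/* delayed write of blocks 3-8: [0-3] and [4-7] */
	assert(reserved_total == 3);

	/* writeout of blocks 3-8 allocates clusters [0-3], [4-7], [8-11] */
	allocated_clusters = 3;
	/* but only two of the reservations belong to the 3-8 range */
	reserved_clusters = 2;

	/* the surplus is re-reserved so blocks 10-11 can claim it later */
	assert(allocated_clusters - reserved_clusters == 1);
	return 0;
}
#endif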
4231 void ext4_ext_truncate(struct inode *inode)
4232 {
4233 struct address_space *mapping = inode->i_mapping;
4234 struct super_block *sb = inode->i_sb;
4235 ext4_lblk_t last_block;
4236 handle_t *handle;
4237 loff_t page_len;
4238 int err = 0;
4239
4240 /*
4241 * finish any pending end_io work so we won't run the risk of
4242 * converting any truncated blocks to initialized later
4243 */
4244 ext4_flush_completed_IO(inode);
4245
4246 /*
4247 * the first extent we free will probably be the last one in the block
4248 */
4249 err = ext4_writepage_trans_blocks(inode);
4250 handle = ext4_journal_start(inode, err);
4251 if (IS_ERR(handle))
4252 return;
4253
4254 if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4255 page_len = PAGE_CACHE_SIZE -
4256 (inode->i_size & (PAGE_CACHE_SIZE - 1));
4257
4258 err = ext4_discard_partial_page_buffers(handle,
4259 mapping, inode->i_size, page_len, 0);
4260
4261 if (err)
4262 goto out_stop;
4263 }
4264
4265 if (ext4_orphan_add(handle, inode))
4266 goto out_stop;
4267
4268 down_write(&EXT4_I(inode)->i_data_sem);
4269 ext4_ext_invalidate_cache(inode);
4270
4271 ext4_discard_preallocations(inode);
4272
4273 /*
4274 * TODO: optimization is possible here.
4275 * Probably we need not scan at all,
4276 * because page truncation is enough.
4277 */
4278
4279 /* we have to know where to truncate from in crash case */
4280 EXT4_I(inode)->i_disksize = inode->i_size;
4281 ext4_mark_inode_dirty(handle, inode);
4282
4283 last_block = (inode->i_size + sb->s_blocksize - 1)
4284 >> EXT4_BLOCK_SIZE_BITS(sb);
4285 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4286
4287 /* In a multi-transaction truncate, we only make the final
4288 * transaction synchronous.
4289 */
4290 if (IS_SYNC(inode))
4291 ext4_handle_sync(handle);
4292
4293 up_write(&EXT4_I(inode)->i_data_sem);
4294
4295 out_stop:
4296 /*
4297 * If this was a simple ftruncate() and the file will remain alive,
4298 * then we need to clear up the orphan record which we created above.
4299 * However, if this was a real unlink then we were called by
4300 * ext4_delete_inode(), and we allow that function to clean up the
4301 * orphan info for us.
4302 */
4303 if (inode->i_nlink)
4304 ext4_orphan_del(handle, inode);
4305
4306 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4307 ext4_mark_inode_dirty(handle, inode);
4308 ext4_journal_stop(handle);
4309 }
4310
4311 static void ext4_falloc_update_inode(struct inode *inode,
4312 int mode, loff_t new_size, int update_ctime)
4313 {
4314 struct timespec now;
4315
4316 if (update_ctime) {
4317 now = current_fs_time(inode->i_sb);
4318 if (!timespec_equal(&inode->i_ctime, &now))
4319 inode->i_ctime = now;
4320 }
4321 /*
4322 * Update only when preallocation was requested beyond
4323 * the file size.
4324 */
4325 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4326 if (new_size > i_size_read(inode))
4327 i_size_write(inode, new_size);
4328 if (new_size > EXT4_I(inode)->i_disksize)
4329 ext4_update_i_disksize(inode, new_size);
4330 } else {
4331 /*
4332 * Mark that we allocate beyond EOF so the subsequent truncate
4333 * can proceed even if the new size is the same as i_size.
4334 */
4335 if (new_size > i_size_read(inode))
4336 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4337 }
4338
4339 }
4340
4341 /*
4342 * Preallocate space for a file. This implements ext4's fallocate file
4343 * operation, which gets called from the sys_fallocate system call.
4344 * For block-mapped files, posix_fallocate should fall back to the method
4345 * of writing zeroes to the required new blocks (the same behavior which is
4346 * expected for file systems which do not support fallocate() system call).
4347 */
4348 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4349 {
4350 struct inode *inode = file->f_path.dentry->d_inode;
4351 handle_t *handle;
4352 loff_t new_size;
4353 unsigned int max_blocks;
4354 int ret = 0;
4355 int ret2 = 0;
4356 int retries = 0;
4357 int flags;
4358 struct ext4_map_blocks map;
4359 unsigned int credits, blkbits = inode->i_blkbits;
4360
4361 /*
4362 * currently supporting (pre)allocate mode for extent-based
4363 * files _only_
4364 */
4365 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4366 return -EOPNOTSUPP;
4367
4368 /* Return error if mode is not supported */
4369 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4370 return -EOPNOTSUPP;
4371
4372 if (mode & FALLOC_FL_PUNCH_HOLE)
4373 return ext4_punch_hole(file, offset, len);
4374
4375 trace_ext4_fallocate_enter(inode, offset, len, mode);
4376 map.m_lblk = offset >> blkbits;
4377 /*
4378 * We can't just convert len to max_blocks because of alignment: if
4379 * blocksize = 4096, offset = 3072 and len = 2048, the range spans two blocks
4380 */
4381 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4382 - map.m_lblk;
4383 /*
4384 * credits to insert 1 extent into extent tree
4385 */
4386 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4387 mutex_lock(&inode->i_mutex);
4388 ret = inode_newsize_ok(inode, (len + offset));
4389 if (ret) {
4390 mutex_unlock(&inode->i_mutex);
4391 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4392 return ret;
4393 }
4394 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4395 if (mode & FALLOC_FL_KEEP_SIZE)
4396 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4397 /*
4398 * Don't normalize the request if it can fit in one extent so
4399 * that it doesn't get unnecessarily split into multiple
4400 * extents.
4401 */
4402 if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4403 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4404 retry:
4405 while (ret >= 0 && ret < max_blocks) {
4406 map.m_lblk = map.m_lblk + ret;
4407 map.m_len = max_blocks = max_blocks - ret;
4408 handle = ext4_journal_start(inode, credits);
4409 if (IS_ERR(handle)) {
4410 ret = PTR_ERR(handle);
4411 break;
4412 }
4413 ret = ext4_map_blocks(handle, inode, &map, flags);
4414 if (ret <= 0) {
4415 #ifdef EXT4FS_DEBUG
4416 WARN_ON(ret <= 0);
4417 printk(KERN_ERR "%s: ext4_ext_map_blocks "
4418 "returned error inode#%lu, block=%u, "
4419 "max_blocks=%u", __func__,
4420 inode->i_ino, map.m_lblk, max_blocks);
4421 #endif
4422 ext4_mark_inode_dirty(handle, inode);
4423 ret2 = ext4_journal_stop(handle);
4424 break;
4425 }
4426 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4427 blkbits) >> blkbits))
4428 new_size = offset + len;
4429 else
4430 new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4431
4432 ext4_falloc_update_inode(inode, mode, new_size,
4433 (map.m_flags & EXT4_MAP_NEW));
4434 ext4_mark_inode_dirty(handle, inode);
4435 if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4436 ext4_handle_sync(handle);
4437 ret2 = ext4_journal_stop(handle);
4438 if (ret2)
4439 break;
4440 }
4441 if (ret == -ENOSPC &&
4442 ext4_should_retry_alloc(inode->i_sb, &retries)) {
4443 ret = 0;
4444 goto retry;
4445 }
4446 mutex_unlock(&inode->i_mutex);
4447 trace_ext4_fallocate_exit(inode, offset, max_blocks,
4448 ret > 0 ? ret2 : ret);
4449 return ret > 0 ? ret2 : ret;
4450 }
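/*
 * Editor's usage sketch (user-space, #if 0 guarded; the filename is
 * made up): exercising the two fallocate(2) modes that reach the
 * function above. FALLOC_FL_KEEP_SIZE preallocates past EOF without
 * moving i_size, which is what sets the EOFBLOCKS flag.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* grow the file to 1 MiB; i_size is updated */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");
	/* preallocate another 1 MiB past EOF; i_size stays put */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20) < 0)
		perror("fallocate keep_size");
	close(fd);
	return 0;
}
#endif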
4451
4452 /*
4453 * This function converts a range of blocks to written extents.
4454 * The caller of this function will pass the start offset and the size.
4455 * All unwritten extents within this range will be converted to
4456 * written extents.
4457 *
4458 * This function is called from the direct IO end io callback
4459 * function, to convert the fallocated extents after IO is completed.
4460 * Returns 0 on success.
4461 */
4462 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4463 ssize_t len)
4464 {
4465 handle_t *handle;
4466 unsigned int max_blocks;
4467 int ret = 0;
4468 int ret2 = 0;
4469 struct ext4_map_blocks map;
4470 unsigned int credits, blkbits = inode->i_blkbits;
4471
4472 map.m_lblk = offset >> blkbits;
4473 /*
4474 * We can't just convert len to max_blocks because of alignment: if
4475 * blocksize = 4096, offset = 3072 and len = 2048, the range spans two blocks
4476 */
4477 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4478 map.m_lblk);
4479 /*
4480 * credits to insert 1 extent into extent tree
4481 */
4482 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4483 while (ret >= 0 && ret < max_blocks) {
4484 map.m_lblk += ret;
4485 map.m_len = (max_blocks -= ret);
4486 handle = ext4_journal_start(inode, credits);
4487 if (IS_ERR(handle)) {
4488 ret = PTR_ERR(handle);
4489 break;
4490 }
4491 ret = ext4_map_blocks(handle, inode, &map,
4492 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4493 if (ret <= 0) {
4494 WARN_ON(ret <= 0);
4495 ext4_msg(inode->i_sb, KERN_ERR,
4496 "%s:%d: inode #%lu: block %u: len %u: "
4497 "ext4_ext_map_blocks returned %d",
4498 __func__, __LINE__, inode->i_ino, map.m_lblk,
4499 map.m_len, ret);
4500 }
4501 ext4_mark_inode_dirty(handle, inode);
4502 ret2 = ext4_journal_stop(handle);
4503 if (ret <= 0 || ret2)
4504 break;
4505 }
4506 return ret > 0 ? ret2 : ret;
4507 }
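/*
 * Editor's sketch (hypothetical completion handler, #if 0 guarded):
 * the direct IO end-io path hands the just-written byte range to
 * ext4_convert_unwritten_extents() above, which walks it in
 * max_blocks-sized chunks.
 */
#if 0
static void example_dio_complete(struct inode *inode, loff_t offset,
				 ssize_t size)
{
	int err = ext4_convert_unwritten_extents(inode, offset, size);

	if (err)
		pr_err("unwritten extent conversion failed: %d\n", err);
}
#endif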
4508
4509 /*
4510 * Callback function called for each extent to gather FIEMAP information.
4511 */
4512 static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4513 struct ext4_ext_cache *newex, struct ext4_extent *ex,
4514 void *data)
4515 {
4516 __u64 logical;
4517 __u64 physical;
4518 __u64 length;
4519 __u32 flags = 0;
4520 int ret = 0;
4521 struct fiemap_extent_info *fieinfo = data;
4522 unsigned char blksize_bits;
4523
4524 blksize_bits = inode->i_sb->s_blocksize_bits;
4525 logical = (__u64)newex->ec_block << blksize_bits;
4526
4527 if (newex->ec_start == 0) {
4528 /*
4529 * No extent in the extent tree contains block @newex->ec_start,
4530 * so the block may lie in 1) a hole or 2) a delayed extent.
4531 *
4532 * Holes or delayed extents are processed as follows.
4533 * 1. look up dirty pages within the specified range in the pagecache.
4534 * If no page is found, there is no delayed extent and we
4535 * return with EXT_CONTINUE.
4536 * 2. find the 1st mapped buffer,
4537 * 3. check whether the mapped buffer is both in the request range
4538 * and a delayed buffer. If not, there is no delayed extent;
4539 * return.
4540 * 4. a delayed extent is found; it will be collected.
4541 */
4542 ext4_lblk_t end = 0;
4543 pgoff_t last_offset;
4544 pgoff_t offset;
4545 pgoff_t index;
4546 pgoff_t start_index = 0;
4547 struct page **pages = NULL;
4548 struct buffer_head *bh = NULL;
4549 struct buffer_head *head = NULL;
4550 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
4551
4552 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4553 if (pages == NULL)
4554 return -ENOMEM;
4555
4556 offset = logical >> PAGE_SHIFT;
4557 repeat:
4558 last_offset = offset;
4559 head = NULL;
4560 ret = find_get_pages_tag(inode->i_mapping, &offset,
4561 PAGECACHE_TAG_DIRTY, nr_pages, pages);
4562
4563 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4564 /* First time, try to find a mapped buffer. */
4565 if (ret == 0) {
4566 out:
4567 for (index = 0; index < ret; index++)
4568 page_cache_release(pages[index]);
4569 /* just a hole. */
4570 kfree(pages);
4571 return EXT_CONTINUE;
4572 }
4573 index = 0;
4574
4575 next_page:
4576 /* Try to find the 1st mapped buffer. */
4577 end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4578 blksize_bits;
4579 if (!page_has_buffers(pages[index]))
4580 goto out;
4581 head = page_buffers(pages[index]);
4582 if (!head)
4583 goto out;
4584
4585 index++;
4586 bh = head;
4587 do {
4588 if (end >= newex->ec_block +
4589 newex->ec_len)
4590 /* The buffer is out of
4591 * the request range.
4592 */
4593 goto out;
4594
4595 if (buffer_mapped(bh) &&
4596 end >= newex->ec_block) {
4597 start_index = index - 1;
4598 /* get the 1st mapped buffer. */
4599 goto found_mapped_buffer;
4600 }
4601
4602 bh = bh->b_this_page;
4603 end++;
4604 } while (bh != head);
4605
4606 /* No mapped buffer in the range found in this page,
4607 * we need to look up the next page.
4608 */
4609 if (index >= ret) {
4610 /* There is no page left, but we need to limit
4611 * newex->ec_len.
4612 */
4613 newex->ec_len = end - newex->ec_block;
4614 goto out;
4615 }
4616 goto next_page;
4617 } else {
4618 /* Find contiguous delayed buffers. */
4619 if (ret > 0 && pages[0]->index == last_offset)
4620 head = page_buffers(pages[0]);
4621 bh = head;
4622 index = 1;
4623 start_index = 0;
4624 }
4625
4626 found_mapped_buffer:
4627 if (bh != NULL && buffer_delay(bh)) {
4628 /* 1st or contiguous delayed buffer found. */
4629 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4630 /*
4631 * 1st delayed buffer found, record
4632 * the start of extent.
4633 */
4634 flags |= FIEMAP_EXTENT_DELALLOC;
4635 newex->ec_block = end;
4636 logical = (__u64)end << blksize_bits;
4637 }
4638 /* Find contiguous delayed buffers. */
4639 do {
4640 if (!buffer_delay(bh))
4641 goto found_delayed_extent;
4642 bh = bh->b_this_page;
4643 end++;
4644 } while (bh != head);
4645
4646 for (; index < ret; index++) {
4647 if (!page_has_buffers(pages[index])) {
4648 bh = NULL;
4649 break;
4650 }
4651 head = page_buffers(pages[index]);
4652 if (!head) {
4653 bh = NULL;
4654 break;
4655 }
4656
4657 if (pages[index]->index !=
4658 pages[start_index]->index + index
4659 - start_index) {
4660 /* Blocks are not contiguous. */
4661 bh = NULL;
4662 break;
4663 }
4664 bh = head;
4665 do {
4666 if (!buffer_delay(bh))
4667 /* Delayed-extent ends. */
4668 goto found_delayed_extent;
4669 bh = bh->b_this_page;
4670 end++;
4671 } while (bh != head);
4672 }
4673 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4674 /* a hole found. */
4675 goto out;
4676
4677 found_delayed_extent:
4678 newex->ec_len = min(end - newex->ec_block,
4679 (ext4_lblk_t)EXT_INIT_MAX_LEN);
4680 if (ret == nr_pages && bh != NULL &&
4681 newex->ec_len < EXT_INIT_MAX_LEN &&
4682 buffer_delay(bh)) {
4683 /* Haven't collected a full extent yet; continue scanning. */
4684 for (index = 0; index < ret; index++)
4685 page_cache_release(pages[index]);
4686 goto repeat;
4687 }
4688
4689 for (index = 0; index < ret; index++)
4690 page_cache_release(pages[index]);
4691 kfree(pages);
4692 }
4693
4694 physical = (__u64)newex->ec_start << blksize_bits;
4695 length = (__u64)newex->ec_len << blksize_bits;
4696
4697 if (ex && ext4_ext_is_uninitialized(ex))
4698 flags |= FIEMAP_EXTENT_UNWRITTEN;
4699
4700 if (next == EXT_MAX_BLOCKS)
4701 flags |= FIEMAP_EXTENT_LAST;
4702
4703 ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4704 length, flags);
4705 if (ret < 0)
4706 return ret;
4707 if (ret == 1)
4708 return EXT_BREAK;
4709 return EXT_CONTINUE;
4710 }
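/*
 * Editor's sketch (user-space model, #if 0 guarded): the circular
 * buffer-ring walk used above to find the first delayed buffer in a
 * page. fake_bh and its delay field stand in for struct buffer_head
 * and buffer_delay().
 */
#if 0
#include <stddef.h>

struct fake_bh {
	int delay;			/* models buffer_delay(bh) */
	struct fake_bh *b_this_page;	/* circular list, as in the kernel */
};

static struct fake_bh *first_delayed(struct fake_bh *head)
{
	struct fake_bh *bh = head;

	do {
		if (bh->delay)
			return bh;	/* start of a delayed extent */
		bh = bh->b_this_page;
	} while (bh != head);
	return NULL;			/* no delayed buffer in this page */
}
#endif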
4711 /* fiemap flags we can handle specified here */
4712 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4713
4714 static int ext4_xattr_fiemap(struct inode *inode,
4715 struct fiemap_extent_info *fieinfo)
4716 {
4717 __u64 physical = 0;
4718 __u64 length;
4719 __u32 flags = FIEMAP_EXTENT_LAST;
4720 int blockbits = inode->i_sb->s_blocksize_bits;
4721 int error = 0;
4722
4723 /* in-inode? */
4724 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4725 struct ext4_iloc iloc;
4726 int offset; /* offset of xattr in inode */
4727
4728 error = ext4_get_inode_loc(inode, &iloc);
4729 if (error)
4730 return error;
4731 physical = iloc.bh->b_blocknr << blockbits;
4732 offset = EXT4_GOOD_OLD_INODE_SIZE +
4733 EXT4_I(inode)->i_extra_isize;
4734 physical += offset;
4735 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4736 flags |= FIEMAP_EXTENT_DATA_INLINE;
4737 brelse(iloc.bh);
4738 } else { /* external block */
4739 physical = EXT4_I(inode)->i_file_acl << blockbits;
4740 length = inode->i_sb->s_blocksize;
4741 }
4742
4743 if (physical)
4744 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4745 length, flags);
4746 return (error < 0 ? error : 0);
4747 }
4748
4749 /*
4750 * ext4_ext_punch_hole
4751 *
4752 * Punches a hole of "length" bytes in a file starting
4753 * at byte "offset"
4754 *
4755 * @inode: The inode of the file to punch a hole in
4756 * @offset: The starting byte offset of the hole
4757 * @length: The length of the hole
4758 *
4759 * Returns 0 on success or a negative error code on failure
4760 */
4761 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4762 {
4763 struct inode *inode = file->f_path.dentry->d_inode;
4764 struct super_block *sb = inode->i_sb;
4765 ext4_lblk_t first_block, stop_block;
4766 struct address_space *mapping = inode->i_mapping;
4767 handle_t *handle;
4768 loff_t first_page, last_page, page_len;
4769 loff_t first_page_offset, last_page_offset;
4770 int credits, err = 0;
4771
4772 /* No need to punch hole beyond i_size */
4773 if (offset >= inode->i_size)
4774 return 0;
4775
4776 /*
4777 * If the hole extends beyond i_size, set the hole
4778 * to end after the page that contains i_size
4779 */
4780 if (offset + length > inode->i_size) {
4781 length = inode->i_size +
4782 PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4783 offset;
4784 }
4785
4786 first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4787 last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4788
4789 first_page_offset = first_page << PAGE_CACHE_SHIFT;
4790 last_page_offset = last_page << PAGE_CACHE_SHIFT;
4791
4792 /*
4793 * Write out all dirty pages to avoid race conditions
4794 * Then release them.
4795 */
4796 if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4797 err = filemap_write_and_wait_range(mapping,
4798 offset, offset + length - 1);
4799
4800 if (err)
4801 return err;
4802 }
4803
4804 /* Now release the pages */
4805 if (last_page_offset > first_page_offset) {
4806 truncate_pagecache_range(inode, first_page_offset,
4807 last_page_offset - 1);
4808 }
4809
4810 /* finish any pending end_io work */
4811 ext4_flush_completed_IO(inode);
4812
4813 credits = ext4_writepage_trans_blocks(inode);
4814 handle = ext4_journal_start(inode, credits);
4815 if (IS_ERR(handle))
4816 return PTR_ERR(handle);
4817
4818 err = ext4_orphan_add(handle, inode);
4819 if (err)
4820 goto out;
4821
4822 /*
4823 * Now we need to zero out the non-page-aligned data in the
4824 * pages at the start and tail of the hole, and unmap the buffer
4825 * heads for the block aligned regions of the page that were
4826 * completely zeroed.
4827 */
4828 if (first_page > last_page) {
4829 /*
4830 * If the file space being truncated is contained within a page
4831 * just zero out and unmap the middle of that page
4832 */
4833 err = ext4_discard_partial_page_buffers(handle,
4834 mapping, offset, length, 0);
4835
4836 if (err)
4837 goto out;
4838 } else {
4839 /*
4840 * zero out and unmap the partial page that contains
4841 * the start of the hole
4842 */
4843 page_len = first_page_offset - offset;
4844 if (page_len > 0) {
4845 err = ext4_discard_partial_page_buffers(handle, mapping,
4846 offset, page_len, 0);
4847 if (err)
4848 goto out;
4849 }
4850
4851 /*
4852 * zero out and unmap the partial page that contains
4853 * the end of the hole
4854 */
4855 page_len = offset + length - last_page_offset;
4856 if (page_len > 0) {
4857 err = ext4_discard_partial_page_buffers(handle, mapping,
4858 last_page_offset, page_len, 0);
4859 if (err)
4860 goto out;
4861 }
4862 }
4863
4864 /*
4865 * If i_size is contained in the last page, we need to
4866 * unmap and zero the partial page after i_size
4867 */
4868 if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4869 inode->i_size % PAGE_CACHE_SIZE != 0) {
4870
4871 page_len = PAGE_CACHE_SIZE -
4872 (inode->i_size & (PAGE_CACHE_SIZE - 1));
4873
4874 if (page_len > 0) {
4875 err = ext4_discard_partial_page_buffers(handle,
4876 mapping, inode->i_size, page_len, 0);
4877
4878 if (err)
4879 goto out;
4880 }
4881 }
4882
4883 first_block = (offset + sb->s_blocksize - 1) >>
4884 EXT4_BLOCK_SIZE_BITS(sb);
4885 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4886
4887 /* If there are no blocks to remove, return now */
4888 if (first_block >= stop_block)
4889 goto out;
4890
4891 down_write(&EXT4_I(inode)->i_data_sem);
4892 ext4_ext_invalidate_cache(inode);
4893 ext4_discard_preallocations(inode);
4894
4895 err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4896
4897 ext4_ext_invalidate_cache(inode);
4898 ext4_discard_preallocations(inode);
4899
4900 if (IS_SYNC(inode))
4901 ext4_handle_sync(handle);
4902
4903 up_write(&EXT4_I(inode)->i_data_sem);
4904
4905 out:
4906 ext4_orphan_del(handle, inode);
4907 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4908 ext4_mark_inode_dirty(handle, inode);
4909 ext4_journal_stop(handle);
4910 return err;
4911 }
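/*
 * Editor's usage sketch (user-space, #if 0 guarded): punching a hole
 * with fallocate(2), which is the path that reaches the function
 * above. The kernel requires FALLOC_FL_KEEP_SIZE to be ORed with
 * FALLOC_FL_PUNCH_HOLE.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int punch_example(int fd)
{
	/* free bytes [4096, 8192) without changing i_size */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4096, 4096);
}
#endif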
4912 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4913 __u64 start, __u64 len)
4914 {
4915 ext4_lblk_t start_blk;
4916 int error = 0;
4917
4918 /* fallback to generic here if not in extents fmt */
4919 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4920 return generic_block_fiemap(inode, fieinfo, start, len,
4921 ext4_get_block);
4922
4923 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4924 return -EBADR;
4925
4926 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4927 error = ext4_xattr_fiemap(inode, fieinfo);
4928 } else {
4929 ext4_lblk_t len_blks;
4930 __u64 last_blk;
4931
4932 start_blk = start >> inode->i_sb->s_blocksize_bits;
4933 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4934 if (last_blk >= EXT_MAX_BLOCKS)
4935 last_blk = EXT_MAX_BLOCKS-1;
4936 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4937
4938 /*
4939 * Walk the extent tree gathering extent information.
4940 * ext4_ext_fiemap_cb will push extents back to user.
4941 */
4942 error = ext4_ext_walk_space(inode, start_blk, len_blks,
4943 ext4_ext_fiemap_cb, fieinfo);
4944 }
4945
4946 return error;
4947 }
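/*
 * Editor's usage sketch (user-space, #if 0 guarded): querying the
 * mappings that ext4_fiemap() above reports, via the FS_IOC_FIEMAP
 * ioctl, with room for up to 32 extents.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int dump_extents(int fd)
{
	struct fiemap *fm;
	unsigned int i;

	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm)
		return -1;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_extent_count = 32;
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		free(fm);
		return -1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu len %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	return 0;
}
#endif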