/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
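
/*
 * Defrag records are keyed by (root objectid, inode number); the
 * comparator below defines the rb-tree ordering: roots are compared
 * first, then inode numbers.
 */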
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
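
/*
 * Auto defrag only makes sense while the autodefrag mount option is set
 * and the filesystem is not shutting down; the helper below checks both.
 */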
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, this new inode
		 * doesn't have the IN_DEFRAG flag. In that case we may
		 * find an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * pick the defragable inode that we want, if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
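
/*
 * Empty the whole defrag rb-tree and free every queued record; used when
 * the pending defrag state must be discarded (e.g. during unmount).
 */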
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
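
/*
 * Defrag work is done in chunks of BTRFS_DEFRAG_BATCH pages per inode:
 * the helper below looks the inode up again by root and inode number,
 * defrags one batch and requeues the record if there is more to do.
 */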
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here. There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page()
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}
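
/*
 * Walk the extent maps in [start, start + len) and tag every hole in
 * that range with the EXTENT_DELALLOC_NEW bit, so that the delalloc
 * accounting can tell newly written ranges apart from ranges that
 * already had extents.
 */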
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start,
				      search_len, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
			 EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached,
			 GFP_NOFS);

	if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
		if (start_pos >= isize &&
		    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
			/*
			 * There can't be any extents following eof in this case
			 * so just set the delalloc new bit for the range
			 * directly.
			 */
			extra_bits |= EXTENT_DELALLOC_NEW;
		} else {
			err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
							    start_pos,
							    num_bytes, cached);
			if (err)
				return err;
		}
	}

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached, 0);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(BTRFS_I(inode));
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are move off from our leaf to its immediate left or
		 * right neighbor leafs, we end up with a correct and adjusted
		 * path->slots[0] for our insertion (if replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(fs_info, leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}
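
/*
 * Check whether the file extent item at @slot is a plain (uncompressed,
 * unencrypted) regular extent that refers to the same disk extent at
 * @bytenr with a contiguous offset, and whose range matches the
 * boundaries the caller is probing; on success its range is returned
 * through *start and *end.
 */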
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
					   0, root->root_key.objectid,
					   ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}
/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if need.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need re-prepare the pages
 * the other < 0 number - Something wrong happens
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos,
			   fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				 cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					     last_pos, cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(&inode->vfs_inode,
						   ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}
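
/*
 * Check whether a write of *write_bytes at @pos can go through without
 * COW. Returns > 0 and trims *write_bytes to the NOCOW-able length on
 * success; in that case the no-snapshotting write lock stays held and
 * the caller must drop it with btrfs_end_write_no_snapshotting().
 */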
static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	ret = btrfs_start_write_no_snapshotting(root);
	if (!ret)
		return -ENOSPC;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;

	while (1) {
		lock_extent(&inode->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent(&inode->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
		btrfs_end_write_no_snapshotting(root);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}

	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
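
/*
 * The buffered write loop below works in batches of up to nrptrs pages:
 * reserve data and metadata space (or fall back to a NOCOW write when
 * possible), prepare and lock the pages, copy from the iov_iter, mark
 * the copied range delalloc and release whatever was over-reserved.
 */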
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_SIZE - 1);
		struct extent_state *cached_state = NULL;
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
						      BTRFS_INODE_PREALLOC)) &&
			    check_can_nocow(BTRFS_I(inode), pos,
					    &write_bytes) > 0) {
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
						data_reserved, pos,
						write_bytes);
			else
				btrfs_end_write_no_snapshotting(root);
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
								release_bytes);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(inode,
						data_reserved, __pos,
						release_bytes);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
						pos, copied, &cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state,
					     GFP_NOFS);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_end_write_no_snapshotting(root);

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
			btrfs_btree_balance_dirty(fs_info);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_end_write_no_snapshotting(root);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes);
		} else {
			btrfs_delalloc_release_space(inode, data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}
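
/*
 * Direct IO writes may complete only partially (e.g. when they hit a
 * range that has to be buffered); the wrapper below falls back to the
 * buffered path for the remainder and flushes that range so a following
 * direct read sees the data.
 */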
static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos += written;
	written_buffered = __btrfs_buffered_write(file, from, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}
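
/*
 * Update mtime/ctime and the inode version before a write, similar to
 * what file_update_time() does, but without starting a transaction: the
 * inode is written back later together with the data.
 */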
static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
	ssize_t err;
	loff_t pos;
	size_t count;
	loff_t oldsize;
	int clean_page = 0;

	if (!(iocb->ki_flags & IOCB_DIRECT) &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		inode_unlock(inode);
		return err;
	}

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		/*
		 * We will allocate space in case nodatacow is not set,
		 * so bail
		 */
		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					       BTRFS_INODE_PREALLOC)) ||
		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
			inode_unlock(inode);
			return -EAGAIN;
		}
	}

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err) {
		inode_unlock(inode);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		inode_unlock(inode);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count,
				   fs_info->sectorsize);
		err = btrfs_cont_expand(inode, oldsize, end_pos);
		if (err) {
			inode_unlock(inode);
			goto out;
		}
		if (start_pos > round_up(oldsize, fs_info->sectorsize))
			clean_page = 1;
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = __btrfs_direct_write(iocb, from);
	} else {
		num_written = __btrfs_buffered_write(file, from, pos);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
		if (clean_page)
			pagecache_isize_extended(inode, oldsize,
						 i_size_read(inode));
	}

	inode_unlock(inode);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	spin_unlock(&BTRFS_I(inode)->lock);
	if (num_written > 0)
		num_written = generic_write_sync(iocb, num_written);

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->trans)
		btrfs_ioctl_trans_end(filp);
	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}
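
/*
 * Kick off writeback for the given range; the caller decides whether
 * and how to wait for the resulting ordered extents to complete.
 */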
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible. Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}
2070 * fsync call for both files and directories. This logs the inode into
2071 * the tree log instead of forcing full commits whenever possible.
2073 * It needs to call filemap_fdatawait so that all ordered extent updates are
2074 * in the metadata btree are up to date for copying to the log.
2076 * It drops the inode mutex before doing the tree log commit. This is an
2077 * important optimization for directories because holding the mutex prevents
2078 * new operations on the dir while we write to disk.
2080 int btrfs_sync_file(struct file
*file
, loff_t start
, loff_t end
, int datasync
)
2082 struct dentry
*dentry
= file_dentry(file
);
2083 struct inode
*inode
= d_inode(dentry
);
2084 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
2085 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
2086 struct btrfs_trans_handle
*trans
;
2087 struct btrfs_log_ctx ctx
;
2089 bool full_sync
= false;
2093 * If the inode needs a full sync, make sure we use a full range to
2094 * avoid log tree corruption, due to hole detection racing with ordered
2095 * extent completion for adjacent ranges, and assertion failures during
2098 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC
,
2099 &BTRFS_I(inode
)->runtime_flags
)) {
2105 * The range length can be represented by u64, we have to do the typecasts
2106 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
2108 len
= (u64
)end
- (u64
)start
+ 1;
2109 trace_btrfs_sync_file(file
, datasync
);
2111 btrfs_init_log_ctx(&ctx
, inode
);
2114 * Before we acquired the inode's lock, someone may have dirtied more
2115 * pages in the target range. We need to make sure that writeback for
2116 * any such pages does not start while we are logging the inode, because
2117 * if it does, any of the following might happen when we are not doing a
2120 * 1) We log an extent after its writeback finishes but before its
2121 * checksums are added to the csum tree, leading to -EIO errors
2122 * when attempting to read the extent after a log replay.
2124 * 2) We can end up logging an extent before its writeback finishes.
2125 * Therefore after the log replay we will have a file extent item
2126 * pointing to an unwritten extent (and no data checksums as well).
2128 * So trigger writeback for any eventual new dirty pages and then we
2129 * wait for all ordered extents to complete below.
2131 ret
= start_ordered_ops(inode
, start
, end
);
2133 inode_unlock(inode
);
2138 * We write the dirty pages in the range and wait until they complete
2139 * out of the ->i_mutex. If so, we can flush the dirty pages by
2140 * multi-task, and make the performance up. See
2141 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2143 ret
= start_ordered_ops(inode
, start
, end
);
2150 * We take the dio_sem here because the tree log stuff can race with
2151 * lockless dio writes and get an extent map logged for an extent we
2152 * never waited on. We need it this high up for lockdep reasons.
2154 down_write(&BTRFS_I(inode
)->dio_sem
);
2156 atomic_inc(&root
->log_batch
);
2157 full_sync
= test_bit(BTRFS_INODE_NEEDS_FULL_SYNC
,
2158 &BTRFS_I(inode
)->runtime_flags
);
2160 * We might have have had more pages made dirty after calling
2161 * start_ordered_ops and before acquiring the inode's i_mutex.
2165 * For a full sync, we need to make sure any ordered operations
2166 * start and finish before we start logging the inode, so that
2167 * all extents are persisted and the respective file extent
2168 * items are in the fs/subvol btree.
2170 ret
= btrfs_wait_ordered_range(inode
, start
, len
);
2173 * Start any new ordered operations before starting to log the
2174 * inode. We will wait for them to finish in btrfs_sync_log().
2176 * Right before acquiring the inode's mutex, we might have new
2177 * writes dirtying pages, which won't immediately start the
2178 * respective ordered operations - that is done through the
2179 * fill_delalloc callbacks invoked from the writepage and
2180 * writepages address space operations. So make sure we start
2181 * all ordered operations before starting to log our inode. Not
2182 * doing this means that while logging the inode, writeback
2183 * could start and invoke writepage/writepages, which would call
2184 * the fill_delalloc callbacks (cow_file_range,
2185 * submit_compressed_extents). These callbacks add first an
2186 * extent map to the modified list of extents and then create
2187 * the respective ordered operation, which means in
2188 * tree-log.c:btrfs_log_inode() we might capture all existing
2189 * ordered operations (with btrfs_get_logged_extents()) before
2190 * the fill_delalloc callback adds its ordered operation, and by
2191 * the time we visit the modified list of extent maps (with
2192 * btrfs_log_changed_extents()), we see and process the extent
2193 * map they created. We then use the extent map to construct a
2194 * file extent item for logging without waiting for the
2195 * respective ordered operation to finish - this file extent
2196 * item points to a disk location that might not have yet been
2197 * written to, containing random data - so after a crash a log
2198 * replay will make our inode have file extent items that point
2199 * to disk locations containing invalid data, as we returned
2200 * success to userspace without waiting for the respective
2201 * ordered operation to finish, because it wasn't captured by
2202 * btrfs_get_logged_extents().
2204 ret
= start_ordered_ops(inode
, start
, end
);
2207 up_write(&BTRFS_I(inode
)->dio_sem
);
2208 inode_unlock(inode
);
2211 atomic_inc(&root
->log_batch
);
2214 * If the last transaction that changed this file was before the current
2215 * transaction and we have the full sync flag set in our inode, we can
2216 * bail out now without any syncing.
2218 * Note that we can't bail out if the full sync flag isn't set. This is
2219 * because when the full sync flag is set we start all ordered extents
2220 * and wait for them to fully complete - when they complete they update
2221 * the inode's last_trans field through:
2223 * btrfs_finish_ordered_io() ->
2224 * btrfs_update_inode_fallback() ->
2225 * btrfs_update_inode() ->
2226 * btrfs_set_inode_last_trans()
2228 * So we are sure that last_trans is up to date and can do this check to
2229 * bail out safely. For the fast path, when the full sync flag is not
2230 * set in our inode, we can not do it because we start only our ordered
2231 * extents and don't wait for them to complete (that is when
2232 * btrfs_finish_ordered_io runs), so here at this point their last_trans
2233 * value might be less than or equals to fs_info->last_trans_committed,
2234 * and setting a speculative last_trans for an inode when a buffered
2235 * write is made (such as fs_info->generation + 1 for example) would not
2236 * be reliable since after setting the value and before fsync is called
2237 * any number of transactions can start and commit (transaction kthread
2238 * commits the current transaction periodically), and a transaction
2239 * commit does not start nor waits for ordered extents to complete.
2242 if (btrfs_inode_in_log(BTRFS_I(inode
), fs_info
->generation
) ||
2243 (full_sync
&& BTRFS_I(inode
)->last_trans
<=
2244 fs_info
->last_trans_committed
) ||
2245 (!btrfs_have_ordered_extents_in_range(inode
, start
, len
) &&
2246 BTRFS_I(inode
)->last_trans
2247 <= fs_info
->last_trans_committed
)) {
2249 * We've had everything committed since the last time we were
2250 * modified so clear this flag in case it was set for whatever
2251 * reason, it's no longer relevant.
2253 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC
,
2254 &BTRFS_I(inode
)->runtime_flags
);
2256 * An ordered extent might have started before and completed
2257 * already with io errors, in which case the inode was not
2258 * updated and we end up here. So check the inode's mapping
2259 * for any errors that might have happened since we last
2260 * checked called fsync.
2262 ret
= filemap_check_wb_err(inode
->i_mapping
, file
->f_wb_err
);
2263 up_write(&BTRFS_I(inode
)->dio_sem
);
2264 inode_unlock(inode
);
2269 * ok we haven't committed the transaction yet, lets do a commit
2271 if (file
->private_data
)
2272 btrfs_ioctl_trans_end(file
);
2275 * We use start here because we will need to wait on the IO to complete
2276 * in btrfs_sync_log, which could require joining a transaction (for
2277 * example checking cross references in the nocow path). If we use join
2278 * here we could get into a situation where we're waiting on IO to
2279 * happen that is blocked on a transaction trying to commit. With start
2280 * we inc the extwriter counter, so we wait for all extwriters to exit
2281 * before we start blocking join'ers. This comment is to keep somebody
2282 * from thinking they are super smart and changing this to
2283 * btrfs_join_transaction *cough*Josef*cough*.
2285 trans
= btrfs_start_transaction(root
, 0);
2286 if (IS_ERR(trans
)) {
2287 ret
= PTR_ERR(trans
);
2288 up_write(&BTRFS_I(inode
)->dio_sem
);
2289 inode_unlock(inode
);
2294 ret
= btrfs_log_dentry_safe(trans
, root
, dentry
, start
, end
, &ctx
);
2296 /* Fallthrough and commit/free transaction. */

	/*
	 * We've logged all the items and now have a consistent
	 * version of the file in the log. It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);

	/*
	 * If any of the ordered extents had an error, just return it to user
	 * space, so that the application knows some writes didn't succeed and
	 * can take proper action (e.g. retry). Blindly committing the
	 * transaction in this case would fool userspace into thinking
	 * everything was successful. And we also want to make sure our log
	 * doesn't contain file extent items pointing to extents that weren't
	 * fully written to - just like in the non fast fsync path, where we
	 * check for the ordered operation's error flag before writing to the
	 * log tree and return -EIO if any of them had this flag set
	 * (btrfs_wait_ordered_range) - therefore we need to check for errors
	 * in the ordered operations, which are indicated by ctx.io_err.
	 */
	if (ctx.io_err) {
		btrfs_end_transaction(trans);
		ret = ctx.io_err;
		goto out;
	}

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start, len);
			if (ret) {
				btrfs_end_transaction(trans);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_end_transaction(trans);
	}
out:
	ASSERT(list_empty(&ctx.list));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
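
/*
 * Usage sketch (userspace, illustrative only): a shared writable mapping
 * goes through btrfs_file_mmap() above, and the first write fault on a
 * clean page calls back into btrfs_page_mkwrite() via the vm_ops
 * installed here. Assumes fd is an open btrfs file descriptor.
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *	if (p != MAP_FAILED)
 *		p[0] = 'x';	// write fault -> btrfs_page_mkwrite()
 */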

static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}

static int fill_holes(struct btrfs_trans_handle *trans,
		struct btrfs_inode *inode,
		struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret <= 0) {
		/*
		 * We should have dropped this offset, so if we find it then
		 * something has gone horribly wrong.
		 */
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
		u64 num_bytes;

		key.offset = offset;
		btrfs_set_item_key_safe(fs_info, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&inode->runtime_flags);
	}

	return 0;
}

/*
 * Find a hole extent on the given inode and change start/len to the end of
 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
 * em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      round_down(*start, fs_info->sectorsize),
			      round_up(*len, fs_info->sectorsize), 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
	if (em->block_start == EXTENT_MAP_HOLE) {
		ret = 1;
		*len = em->start + em->len > *start + *len ?
		       0 : *start + *len - em->start - em->len;
		*start = em->start + em->len;
	}
	free_extent_map(em);
	return ret;
}
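
/*
 * Worked example (assumed 4K sectorsize): for an extent map with a hole
 * covering [0, 65536) and a call with *start == 4096 and *len == 131072,
 * the function returns 1 and rewrites the range to the part past the
 * hole: *start becomes 65536 and *len becomes 4096 + 131072 - 65536 ==
 * 69632.
 */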

static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart;
	u64 lockend;
	u64 tail_start;
	u64 tail_len;
	u64 orig_start = offset;
	u64 cur_offset;
	u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	unsigned int rsv_count;
	bool same_block;
	bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
	u64 ino_size;
	bool truncated_block = false;
	bool updated_inode = false;

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	inode_lock(inode);
	ino_size = round_up(inode->i_size, fs_info->sectorsize);
	ret = find_first_non_hole(inode, &offset, &len);
	if (ret < 0)
		goto out_only_mutex;
	if (ret && !len) {
		/* Already in a large hole */
		ret = 0;
		goto out_only_mutex;
	}

	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
	lockend = round_down(offset + len,
			     btrfs_inode_sectorsize(inode)) - 1;
	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same block and we aren't doing the
	 * entire block.
	 */
	if (same_block && len < fs_info->sectorsize) {
		if (offset < ino_size) {
			truncated_block = true;
			ret = btrfs_truncate_block(inode, offset, len, 0);
		} else {
			ret = 0;
		}
		goto out_only_mutex;
	}

	/* zero back part of the first block */
	if (offset < ino_size) {
		truncated_block = true;
		ret = btrfs_truncate_block(inode, offset, 0, 0);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	/*
	 * Check the aligned pages after the first unaligned page; if
	 * offset != orig_start, the first unaligned page and several
	 * following pages are already in holes, so the extra check can
	 * be skipped.
	 */
	if (offset == orig_start) {
		/* after truncate page, check hole again */
		len = offset + len - lockstart;
		offset = lockstart;
		ret = find_first_non_hole(inode, &offset, &len);
		if (ret < 0)
			goto out_only_mutex;
		if (ret && !len) {
			ret = 0;
			goto out_only_mutex;
		}
		lockstart = offset;
	}

	/* Check if the tail unaligned part is in a hole */
	tail_start = lockend + 1;
	tail_len = offset + len - tail_start;
	if (tail_len > 0) {
		ret = find_first_non_hole(inode, &tail_start, &tail_len);
		if (unlikely(ret < 0))
			goto out_only_mutex;
		if (!ret) {
			/* zero the front end of the last page */
			if (tail_start + tail_len < ino_size) {
				truncated_block = true;
				ret = btrfs_truncate_block(inode,
							tail_start + tail_len,
							0, 1);
				if (ret)
					goto out_only_mutex;
			}
		}
	}

	if (lockend < lockstart) {
		ret = 0;
		goto out_only_mutex;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set
	 */
	rsv_count = no_holes ? 2 : 3;
	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, 0);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	cur_offset = lockstart;
	len = lockend - cur_offset;
	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1, 0, 0, NULL);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &fs_info->trans_block_rsv;

		if (cur_offset < drop_end && cur_offset < ino_size) {
			ret = fill_holes(trans, BTRFS_I(inode), path,
					 cur_offset, drop_end);
			if (ret) {
				/*
				 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so now the
				 * fs is corrupted, so we must abort the
				 * transaction.
				 */
				btrfs_abort_transaction(trans, ret);
				err = ret;
				break;
			}
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, 0);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;

		ret = find_first_non_hole(inode, &cur_offset, &len);
		if (unlikely(ret < 0))
			break;
		if (ret && !len) {
			ret = 0;
			break;
		}
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &fs_info->trans_block_rsv;
	/*
	 * If we are using the NO_HOLES feature we might already have had a
	 * hole that overlaps a part of the region [lockstart, lockend] and
	 * ends at (or beyond) lockend. Since we have no file extent items to
	 * represent holes, drop_end can be less than lockend and so we must
	 * make sure we have an extent map representing the existing hole (the
	 * call to __btrfs_drop_extents() might have dropped the existing extent
	 * map representing the existing hole), otherwise the fast fsync path
	 * will not record the existence of the hole region
	 * [existing_hole_start, lockend].
	 */
	if (drop_end <= lockend)
		drop_end = lockend + 1;
	/*
	 * Don't insert a file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 bytes range (when
	 * cur_offset == drop_end).
	 */
	if (cur_offset < ino_size && cur_offset < drop_end) {
		ret = fill_holes(trans, BTRFS_I(inode), path,
				 cur_offset, drop_end);
		if (ret) {
			/* Same comment as above. */
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_trans;
		}
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);

	trans->block_rsv = &fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	updated_inode = true;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(fs_info, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
out_only_mutex:
	if (!updated_inode && truncated_block && !ret && !err) {
		/*
		 * If we only end up zeroing part of a page, we still need to
		 * update the inode item, so that all the time fields are
		 * updated as well as the necessary btrfs inode in memory fields
		 * for detecting, at fsync time, if the inode isn't yet in the
		 * log tree or it's there but not up to date.
		 */
		struct timespec now = current_time(inode);

		inode_inc_iversion(inode);
		inode->i_mtime = now;
		inode->i_ctime = now;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
		} else {
			err = btrfs_update_inode(trans, root, inode);
			ret = btrfs_end_transaction(trans);
		}
	}
	inode_unlock(inode);
	if (ret && !err)
		err = ret;
	return err;
}
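
/*
 * Usage sketch (userspace, illustrative only): hole punching is reached
 * through fallocate(2), which requires KEEP_SIZE together with
 * PUNCH_HOLE; btrfs_fallocate() below forwards that combination here.
 * Assumes fd is an open btrfs file descriptor.
 *
 *	#include <fcntl.h>
 *
 *	// Drop [4096, 12288) and return the space to the filesystem.
 *	int ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			    4096, 8192);
 */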

/* Helper structure to record which range is already reserved */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Helper function to add a falloc range
 *
 * Caller should have locked the larger range of extents containing
 * [start, len).
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *prev = NULL;
	struct falloc_range *range = NULL;

	if (list_empty(head))
		goto insert;

	/*
	 * As fallocate iterates in bytenr order, we only need to check
	 * the last range.
	 */
	prev = list_entry(head->prev, struct falloc_range, list);
	if (prev->start + prev->len == start) {
		prev->len += len;
		return 0;
	}
insert:
	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}
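
/*
 * Worked example: fallocate walks the file in increasing offset order,
 * so contiguous ranges coalesce into the tail entry. Adding
 * (start = 0, len = 4096) and then (start = 4096, len = 4096) leaves a
 * single entry {start = 0, len = 8192} on the list, while a
 * discontiguous (start = 16384, len = 4096) gets its own entry.
 */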

static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct falloc_range *range;
	struct falloc_range *tmp;
	struct list_head reserve_list;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 actual_end = 0;
	struct extent_map *em;
	int blocksize = btrfs_inode_sectorsize(inode);
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);
	cur_offset = alloc_start;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Only trigger disk allocation, don't trigger qgroup reserve
	 *
	 * For qgroup space, it will be checked later.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
			alloc_end - alloc_start);
	if (ret < 0)
		return ret;

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out;
	}

	/*
	 * TODO: Move these two operations after we have checked
	 * accurate reserved space, or fallocate can still fail but
	 * with page truncated or size expanded.
	 *
	 * But that's a minor problem and won't do much harm BTW.
	 */
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else if (offset + len > inode->i_size) {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the block if i_size lands in the
		 * middle of a block.
		 */
		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks. We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);

		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_KERNEL);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	/* First, check if we exceed the qgroup limit */
	INIT_LIST_HEAD(&reserve_list);
	while (1) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = add_falloc_range(&reserve_list, cur_offset,
					       last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
			ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
					cur_offset, last_byte - cur_offset);
			if (ret < 0) {
				cur_offset = last_byte;
				free_extent_map(em);
				break;
			}
		} else {
			/*
			 * Do not need to reserve unwritten extent for this
			 * range, free reserved data space first, otherwise
			 * it'll result in false ENOSPC error.
			 */
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, last_byte - cur_offset);
		}
		free_extent_map(em);
		cur_offset = last_byte;
		if (cur_offset >= alloc_end)
			break;
	}

	/*
	 * If ret is still 0, means we're OK to fallocate.
	 * Or just cleanup the list and exit.
	 */
	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
		if (!ret)
			ret = btrfs_prealloc_file_range(inode, mode,
					range->start,
					range->len, i_blocksize(inode),
					offset + len, &alloc_hint);
		else
			btrfs_free_reserved_data_space(inode,
					data_reserved, range->start,
					range->len);
		list_del(&range->list);
		kfree(range);
	}
	if (ret < 0)
		goto out_unlock;

	if (actual_end > inode->i_size &&
	    !(mode & FALLOC_FL_KEEP_SIZE)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We didn't need to allocate any more space, but we
		 * still extended the size of the file so we need to
		 * update i_size and the inode item.
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			inode->i_ctime = current_time(inode);
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
			ret = btrfs_update_inode(trans, root, inode);
			if (ret)
				btrfs_end_transaction(trans);
			else
				ret = btrfs_end_transaction(trans);
		}
	}
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_KERNEL);
out:
	inode_unlock(inode);
	/* Let go of our reservation. */
	if (ret != 0)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, alloc_end - cur_offset);
	extent_changeset_free(data_reserved);
	return ret;
}
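
/*
 * Usage sketch (userspace, illustrative only): plain preallocation
 * allocates unwritten extents; without FALLOC_FL_KEEP_SIZE it also
 * extends i_size. Assumes fd is an open btrfs file descriptor.
 *
 *	#include <fcntl.h>
 *
 *	// Reserve 1 MiB from offset 0; reads of the preallocated range
 *	// return zeroes until the range is actually written.
 *	int ret = fallocate(fd, 0, 0, 1024 * 1024);
 */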

static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 lockstart;
	u64 lockend;
	u64 start;
	u64 len;
	int ret = 0;

	if (inode->i_size == 0)
		return -ENXIO;

	/*
	 * *offset can be negative, in this case we start finding DATA/HOLE from
	 * the very start of the file.
	 */
	start = max_t(loff_t, 0, *offset);

	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size_read(inode),
			   fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;
	len = lockend - lockstart + 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

	while (start < inode->i_size) {
		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
					     start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	if (!ret) {
		if (whence == SEEK_DATA && start >= inode->i_size)
			ret = -ENXIO;
		else
			*offset = min_t(loff_t, start, inode->i_size);
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	inode_lock(inode);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			inode_unlock(inode);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
	inode_unlock(inode);
	return offset;
}
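
/*
 * Usage sketch (userspace, illustrative only): SEEK_DATA and SEEK_HOLE
 * map to find_desired_extent() above and let tools skip holes when
 * copying sparse files. Assumes _GNU_SOURCE is defined and fd is an
 * open btrfs file descriptor.
 *
 *	#include <unistd.h>
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that data run
 *	// [data, hole) holds data; the region before data was a hole.
 */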

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_NOWAIT;
	return generic_file_open(inode, filp);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= generic_file_read_iter,
	.splice_read	= generic_file_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.clone_file_range = btrfs_clone_file_range,
	.dedupe_file_range = btrfs_dedupe_file_range,
};

void btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work. So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there. We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness. Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}