// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
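
/*
 * Note: the rb-tree in fs_info->defrag_inodes is keyed by (root, ino), see
 * __compare_inode_defrag() below, so one tree covers inodes from every
 * subvolume; last_offset and cycled let a partially completed defrag pass
 * resume where it stopped and wrap around to offset zero exactly once.
 */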
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}
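
/*
 * Note: on a duplicate (root, ino) entry the function above returns -EEXIST
 * after folding transid/last_offset into the existing record, so callers
 * must free the record they tried to insert.
 */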
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read it, the new in-memory inode
		 * won't have the flag set, so we may find an existing
		 * defrag record for it in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret;

	if (!__need_auto_defrag(fs_info))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * the existing record with the one being requeued.
	 */
	spin_lock(&fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int ret;

	/* get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
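
/*
 * In short: each pass above defrags at most BTRFS_DEFRAG_BATCH pages starting
 * at defrag->last_offset. A full batch means there is likely more work, so
 * the record is requeued; a short batch that did not begin at offset zero is
 * requeued once more (cycled) so the start of the file gets covered too.
 */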
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* do a chunk of defrag */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (copied < PAGE_SIZE - offset) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
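
/*
 * btrfs_copy_from_user() returns the number of bytes actually copied; a zero
 * or short copy means a page fault, in which case the caller
 * (btrfs_buffered_write) faults the pages in and retries one page at a time.
 */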
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here.  There should be no need to mark the pages
		 * accessed as prepare_pages should have marked them accessed
		 * in prepare_pages via find_or_create_page()
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
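
/*
 * The helper above walks the extent maps in [start, start + len) and tags
 * only the hole ranges with EXTENT_DELALLOC_NEW, so accounting of new bytes
 * stays correct when a buffered write partially overlaps existing extents.
 */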
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, cached);

	if (!btrfs_is_free_space_inode(inode)) {
		if (start_pos >= isize &&
		    !(inode->flags & BTRFS_INODE_PREALLOC)) {
			/*
			 * There can't be any extents following eof in this case
			 * so just set the delalloc new bit for the range
			 * directly.
			 */
			extra_bits |= EXTENT_DELALLOC_NEW;
		} else {
			err = btrfs_find_new_delalloc_bytes(inode, start_pos,
							    num_bytes, cached);
			if (err)
				return err;
		}
	}

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}
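
/*
 * Note the fast path above: for a write entirely beyond EOF on a regular,
 * non-prealloc inode there can be no extents after i_size, so the whole
 * range gets EXTENT_DELALLOC_NEW directly instead of being scanned by
 * btrfs_find_new_delalloc_bytes().
 */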
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
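
/*
 * Two preallocated split maps are enough above: an existing extent map can
 * stick out of [start, end] on at most two sides, so each iteration inserts
 * at most one leading piece (split) and one trailing piece (split2).
 */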
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	struct inode *vfs_inode = &inode->vfs_inode;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= inode->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
		       root == fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(vfs_inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset);
				ret = btrfs_free_extent(trans, &ref);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(vfs_inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are move off from our leaf to its immediate left or
		 * right neighbor leafs, we end up with a correct and adjusted
		 * path->slots[0] for our insertion (if replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, last_end) : end;
	return ret;
}
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
				   end, NULL, drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}
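
/*
 * btrfs_drop_extents() is the simple wrapper: it allocates a path and calls
 * __btrfs_drop_extents() without the replace_extent/drop_end machinery used
 * by callers such as hole punching and clone.
 */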
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
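
/*
 * extent_mergeable() only reports 1 when the neighbouring slot holds a plain
 * (uncompressed, unencrypted) regular extent backed by the same physical
 * extent (same bytenr and orig_offset); btrfs_mark_extent_written() relies
 * on it below to merge a freshly written piece with such neighbours.
 */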
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
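
/*
 * Summary of the cases handled above: merge with the previous item when the
 * written range starts the prealloc extent, merge with the next item when it
 * ends it; otherwise split the extent (adding references) until the written
 * part stands alone, flip its type to BTRFS_FILE_EXTENT_REG, and finally
 * drop the references and items made redundant by the closing merges.
 */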
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}
/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}
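
/*
 * Only the first and last pages may need reading above: interior pages are
 * completely overwritten, while a write that starts or ends mid-page needs
 * that page brought uptodate first (see prepare_uptodate_page()).
 */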
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need re-prepare the pages
 * the other < 0 number - Something wrong happens
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos,
			   fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
				cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&inode->io_tree, start_pos,
					last_pos, cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(&inode->vfs_inode,
					ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * It's possible the pages are dirty right now, but we don't want
	 * to clean them yet because copy_from_user may catch a page fault
	 * and we might have to fall back to one page at a time.  If that
	 * happens, we'll unlock these pages and we'd have a window where
	 * reclaim could sneak in and drop the once-dirty page on the floor
	 * without writing it.
	 *
	 * We have the pages locked and the extent range locked, so there's
	 * no way someone can start IO on any dirty pages in this range.
	 *
	 * We'll call btrfs_dirty_pages() later on, and that will flip around
	 * delalloc bits and dirty the pages as required.
	 */
	for (i = 0; i < num_pages; i++) {
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}
static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
			return -EAGAIN;

		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     num_bytes);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			ret = -EAGAIN;
			goto out_unlock;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart,
						   lockend, NULL);
	}

	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, false);
	if (ret <= 0) {
		ret = 0;
		if (!nowait)
			btrfs_drew_write_unlock(&root->snapshot_lock);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}
out_unlock:
	unlock_extent(&inode->io_tree, lockstart, lockend);

	return ret;
}
static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
			      size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, true);
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:	 File offset
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *		 range
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * >0		and update @write_bytes if we can do nocow write
 *  0		if we can't do nocow write
 * -EAGAIN	if we can't get the needed lock or there are ordered extents
 *		for the (nowait == true) case
 * <0		if other error happened
 *
 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes)
{
	return check_can_nocow(inode, pos, write_bytes, false);
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
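
/*
 * check_nocow_nolock() is the RWF_NOWAIT flavour: it must not block, so
 * check_can_nocow() then uses try_lock_extent() and reports -EAGAIN instead
 * of flushing ordered extents the way the (nowait == false) path does.
 */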
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;

	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
						PAGE_SIZE);
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes);
		if (ret < 0) {
			if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
						   &write_bytes) > 0) {
				/*
				 * For nodata cow case, no need to reserve
				 * data space.
				 */
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_SIZE);
				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 fs_info->sectorsize);
			} else {
				break;
			}
		}

		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));
			break;
		}

		release_bytes = reserve_bytes;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, &cached_state);
		if (extents_locked < 0) {
			if (extents_locked == -EAGAIN)
				goto again;
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors <<
						fs_info->sb->s_blocksize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
						dirty_pages, pos, copied,
						&cached_state);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos,
					       fs_info->sectorsize);
			lockend = round_up(pos + copied,
					   fs_info->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	return num_written ? num_written : ret;
}
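
/*
 * Reservation flow in btrfs_buffered_write(): data space (or a nocow lock)
 * plus metadata are reserved up front for the whole chunk; after the copy,
 * whatever exceeds the sectors actually dirtied is released again, so a
 * short copy_from_user() never leaks reserved space.
 */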
static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t pos;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return written ? written : err;
}
static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	const bool sync = iocb->ki_flags & IOCB_DSYNC;
	ssize_t err;
	loff_t pos;
	size_t count;
	loff_t oldsize;
	int clean_page = 0;

	if (!(iocb->ki_flags & IOCB_DIRECT) &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		inode_unlock(inode);
		return err;
	}

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (iocb->ki_flags & IOCB_NOWAIT) {
		size_t nocow_bytes = count;

		/*
		 * We will allocate space in case nodatacow is not set,
		 * so bail
		 */
		if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
		    <= 0) {
			inode_unlock(inode);
			return -EAGAIN;
		}
		/*
		 * There are holes in the range or parts of the range that must
		 * be COWed (shared extents, RO block groups, etc), so just bail
		 * out.
		 */
		if (nocow_bytes < count) {
			inode_unlock(inode);
			return -EAGAIN;
		}
	}

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err) {
		inode_unlock(inode);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		inode_unlock(inode);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count,
				   fs_info->sectorsize);
		err = btrfs_cont_expand(inode, oldsize, end_pos);
		if (err) {
			inode_unlock(inode);
			goto out;
		}
		if (start_pos > round_up(oldsize, fs_info->sectorsize))
			clean_page = 1;
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = __btrfs_direct_write(iocb, from);
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
		if (clean_page)
			pagecache_isize_extended(inode, oldsize,
						 i_size_read(inode));
	}

	inode_unlock(inode);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	spin_unlock(&BTRFS_I(inode)->lock);
	if (num_written > 0)
		num_written = generic_write_sync(iocb, num_written);

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private && private->filldir_buf)
		kfree(private->filldir_buf);
	kfree(private);
	filp->private_data = NULL;

	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	return 0;
}
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Set the range to full if the NO_HOLES feature is not enabled.
	 * This is to avoid missing file extent items representing holes after
	 * replaying the log.
	 */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
		start = 0;
		end = LLONG_MAX;
	}

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the writeback can proceed in parallel
	 * and improve performance.  See btrfs_wait_ordered_range for an
	 * explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	inode_lock(inode);

	/*
	 * We take the dio_sem here because the tree log stuff can race with
	 * lockless dio writes and get an extent map logged for an extent we
	 * never waited on.  We need it this high up for lockdep reasons.
	 */
	down_write(&BTRFS_I(inode)->dio_sem);

	atomic_inc(&root->log_batch);

	/*
	 * If the inode needs a full sync, make sure we use a full range to
	 * avoid log tree corruption, due to hole detection racing with ordered
	 * extent completion for adjacent ranges and races between logging and
	 * completion of ordered extents for adjacent ranges - both races
	 * could lead to file extent items in the log with overlapping ranges.
	 * Do this while holding the inode lock, to avoid races with other
	 * tasks.
	 */
	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		     &BTRFS_I(inode)->runtime_flags)) {
		start = 0;
		end = LLONG_MAX;
	}

	/*
	 * Before we acquired the inode's lock, someone may have dirtied more
	 * pages in the target range. We need to make sure that writeback for
	 * any such pages does not start while we are logging the inode, because
	 * if it does, any of the following might happen when we are not doing a
	 * full inode sync:
	 *
	 * 1) We log an extent after its writeback finishes but before its
	 *    checksums are added to the csum tree, leading to -EIO errors
	 *    when attempting to read the extent after a log replay.
	 *
	 * 2) We can end up logging an extent before its writeback finishes.
	 *    Therefore after the log replay we will have a file extent item
	 *    pointing to an unwritten extent (and no data checksums as well).
	 *
	 * So trigger writeback for any eventual new dirty pages and then we
	 * wait for all ordered extents to complete below.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret) {
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}

	/*
	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
	 *
	 * Also, the range length can be represented by u64, we have to do the
	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
	 */
	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
	if (ret) {
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}
	atomic_inc(&root->log_batch);

	smp_mb();
	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since fsync was
		 * last called.
		 */
		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit.  With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking joiners.  This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		up_write(&BTRFS_I(inode)->dio_sem);
		inode_unlock(inode);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	up_write(&BTRFS_I(inode)->dio_sem);
	inode_unlock(inode);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_end_transaction(trans);
	}
out:
	ASSERT(list_empty(&ctx.list));
	err = file_check_and_advance_wb_err(file);
	if (!ret)
		ret = err;
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
static int fill_holes(struct btrfs_trans_handle *trans,
		      struct btrfs_inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret <= 0) {
		/*
		 * We should have dropped this offset, so if we find it then
		 * something has gone horribly wrong.
		 */
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
		u64 num_bytes;

		key.offset = offset;
		btrfs_set_item_key_safe(fs_info, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&inode->runtime_flags);
	}

	return 0;
}
/*
 * Find a hole extent on the given inode and change start/len to the end of
 * the hole extent (a hole/vacuum extent whose em->start <= start &&
 * em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
			      round_down(*start, fs_info->sectorsize),
			      round_up(*len, fs_info->sectorsize));
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
	if (em->block_start == EXTENT_MAP_HOLE) {
		ret = 1;
		*len = em->start + em->len > *start + *len ?
		       0 : *start + *len - em->start - em->len;
		*start = em->start + em->len;
	}
	free_extent_map(em);
	return ret;
}
static int btrfs_punch_hole_lock_range(struct inode *inode,
				       const u64 lockstart,
				       const u64 lockend,
				       struct extent_state **cached_state)
{
	while (1) {
		struct btrfs_ordered_extent *ordered;
		int ret;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !filemap_range_has_page(inode->i_mapping,
					     lockstart, lockend)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, cached_state);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct btrfs_clone_extent_info *clone_info,
				     const u64 clone_len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int slot;
	struct btrfs_ref ref = { 0 };
	u64 ref_offset;
	int ret;

	if (clone_len == 0)
		return 0;

	if (clone_info->disk_offset == 0 &&
	    btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_info->file_offset;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      clone_info->item_size);
	if (ret)
		return ret;
	leaf = path->nodes[0];
	slot = path->slots[0];
	write_extent_buffer(leaf, clone_info->extent_buf,
			    btrfs_item_ptr_offset(leaf, slot),
			    clone_info->item_size);
	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset);
	btrfs_set_file_extent_num_bytes(leaf, extent, clone_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
			clone_info->file_offset, clone_len);
	if (ret)
		return ret;

	/* If it's a hole, nothing more needs to be done. */
	if (clone_info->disk_offset == 0)
		return 0;

	inode_add_bytes(inode, clone_len);
	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
			       clone_info->disk_offset,
			       clone_info->disk_len, 0);
	ref_offset = clone_info->file_offset - clone_info->data_offset;
	btrfs_init_data_ref(&ref, root->root_key.objectid,
			    btrfs_ino(BTRFS_I(inode)), ref_offset);
	ret = btrfs_inc_extent_ref(trans, &ref);

	return ret;
}
/*
 * The respective range must have been previously locked, as well as the inode.
 * The end offset is inclusive (last byte of the range).
 * @clone_info is NULL for fallocate's hole punching and non-NULL for extent
 * cloning.
 * When cloning, we don't want to end up in a state where we dropped extents
 * without inserting a new one, so we must abort the transaction to avoid a
 * corruption.
 */
int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
			   const u64 start, const u64 end,
			   struct btrfs_clone_extent_info *clone_info,
			   struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_block_rsv *rsv;
	unsigned int rsv_count;
	u64 cur_offset;
	u64 drop_end;
	u64 len = end - start;
	int ret = 0;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out;
	}
	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set or if we are
	 *     cloning an extent
	 */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info)
		rsv_count = 3;
	else
		rsv_count = 2;

	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	cur_offset = start;
	while (cur_offset < end) {
		ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
					   cur_offset, end + 1, &drop_end,
					   1, 0, 0, NULL);
		if (ret != -ENOSPC) {
			/*
			 * When cloning we want to avoid transaction aborts when
			 * nothing was done and we are attempting to clone parts
			 * of inline extents, in such cases -EOPNOTSUPP is
			 * returned by __btrfs_drop_extents() without having
			 * changed anything in the file.
			 */
			if (clone_info && ret && ret != -EOPNOTSUPP)
				btrfs_abort_transaction(trans, ret);
			break;
		}

		trans->block_rsv = &fs_info->trans_block_rsv;

		if (!clone_info && cur_offset < drop_end &&
		    cur_offset < ino_size) {
			ret = fill_holes(trans, BTRFS_I(inode), path,
					 cur_offset, drop_end);
			if (ret) {
				/*
				 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so now the
				 * fs is corrupted, so we must abort the
				 * transaction.
				 */
				btrfs_abort_transaction(trans, ret);
				break;
			}
		} else if (!clone_info && cur_offset < drop_end) {
			/*
			 * We are past the i_size here, but since we didn't
			 * insert holes we need to clear the mapped area so we
			 * know to not set disk_i_size in this area until a new
			 * file extent is inserted here.
			 */
			ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
					cur_offset, drop_end - cur_offset);
			if (ret) {
				/*
				 * We couldn't clear our area, so we could
				 * presumably adjust up and corrupt the fs, so
				 * we need to abort.
				 */
				btrfs_abort_transaction(trans, ret);
				break;
			}
		}

		if (clone_info && drop_end > clone_info->file_offset) {
			u64 clone_len = drop_end - clone_info->file_offset;

			ret = btrfs_insert_clone_extent(trans, inode, path,
							clone_info, clone_len);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			clone_info->data_len -= clone_len;
			clone_info->data_offset += clone_len;
			clone_info->file_offset += clone_len;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;

		if (!clone_info) {
			ret = find_first_non_hole(inode, &cur_offset, &len);
			if (unlikely(ret < 0))
				break;
			if (ret && !len) {
				ret = 0;
				break;
			}
		}
	}

	/*
	 * If we were cloning, force the next fsync to be a full one since we
	 * replaced (or just dropped in the case of cloning holes when
	 * NO_HOLES is enabled) extents and extent maps.
	 * This is for the sake of simplicity, and cloning into files larger
	 * than 16Mb would force the full fsync any way (when
	 * try_release_extent_mapping() is invoked during page cache truncation).
	 */
	if (clone_info)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	if (ret)
		goto out_trans;

	trans->block_rsv = &fs_info->trans_block_rsv;
	/*
	 * If we are using the NO_HOLES feature we might have had already an
	 * hole that overlaps a part of the region [lockstart, lockend] and
	 * ends at (or beyond) lockend. Since we have no file extent items to
	 * represent holes, drop_end can be less than lockend and so we must
	 * make sure we have an extent map representing the existing hole (the
	 * call to __btrfs_drop_extents() might have dropped the existing extent
	 * map representing the existing hole), otherwise the fast fsync path
	 * will not record the existence of the hole region
	 * [existing_hole_start, lockend].
	 */
	if (drop_end <= end)
		drop_end = end + 1;
	/*
	 * Don't insert file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 bytes range (when
	 * cur_offset == drop_end).
	 */
	if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) {
		ret = fill_holes(trans, BTRFS_I(inode), path,
				 cur_offset, drop_end);
		if (ret) {
			/* Same comment as above. */
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	} else if (!clone_info && cur_offset < drop_end) {
		/* See the comment in the loop above for the reasoning here. */
		ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
				cur_offset, drop_end - cur_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	}
	if (clone_info) {
		ret = btrfs_insert_clone_extent(trans, inode, path, clone_info,
						clone_info->data_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_trans;
		}
	}

out_trans:
	if (!trans)
		goto out_free;

	trans->block_rsv = &fs_info->trans_block_rsv;
	if (ret)
		btrfs_end_transaction(trans);
	else
		*trans_out = trans;
out_free:
	btrfs_free_block_rsv(fs_info, rsv);
out:
	return ret;
}
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	u64 lockstart;
	u64 lockend;
	u64 tail_start;
	u64 tail_len;
	u64 orig_start = offset;
	int ret = 0;
	bool same_block;
	u64 ino_size;
	bool truncated_block = false;
	bool updated_inode = false;

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	inode_lock(inode);
	ino_size = round_up(inode->i_size, fs_info->sectorsize);
	ret = find_first_non_hole(inode, &offset, &len);
	if (ret < 0)
		goto out_only_mutex;
	if (ret && !len) {
		/* Already in a large hole */
		ret = 0;
		goto out_only_mutex;
	}

	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
	lockend = round_down(offset + len,
			     btrfs_inode_sectorsize(inode)) - 1;
	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same block and we aren't doing the
	 * entire block.
	 */
	if (same_block && len < fs_info->sectorsize) {
		if (offset < ino_size) {
			truncated_block = true;
			ret = btrfs_truncate_block(inode, offset, len, 0);
		} else {
			ret = 0;
		}
		goto out_only_mutex;
	}

	/* zero back part of the first block */
	if (offset < ino_size) {
		truncated_block = true;
		ret = btrfs_truncate_block(inode, offset, 0, 0);
		if (ret) {
			inode_unlock(inode);
			return ret;
		}
	}

	/* Check the aligned pages after the first unaligned page,
	 * if offset != orig_start, which means the first unaligned page
	 * including several following pages are already in holes,
	 * the extra check can be skipped */
	if (offset == orig_start) {
		/* after truncate page, check hole again */
		len = offset + len - lockstart;
		offset = lockstart;
		ret = find_first_non_hole(inode, &offset, &len);
		if (ret < 0)
			goto out_only_mutex;
		if (ret && !len) {
			ret = 0;
			goto out_only_mutex;
		}
		lockstart = offset;
	}

	/* Check the tail unaligned part is in a hole */
	tail_start = lockend + 1;
	tail_len = offset + len - tail_start;
	if (tail_len > 0) {
		ret = find_first_non_hole(inode, &tail_start, &tail_len);
		if (unlikely(ret < 0))
			goto out_only_mutex;
		if (!ret) {
			/* zero the front end of the last page */
			if (tail_start + tail_len < ino_size) {
				truncated_block = true;
				ret = btrfs_truncate_block(inode,
							tail_start + tail_len,
							0, 1);
				if (ret)
					goto out_only_mutex;
			}
		}
	}

	if (lockend < lockstart) {
		ret = 0;
		goto out_only_mutex;
	}

	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
					  &cached_state);
	if (ret)
		goto out_only_mutex;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL,
				     &trans);
	btrfs_free_path(path);
	if (ret)
		goto out;

	ASSERT(trans != NULL);
	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);
	updated_inode = true;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
out_only_mutex:
	if (!updated_inode && truncated_block && !ret) {
		/*
		 * If we only end up zeroing part of a page, we still need to
		 * update the inode item, so that all the time fields are
		 * updated as well as the necessary btrfs inode in memory fields
		 * for detecting, at fsync time, if the inode isn't yet in the
		 * log tree or it's there but not up to date.
		 */
		struct timespec64 now = current_time(inode);

		inode_inc_iversion(inode);
		inode->i_mtime = now;
		inode->i_ctime = now;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			int ret2;

			ret = btrfs_update_inode(trans, root, inode);
			ret2 = btrfs_end_transaction(trans);
			if (!ret)
				ret = ret2;
		}
	}
	inode_unlock(inode);
	return ret;
}
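
/*
 * Editor's note: an illustrative user-space sketch (not part of this file) of
 * the entry point into btrfs_punch_hole(): fallocate(2) with
 * FALLOC_FL_PUNCH_HOLE, which the VFS only accepts together with
 * FALLOC_FL_KEEP_SIZE. Path and offsets are made up.
 */
#if 0 /* standalone usage sketch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDWR);

	if (fd < 0)
		exit(1);
	/* Drop 1MiB of data starting at 4KiB; the file size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif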
/* Helper structure to record which range is already reserved */
struct falloc_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Helper function to add falloc range
 *
 * Caller should have locked the larger range of extent containing
 * [start, len)
 */
static int add_falloc_range(struct list_head *head, u64 start, u64 len)
{
	struct falloc_range *prev = NULL;
	struct falloc_range *range = NULL;

	if (list_empty(head))
		goto insert;

	/*
	 * As fallocate iterates by bytenr order, we only need to check
	 * the last range.
	 */
	prev = list_entry(head->prev, struct falloc_range, list);
	if (prev->start + prev->len == start) {
		prev->len += len;
		return 0;
	}
insert:
	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;
	range->start = start;
	range->len = len;
	list_add_tail(&range->list, head);
	return 0;
}
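
/*
 * Editor's note (illustrative): because btrfs_fallocate() walks the target
 * range in increasing offset order, add_falloc_range() only ever has to test
 * the list tail. Adding [0, 4096) and then [4096, 4096) therefore yields a
 * single coalesced entry {start = 0, len = 8192} instead of two entries.
 */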
static int btrfs_fallocate_update_isize(struct inode *inode,
					const u64 end,
					const int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	int ret2;

	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
		return 0;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	inode->i_ctime = current_time(inode);
	i_size_write(inode, end);
	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode(trans, root, inode);
	ret2 = btrfs_end_transaction(trans);

	return ret ? ret : ret2;
}
enum {
	RANGE_BOUNDARY_WRITTEN_EXTENT,
	RANGE_BOUNDARY_PREALLOC_EXTENT,
	RANGE_BOUNDARY_HOLE,
};

static int btrfs_zero_range_check_range_boundary(struct inode *inode,
						 u64 offset)
{
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	struct extent_map *em;
	int ret;

	offset = round_down(offset, sectorsize);
	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
	if (IS_ERR(em))
		return PTR_ERR(em);

	if (em->block_start == EXTENT_MAP_HOLE)
		ret = RANGE_BOUNDARY_HOLE;
	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
	else
		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;

	free_extent_map(em);
	return ret;
}
static int btrfs_zero_range(struct inode *inode,
			    loff_t offset,
			    loff_t len,
			    const int mode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_map *em;
	struct extent_changeset *data_reserved = NULL;
	int ret;
	u64 alloc_hint = 0;
	const u64 sectorsize = btrfs_inode_sectorsize(inode);
	u64 alloc_start = round_down(offset, sectorsize);
	u64 alloc_end = round_up(offset + len, sectorsize);
	u64 bytes_to_reserve = 0;
	bool space_reserved = false;

	inode_dio_wait(inode);

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
			      alloc_end - alloc_start);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	/*
	 * Avoid hole punching and extent allocation for some cases. More cases
	 * could be considered, but these are unlikely common and we keep things
	 * as simple as possible for now. Also, intentionally, if the target
	 * range contains one or more prealloc extents together with regular
	 * extents and holes, we drop all the existing extents and allocate a
	 * new prealloc extent, so that we get a larger contiguous disk extent.
	 */
	if (em->start <= alloc_start &&
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		const u64 em_end = em->start + em->len;

		if (em_end >= offset + len) {
			/*
			 * The whole range is already a prealloc extent,
			 * do nothing except updating the inode's i_size if
			 * needed.
			 */
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		/*
		 * Part of the range is already a prealloc extent, so operate
		 * only on the remaining part of the range.
		 */
		alloc_start = em_end;
		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
		len = offset + len - alloc_start;
		offset = alloc_start;
		alloc_hint = em->block_start + em->len;
	}
	free_extent_map(em);

	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
				      sectorsize);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			free_extent_map(em);
			ret = btrfs_fallocate_update_isize(inode, offset + len,
							   mode);
			goto out;
		}
		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
			free_extent_map(em);
			ret = btrfs_truncate_block(inode, offset, len, 0);
			if (!ret)
				ret = btrfs_fallocate_update_isize(inode,
								   offset + len,
								   mode);
			return ret;
		}
		free_extent_map(em);
		alloc_start = round_down(offset, sectorsize);
		alloc_end = alloc_start + sectorsize;
		goto reserve_space;
	}

	alloc_start = round_up(offset, sectorsize);
	alloc_end = round_down(offset + len, sectorsize);

	/*
	 * For unaligned ranges, check the pages at the boundaries, they might
	 * map to an extent, in which case we need to partially zero them, or
	 * they might map to a hole, in which case we need our allocation range
	 * to cover them.
	 */
	if (!IS_ALIGNED(offset, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(inode, offset);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_start = round_down(offset, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset, 0, 0);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

	if (!IS_ALIGNED(offset + len, sectorsize)) {
		ret = btrfs_zero_range_check_range_boundary(inode,
							    offset + len);
		if (ret < 0)
			goto out;
		if (ret == RANGE_BOUNDARY_HOLE) {
			alloc_end = round_up(offset + len, sectorsize);
			ret = 0;
		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
			ret = btrfs_truncate_block(inode, offset + len, 0, 1);
			if (ret)
				goto out;
		} else {
			ret = 0;
		}
	}

reserve_space:
	if (alloc_start < alloc_end) {
		struct extent_state *cached_state = NULL;
		const u64 lockstart = alloc_start;
		const u64 lockend = alloc_end - 1;

		bytes_to_reserve = alloc_end - alloc_start;
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      bytes_to_reserve);
		if (ret < 0)
			goto out;
		space_reserved = true;
		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
						  &cached_state);
		if (ret)
			goto out;
		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
						alloc_start, bytes_to_reserve);
		if (ret)
			goto out;
		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
						alloc_end - alloc_start,
						i_blocksize(inode),
						offset + len, &alloc_hint);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state);
		/* btrfs_prealloc_file_range releases reserved space on error */
		if (ret) {
			space_reserved = false;
			goto out;
		}
	}
	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
out:
	if (ret && space_reserved)
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
					       alloc_start, bytes_to_reserve);
	extent_changeset_free(data_reserved);

	return ret;
}
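
/*
 * Editor's note: an illustrative user-space sketch (not part of this file) of
 * the path into btrfs_zero_range(): fallocate(2) with FALLOC_FL_ZERO_RANGE.
 * Unaligned head/tail bytes are zeroed in place, while whole blocks become
 * preallocated (unwritten) extents. Path and offsets are made up.
 */
#if 0 /* standalone usage sketch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDWR);

	if (fd < 0)
		exit(1);
	/* Zero 100 bytes starting mid-block: exercises both boundary checks. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 1000, 100) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif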
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct falloc_range *range;
	struct falloc_range *tmp;
	struct list_head reserve_list;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 actual_end = 0;
	struct extent_map *em;
	int blocksize = btrfs_inode_sectorsize(inode);
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);
	cur_offset = alloc_start;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Only trigger disk allocation, don't trigger qgroup reserve
	 *
	 * For qgroup space, it will be checked later.
	 */
	if (!(mode & FALLOC_FL_ZERO_RANGE)) {
		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
						      alloc_end - alloc_start);
		if (ret < 0)
			return ret;
	}

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out;
	}

	/*
	 * TODO: Move these two operations after we have checked
	 * accurate reserved space, or fallocate can still fail but
	 * with page truncated or size expanded.
	 *
	 * But that's a minor problem and won't do much harm BTW.
	 */
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else if (offset + len > inode->i_size) {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the block if i_size lands in the
		 * middle of a block.
		 */
		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = btrfs_zero_range(inode, offset, len, mode);
		inode_unlock(inode);
		return ret;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);

		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	/* First, check if we exceed the qgroup limit */
	INIT_LIST_HEAD(&reserve_list);
	while (cur_offset < alloc_end) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				      alloc_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = add_falloc_range(&reserve_list, cur_offset,
					       last_byte - cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
					&data_reserved, cur_offset,
					last_byte - cur_offset);
			if (ret < 0) {
				cur_offset = last_byte;
				free_extent_map(em);
				break;
			}
		} else {
			/*
			 * Do not need to reserve unwritten extent for this
			 * range, free reserved data space first, otherwise
			 * it'll result in false ENOSPC error.
			 */
			btrfs_free_reserved_data_space(BTRFS_I(inode),
					data_reserved, cur_offset,
					last_byte - cur_offset);
		}
		free_extent_map(em);
		cur_offset = last_byte;
	}

	/*
	 * If ret is still 0, means we're OK to fallocate.
	 * Or just cleanup the list and exit.
	 */
	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
		if (!ret)
			ret = btrfs_prealloc_file_range(inode, mode,
					range->start,
					range->len, i_blocksize(inode),
					offset + len, &alloc_hint);
		else
			btrfs_free_reserved_data_space(BTRFS_I(inode),
					data_reserved, range->start,
					range->len);
		list_del(&range->list);
		kfree(range);
	}
	if (ret < 0)
		goto out_unlock;

	/*
	 * We didn't need to allocate any more space, but we still extended the
	 * size of the file so we need to update i_size and the inode item.
	 */
	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state);
out:
	inode_unlock(inode);
	/* Let go of our reservation. */
	if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
				cur_offset, alloc_end - cur_offset);
	extent_changeset_free(data_reserved);
	return ret;
}
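
/*
 * Editor's note: an illustrative user-space sketch (not part of this file) of
 * plain preallocation through btrfs_fallocate(): mode 0 extends i_size, while
 * FALLOC_FL_KEEP_SIZE preallocates past EOF without changing the file size.
 * Path and sizes are made up.
 */
#if 0 /* standalone usage sketch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/file", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		exit(1);
	/* Reserve 16MiB of unwritten extents and extend i_size. */
	if (fallocate(fd, 0, 0, 16 * 1024 * 1024) < 0)
		perror("fallocate");
	/* Reserve another 16MiB past EOF, leaving i_size untouched. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 16 * 1024 * 1024,
		      16 * 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif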
static loff_t find_desired_extent(struct inode *inode, loff_t offset,
				  int whence)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	loff_t i_size = inode->i_size;
	u64 lockstart;
	u64 lockend;
	u64 start;
	u64 len;
	int ret = 0;

	if (i_size == 0 || offset >= i_size)
		return -ENXIO;

	/*
	 * offset can be negative, in this case we start finding DATA/HOLE from
	 * the very start of the file.
	 */
	start = max_t(loff_t, 0, offset);

	lockstart = round_down(start, fs_info->sectorsize);
	lockend = round_up(i_size, fs_info->sectorsize);
	if (lockend <= lockstart)
		lockend = lockstart + fs_info->sectorsize;
	lockend--;
	len = lockend - lockstart + 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

	while (start < i_size) {
		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
	if (ret) {
		offset = ret;
	} else {
		if (whence == SEEK_DATA && start >= i_size)
			offset = -ENXIO;
		else
			offset = min_t(loff_t, start, i_size);
	}

	return offset;
}
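
/*
 * Editor's note: an illustrative user-space sketch (not part of this file)
 * showing what find_desired_extent() serves: lseek(2) with SEEK_DATA and
 * SEEK_HOLE. It walks a sparse file and prints each data segment. Note that
 * prealloc (unwritten) extents are reported as holes, per the code above.
 */
#if 0 /* standalone usage sketch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/sparse-file", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		exit(1);
	/* lseek() fails with ENXIO once no more data follows the offset. */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}
#endif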
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = find_desired_extent(inode, offset, whence);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return generic_file_open(inode, filp);
}
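
/*
 * Editor's note: an illustrative user-space sketch (not part of this file).
 * FMODE_NOWAIT set in btrfs_file_open() is what lets preadv2(2)/pwritev2(2)
 * accept RWF_NOWAIT and fail with EAGAIN instead of blocking (needs a recent
 * glibc for the preadv2 wrapper). Path is made up.
 */
#if 0 /* standalone usage sketch */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/mnt/btrfs/file", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		exit(1);
	n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		printf("data not cached; a blocking read would sleep\n");
	close(fd);
	return 0;
}
#endif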
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= generic_file_read_iter,
	.splice_read	= generic_file_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
};
void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}