// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	u64 reserve;
	loff_t length;
	ssize_t submitted;
	struct extent_changeset *data_reserved;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);
/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first
 *		     attempt, return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&BTRFS_I(inode)->i_mmap_lock);
	return 0;
}
/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&BTRFS_I(inode)->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
}
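
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * pair the two helpers above with identical flags, for example taking the
 * inode lock shared together with the mmap lock:
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_MMAP);
 *	// ... read-mostly work on the file range ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_MMAP);
 *
 * When BTRFS_ILOCK_TRY is set, btrfs_inode_lock() may return -EAGAIN, in
 * which case no lock is held and btrfs_inode_unlock() must not be called.
 */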
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;
		ClearPagePrivate2(page);
		put_page(page);
	}

	/*
	 * In case this page belongs to the delalloc range being instantiated
	 * then skip it, since the first page of a range is going to be
	 * properly cleaned up by the caller of run_delalloc_range
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		offset += PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}

static int btrfs_dirty_inode(struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, bool extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = offset_in_page(start);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	size = ALIGN(size, root->fs_info->sectorsize);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, size);
	if (ret)
		goto fail;

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
fail:
	return ret;
}
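
/*
 * Example (illustrative, not part of the original file): for a 4000 byte
 * range that compressed down to 1000 bytes, the item built above carries
 * the 1000 compressed bytes as its inline payload while ram_bytes stays
 * 4000, and the compression field records which algorithm to use when the
 * data is read back and decompressed.
 */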
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(&inode->vfs_inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    (!compressed_size &&
	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;

	if (compressed_size && compressed_pages)
		drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, drop_args.extent_inserted,
				   root, &inode->vfs_inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, inline_len, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
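
/*
 * Worked example (illustrative): with a 4K sectorsize and the default
 * max_inline of 2048 bytes, a 1000 byte write at offset 0 passes every
 * check above and gets inlined, while a 5000 byte write fails the
 * actual_end > fs_info->sectorsize check and falls back to the regular
 * COW path (return value 1 means "not inlined", not an error).
 */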
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	atomic_t *pending;
};

struct async_cow {
	/* Number of chunks in flight; must be first in the structure */
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
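
/*
 * Relationship sketch (illustrative): cow_file_range_async() allocates one
 * async_cow holding an array of async_chunk entries, and
 * compress_file_range() then hangs zero or more async_extent records off
 * each chunk:
 *
 *	async_cow
 *	|- num_chunks (refcount, dropped once per chunk in async_cow_free)
 *	`- chunks[]: async_chunk -> extents list -> async_extent, ...
 */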
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode has flags compatible with compression
 */
static inline bool inode_can_compress(struct btrfs_inode *inode)
{
	if (inode->flags & BTRFS_INODE_NODATACOW ||
	    inode->flags & BTRFS_INODE_NODATASUM)
		return false;
	return true;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
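
/*
 * Decision order recap (illustrative): nodatacow/nodatasum always wins and
 * disables compression; mount -o compress-force always compresses; a
 * defrag request with a compression type compresses; a previously recorded
 * bad ratio (BTRFS_INODE_NOCOMPRESS) skips compression; otherwise the
 * heuristic gets the final vote for compress mounts or compress-property
 * inodes.
 */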
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u64 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range(<=blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size
		 */
		total_in = ALIGN(total_in, PAGE_SIZE);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page) <= end)) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}
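
/*
 * Worked example for the "is compression a win" check above (illustrative):
 * with a 4K blocksize, a 128K range that compresses to 126K fails the test
 * (126K + 4K > 128K) and is written uncompressed, while one that compresses
 * to 120K passes (120K + 4K <= 128K), so at least one sector is actually
 * saved on disk.
 */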
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = inode->root;
	struct extent_io_tree *io_tree = &inode->io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

retry:
		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			/* allocate blocks */
			ret = cow_file_range(inode, async_chunk->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(&inode->vfs_inode,
						  async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  WB_SYNC_ALL);
			else if (ret && async_chunk->locked_page)
				unlock_page(async_chunk->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		ret = btrfs_reserve_extent(root, async_extent->ram_size,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fallback to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(&inode->vfs_inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		em = create_io_em(inode, async_extent->start,
				  async_extent->ram_size, /* len */
				  async_extent->start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  async_extent->ram_size, /* ram_bytes */
				  async_extent->compress_type,
				  BTRFS_ORDERED_COMPRESSED);
		if (IS_ERR(em))
			/* ret value is not necessary due to void function */
			goto out_free_reserve;
		free_extent_map(em);

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_START_WRITEBACK);
		if (btrfs_submit_compressed_write(inode, async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages,
				    async_chunk->write_flags,
				    async_chunk->blkcg_css)) {
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->vfs_inode.i_mapping;
			btrfs_writepage_endio_finish_ordered(p, start, end, 0);

			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
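
/*
 * Behaviour sketch (illustrative): if the range being written overlaps an
 * extent mapped at disk block X, the allocator is hinted with X so the new
 * allocation lands nearby; if the range maps to a hole or inline extent
 * (block_start >= EXTENT_MAP_LAST_BYTE), the hint falls back to the
 * inode's first real mapping, or stays 0 if there is none.
 */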
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size,
					       BTRFS_ORDERED_REGULAR);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount. We must make
	 * sure extent_clear_unlock_delalloc() does not try to decrement again
	 * the data space_info's bytes_may_use counter, therefore we do not pass
	 * it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}
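
/*
 * Worked example (illustrative): for a 1MiB dirty range the loop above may
 * get only 512K back from btrfs_reserve_extent() on a fragmented disk.
 * That first 512K gets its extent map, ordered extent and unlocked pages,
 * alloc_hint moves to the end of the allocation, start advances by 512K,
 * and the loop repeats for the remaining 512K.
 */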
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);

	/*
	 * ->inode could be NULL if async_cow_start has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in cow_file_range_async
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);
	/*
	 * Since the pointer to 'pending' is at the beginning of the array of
	 * async_chunk's, freeing it ensures the whole array has been freed.
	 */
	if (atomic_dec_and_test(async_chunk->pending))
		kvfree(async_chunk->pending);
}
static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const unsigned int write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].pending = &ctx->num_chunks;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to locked_page
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
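
/*
 * Chunking example (illustrative): a 2MiB compressible delalloc range is
 * split into four 512K async_chunk entries, each queued as separate work,
 * while the same range on a NOCOMPRESS inode (without compress-force) is
 * handled as a single chunk covering the whole range.
 */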
static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	int ret;

	ret = cow_file_range(inode, locked_page, start, end, page_started,
			     nr_written, 0);
	if (ret)
		return ret;

	if (*page_started)
		return 0;

	__set_page_dirty_nobuffers(locked_page);
	account_page_redirty(locked_page);
	extent_write_locked_range(&inode->vfs_inode, start, end, WB_SYNC_ALL);
	*page_started = 1;

	return 0;
}
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = (inode->root->root_key.objectid ==
				   BTRFS_DATA_RELOC_TREE_OBJECTID);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 0, 0, NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1);
}
/*
 * Run the nocow writeback callback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end,
				       int *page_started,
				       unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	const bool freespace_inode = btrfs_is_free_space_inode(inode);
	u64 ino = btrfs_ino(inode);
	bool nocow = false;
	u64 disk_bytenr = 0;
	const bool force = inode->flags & BTRFS_INODE_NODATACOW;

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	while (1) {
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 extent_offset;
		u64 num_bytes = 0;
		u64 disk_num_bytes;
		u64 ram_bytes;
		int extent_type;

		nocow = false;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersect it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			/*
			 * If the extent we got ends before our current offset,
			 * skip to the next extent.
			 */
			if (extent_end <= cur_offset) {
				path->slots[0]++;
				goto next_slot;
			}
			/* Skip holes */
			if (disk_bytenr == 0)
				goto out_check;
			/* Skip compressed/encrypted/encoded extents */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			/*
			 * If extent is created before the last volume's snapshot
			 * this implies the extent is shared, hence we can't do
			 * nocow. This is the same check as in
			 * btrfs_cross_ref_exist but without calling
			 * btrfs_search_slot.
			 */
			if (!freespace_inode &&
			    btrfs_file_extent_generation(leaf, fi) <=
			    btrfs_root_last_snapshot(&root->root_item))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;

			/*
			 * The following checks can be expensive, as they need to
			 * take other locks and do btree or rbtree searches, so
			 * release the path to avoid blocking other tasks for too
			 * long.
			 */
			btrfs_release_path(path);

			ret = btrfs_cross_ref_exist(root, ino,
						    found_key.offset -
						    extent_offset, disk_bytenr, false);
			if (ret) {
				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}

				WARN_ON_ONCE(freespace_inode);
				goto out_check;
			}
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * If there are pending snapshots for this root, we
			 * fall into common COW way
			 */
			if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
				goto out_check;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			ret = csum_exist_in_range(fs_info, disk_bytenr,
						  num_bytes);
			if (ret) {
				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}
				WARN_ON_ONCE(freespace_inode);
				goto out_check;
			}
			/* If the extent's block group is RO, we must COW */
			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
				goto out_check;
			nocow = true;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset + ram_bytes;
			extent_end = ALIGN(extent_end, fs_info->sectorsize);
			/* Skip extents outside of our requested range */
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
		} else {
			/* If this triggers then we have a memory corruption */
			BUG();
		}
out_check:
		/*
		 * If nocow is false then record the beginning of the range
		 * that needs to be COWed
		 */
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1,
					      page_started, nr_written);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, num_bytes,
					  orig_start,
					  disk_bytenr, /* block_start */
					  num_bytes, /* block_len */
					  disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       disk_bytenr, num_bytes,
						       num_bytes,
						       BTRFS_ORDERED_PREALLOC);
			if (ret) {
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset + num_bytes - 1,
							0);
				goto error;
			}
		} else {
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       disk_bytenr, num_bytes,
						       num_bytes,
						       BTRFS_ORDERED_NOCOW);
			if (ret)
				goto error;
		}

		if (nocow)
			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
		nocow = false;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID)
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end,
				      page_started, nr_written);
		if (ret)
			goto error;
	}

error:
	if (nocow)
		btrfs_dec_nocow_writers(fs_info, disk_bytenr);

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
				   0, NULL))
			return false;
		return true;
	}
	return false;
}
/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
		u64 start, u64 end, int *page_started, unsigned long *nr_written,
		struct writeback_control *wbc)
{
	int ret;
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);

	if (should_nocow(inode, start, end)) {
		ASSERT(!zoned);
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, nr_written);
	} else if (!inode_can_compress(inode) ||
		   !inode_need_compress(inode, start, end)) {
		if (zoned)
			ret = run_delalloc_zoned(inode, locked_page, start, end,
						 page_started, nr_written);
		else
			ret = cow_file_range(inode, locked_page, start, end,
					     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
					   page_started, nr_written);
	}
	if (ret)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
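
/*
 * Path summary (illustrative): nodatacow/prealloc inodes go through
 * run_delalloc_nocow(); ranges that won't be compressed take either the
 * zoned path or a plain cow_file_range(); everything else is handed to
 * the async compression machinery via cow_file_range_async().
 */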
void btrfs_split_delalloc_extent(struct inode *inode,
				 struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(new_size);
		if (count_max_extents(size) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(old_size);
	if (count_max_extents(new_size) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
	spin_unlock(&BTRFS_I(inode)->lock);
}
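
/*
 * Numeric recap of the comment above (illustrative, with
 * BTRFS_MAX_EXTENT_SIZE = 128M): merging two 128M+4K ranges gives
 * new_size = 256M+8K, so count_max_extents(new_size) = 3, while the two
 * sides separately accounted for 2 + 2 = 4, hence one outstanding extent
 * is dropped here.
 */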
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	spin_lock(&root->delalloc_lock);
	__btrfs_del_delalloc_inode(root, inode);
	spin_unlock(&root->delalloc_lock);
}
/*
 * Properly track delayed allocation bytes in the inode and maintain the
 * list of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
			       unsigned *bits)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(len);
		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));

		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
		spin_unlock(&BTRFS_I(inode)->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
			state->start;
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
				 struct extent_state *state, unsigned *bits)
{
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(len);

	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, false);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (*bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (*bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
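/*
 * How the two hooks above are reached (a summary, not new behavior): they
 * are invoked from the extent io tree code whenever EXTENT_DELALLOC style
 * bits change on a data inode's io_tree, e.g.:
 *
 *   btrfs_set_extent_delalloc()
 *     -> set_extent_delalloc()          sets EXTENT_DELALLOC in the io tree
 *       -> btrfs_set_delalloc_extent()  accounting side effects
 *
 * and any clear_extent_bit()/extent_clear_unlock_delalloc() call that drops
 * EXTENT_DELALLOC ends up in btrfs_clear_delalloc_extent() to undo them.
 */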
/*
 * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
 * in a chunk's stripe. This function ensures that bios do not span a
 * stripe or chunk.
 *
 * @page - The page we are about to add to the bio
 * @size - size we want to add to the bio
 * @bio - bio we want to ensure is smaller than a stripe
 * @bio_flags - flags of the bio
 *
 * return 1 if page cannot be added to the bio
 * return 0 if page can be added to the bio
 * return error otherwise
 */
int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
			     unsigned long bio_flags)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 logical = bio->bi_iter.bi_sector << 9;
	struct extent_map *em;
	u64 length = 0;
	u64 map_length;
	int ret = 0;
	struct btrfs_io_geometry geom;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	em = btrfs_get_chunk_map(fs_info, logical, map_length);
	if (IS_ERR(em))
		return PTR_ERR(em);
	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical,
				    map_length, &geom);
	if (ret < 0)
		goto out;

	if (geom.len < length + size)
		ret = 1;
out:
	free_extent_map(em);
	return ret;
}
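/*
 * Usage sketch (illustrative; the real caller sits in the bio building path
 * of extent_io.c): check before growing a bio so that a data bio never
 * crosses a stripe boundary.
 *
 *	if (btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio, bio_flags))
 *		// submit the current bio and start a new one for @page
 *	else
 *		// safe to add @page to @bio
 */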
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
					   u64 dio_file_offset)
{
	return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
				      unsigned int size)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_extent *ordered;
	u64 len = bio->bi_iter.bi_size + size;
	bool ret = true;

	ASSERT(btrfs_is_zoned(fs_info));
	ASSERT(fs_info->max_zone_append_size > 0);
	ASSERT(bio_op(bio) == REQ_OP_ZONE_APPEND);

	/* Ordered extent not yet created, so we're good */
	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
	if (!ordered)
		return ret;

	if ((bio->bi_iter.bi_sector << SECTOR_SHIFT) + len >
	    ordered->disk_bytenr + ordered->disk_num_bytes)
		ret = false;

	btrfs_put_ordered_extent(ordered);

	return ret;
}
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
					   struct bio *bio, loff_t file_offset)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_map *em = NULL, *em_new = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bio->bi_iter.bi_size;
	u64 end = start + len;
	u64 ordered_end;
	u64 pre, post;
	int ret = 0;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (WARN_ON_ONCE(!ordered))
		return BLK_STS_IOERR;

	/* No need to split */
	if (ordered->disk_num_bytes == len)
		goto out;

	/* We cannot split once end_bio'd ordered extent */
	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
		ret = -EINVAL;
		goto out;
	}

	/* We cannot split a compressed ordered extent */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
		ret = -EINVAL;
		goto out;
	}

	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
	/* bio must be in one ordered extent */
	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
		ret = -EINVAL;
		goto out;
	}

	/* Checksum list should be empty */
	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
		ret = -EINVAL;
		goto out;
	}

	pre = start - ordered->disk_bytenr;
	post = ordered_end - end;

	ret = btrfs_split_ordered_extent(ordered, pre, post);
	if (ret)
		goto out;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
	if (!em) {
		read_unlock(&em_tree->lock);
		ret = -EIO;
		goto out;
	}
	read_unlock(&em_tree->lock);

	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
	/*
	 * We cannot reuse em_new here but have to create a new one, as
	 * unpin_extent_cache() expects the start of the extent map to be the
	 * logical offset of the file, which does not hold true anymore after
	 * splitting the ordered extent.
	 */
	em_new = create_io_em(inode, em->start + pre, len,
			      em->start + pre, em->block_start + pre, len,
			      len, len, BTRFS_COMPRESS_NONE,
			      BTRFS_ORDERED_REGULAR);
	if (IS_ERR(em_new)) {
		ret = PTR_ERR(em_new);
		goto out;
	}
	free_extent_map(em_new);

out:
	free_extent_map(em);
	btrfs_put_ordered_extent(ordered);

	return errno_to_blk_status(ret);
}
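/*
 * Worked example for the pre/post split above (illustrative numbers):
 * suppose the ordered extent covers disk bytes [1M, 1M+256K) and the zone
 * append bio ended up covering [1M+64K, 1M+192K).  Then:
 *
 *   pre  = start - ordered->disk_bytenr = 64K
 *   post = ordered_end - end            = 64K
 *
 * btrfs_split_ordered_extent() carves the 128K the bio actually wrote into
 * its own ordered extent, leaving 64K before and 64K after to be completed
 * by their own bios.
 */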
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read.
 *
 * Rules about async/sync submit,
 * a) read:				sync submit
 *
 * b) write without checksum:		sync submit
 *
 * c) write with checksum:
 *    c-1) if bio is issued by fsync:	sync submit
 *         (sync_writers != 0)
 *
 *    c-2) if root is reloc root:	sync submit
 *         (only in case of buffered IO)
 *
 *    c-3) otherwise:			async submit
 */
blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	blk_status_t ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
		   !fs_info->csum_root;

	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct page *page = bio_first_bvec_all(bio)->bv_page;
		loff_t file_offset = page_offset(page);

		ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
		if (ret)
			goto out;
	}

	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else {
			/*
			 * Lookup bio sums does extra checks around whether we
			 * need to csum or not, which is why we ignore skip_sum
			 * here.
			 */
			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
					  0, btrfs_submit_bio_start);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(fs_info, bio, mirror_num);

out:
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
				     GFP_NOFS, NULL);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
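/*
 * Walk-through of the loop above (illustrative): for a buffered write into
 * [0, 128K) of a file that already has an extent at [0, 64K) and a hole at
 * [64K, 128K), btrfs_get_extent() first returns the existing extent (not a
 * hole, so it is skipped), then the hole mapping, so only [64K, 128K) gets
 * the EXTENT_DELALLOC_NEW bit.  That is what later lets us distinguish
 * bytes that grow the inode's byte count from bytes that merely rewrite
 * existing extents.
 */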
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
				   cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct inode *inode;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct btrfs_inode *inode;
	u64 page_start;
	u64 page_end;
	int ret = 0;
	bool free_delalloc_space = true;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
	inode = BTRFS_I(fixup->inode);
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the page lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
again:
	lock_page(page);

	/*
	 * Before we queued this fixup, we took a reference on the page.
	 * page->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our page had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail.  This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our page was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
		 *    this case we obviously don't have anything to release, but
		 *    because the page was already dealt with we don't want to
		 *    mark the page with an error, so make sure we're resetting
		 *    ret to 0.  This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the page was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, PAGE_SIZE,
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the page state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!PageDirty(page));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent_cached(&inode->io_tree, page_start, page_end,
			     &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors.  Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		clear_page_dirty_for_io(page);
		SetPageError(page);
	}
	ClearPageChecked(page);
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(&inode->vfs_inode);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	/*
	 * PageChecked is set below when we create a fixup worker for this page,
	 * don't try to create another one if we're already PageChecked()
	 *
	 * The extent_io writepage code will redirty the page if we send back
	 * EAGAIN.
	 */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages.  We need to hold it because the space reservation
	 * takes place outside of the page lock, and we can't trust
	 * page->mapping outside of the page lock.
	 */
	ihold(inode);
	SetPageChecked(page);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	fixup->inode = inode;
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
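/*
 * Lifecycle summary for the fixup above (descriptive, no new behavior):
 * PageChecked is set here, under the page lock, to mark "fixup queued";
 * btrfs_writepage_fixup_worker() requires it to still be set, and its
 * ClearPageChecked() ends the cycle.  The extra page and inode references
 * taken here are likewise dropped by the worker.
 */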
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			btrfs_item_ptr_offset(leaf, path->slots[0]),
			sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos, qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
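/*
 * Worked example for the inline-extent adjustment above (illustrative;
 * assumes a 4K sectorsize): if we dropped an inline extent of 2600 bytes at
 * file offset 0, drop_args.bytes_found == 2600, which is not 4K aligned, so:
 *
 *   inline_size = 2600 - round_down(2600, 4K) = 2600
 *   btrfs_update_inode_bytes(inode, 4K, 2600)   adds one sector, removes
 *                                               the inline bytes
 *   drop_args.bytes_found -= 2600;  num_bytes -= 4K;
 *
 * i.e. the first sector of the new extent replaces the inline extent in the
 * inode's byte accounting, and the rest is handled at ordered extent
 * completion.
 */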
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	u64 logical_len;
	bool update_inode_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
		logical_len = oe->truncated_len;
	else
		logical_len = oe->num_bytes;
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, logical_len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, logical_len);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (ordered_extent->disk)
		btrfs_rewrite_logical_zoned(ordered_extent);

	btrfs_free_io_failure_record(inode, start, end);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		if (freespace_inode)
			trans = btrfs_join_transaction_spacecache(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &inode->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	clear_bits |= EXTENT_LOCKED;
	lock_extent_bits(io_tree, start, end, &cached_state);

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes);
		}
	}
	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
			   ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 0, 0, &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = 0;
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/* Drop the cache for the part of the extent we didn't write. */
		btrfs_drop_extent_cache(inode, unwritten_start, end, 0);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC,
			      &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
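/*
 * Reference counting note for the function above (descriptive): an ordered
 * extent holds one reference for the tree it lives in and one for the IO
 * that completes it, which is why btrfs_finish_ordered_io() ends with two
 * btrfs_put_ordered_extent() calls once the extent has been removed from
 * the tree.
 */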
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}
void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
					  u64 end, int uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct btrfs_workqueue *wq;

	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					    end - start + 1, uptodate))
		return;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
	btrfs_queue_work(wq, &ordered_extent->work);
}
/*
 * check_data_csum - verify checksum of one sector of uncompressed data
 * @inode:	inode
 * @io_bio:	btrfs_io_bio which contains the csum
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @page:	page where is the data to be verified
 * @pgoff:	offset inside the page
 * @start:	logical offset in the file
 *
 * The length of such check is always one sector size.
 */
static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
			   u32 bio_offset, struct page *page, u32 pgoff,
			   u64 start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	u32 len = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	unsigned int offset_sectors;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(pgoff + len <= PAGE_SIZE);

	offset_sectors = bio_offset >> fs_info->sectorsize_bits;
	csum_expected = ((u8 *)io_bio->csum) + offset_sectors * csum_size;

	kaddr = kmap_atomic(page);
	shash->tfm = fs_info->csum_shash;

	crypto_shash_digest(shash, kaddr + pgoff, len, csum);

	if (memcmp(csum, csum_expected, csum_size))
		goto zeroit;

	kunmap_atomic(kaddr);
	return 0;
zeroit:
	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
				    io_bio->mirror_num);
	if (io_bio->device)
		btrfs_dev_stat_inc_and_print(io_bio->device,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memset(kaddr + pgoff, 1, len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	return -EIO;
}
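/*
 * Worked example for the csum lookup above (illustrative; assumes a 4K
 * sectorsize and crc32c, i.e. csum_size == 4): for bio_offset == 12K,
 *
 *   offset_sectors = 12K >> 12 = 3
 *   csum_expected  = io_bio->csum + 3 * 4
 *
 * so the expected checksum of the fourth sector of the bio sits 12 bytes
 * into the bio's csum array, and exactly one sector (4K) is hashed and
 * compared against it.
 */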
/*
 * When reads are done, we need to check csums to verify the data is correct.
 * if there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 *
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @start:	file offset of the range start
 * @end:	file offset of the range end (inclusive)
 */
int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
			   struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u32 sectorsize = root->fs_info->sectorsize;
	u32 pg_off;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		return 0;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (!root->fs_info->csum_root)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
		return 0;
	}

	ASSERT(page_offset(page) <= start &&
	       end <= page_offset(page) + PAGE_SIZE - 1);
	for (pg_off = offset_in_page(start);
	     pg_off < offset_in_page(end);
	     pg_off += sectorsize, bio_offset += sectorsize) {
		int ret;

		ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
				      page_offset(page) + pg_off);
		if (ret < 0)
			return -EIO;
	}
	return 0;
}
/*
 * btrfs_add_delayed_iput - perform a delayed iput on @inode
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	spin_lock(&fs_info->delayed_iput_lock);
	ASSERT(list_empty(&binode->delayed_iput));
	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
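/*
 * Usage sketch (illustrative): contexts that must not recurse into space
 * flushing, such as the writepage fixup worker above, call
 *
 *	btrfs_add_delayed_iput(inode);
 *
 * instead of iput(inode), deferring a possible final iput to the cleaner
 * kthread or to transaction commit, where flushing is safe.
 */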
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock(&fs_info->delayed_iput_lock);
}
static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock(&fs_info->delayed_iput_lock);
	}
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
				struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}
/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}
/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
	"Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, last_objectid, root);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
					(unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities. Old kernels (before v3.12) used to create an
		 * orphan item for truncate indicating that there were possibly
		 * extent items past i_size that needed to be deleted. In v3.12,
		 * truncate was changed to update i_size in sync with the extent
		 * items, but the (useless) orphan item was still created. Since
		 * v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (ret == -ENOENT || inode->i_nlink) {
			if (!ret)
				iput(inode);
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
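/*
 * Item layout example behind the peek-ahead above (descriptive): within a
 * leaf, items for one inode sort by (objectid, type), e.g.
 *
 *   (257, INODE_ITEM) (257, INODE_REF) (257, XATTR_ITEM) (257, EXTENT_DATA)
 *
 * so any ACL xattr must appear between the inode item and the first key
 * with a type greater than XATTR_ITEM, which bounds how far we need to scan.
 */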
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode,
				   struct btrfs_path *in_path)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = in_path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (path != in_path)
			btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * memory.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
			       "error loading props for ino %llu (root %llu): %d",
			       btrfs_ino(BTRFS_I(inode)),
			       root->root_key.objectid, ret);
	}
	if (path != in_path)
		btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	return 0;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const char *name, int name_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				    name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
			dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
			index);
	if (ret == -ENOENT)
		ret = 0;
	else if (ret)
		btrfs_abort_transaction(trans, ret);

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context.  If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks.  Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const char *name, int name_len)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode
	 */
	return btrfs_start_transaction_fallback_global_rsv(root, 5);
}
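/*
 * Accounting behind the "5" above (descriptive): an unlink may touch one
 * tree item each for the possible orphan item, the dir item, the dir index,
 * the inode ref and the inode item itself, so five units of metadata are
 * reserved, falling back to the global reserve when the normal reservation
 * fails.
 */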
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
			0);

	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
			dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto out;
	}

out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(root->fs_info);
	return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	const char *name = dentry->d_name.name;
	int name_len = dentry->d_name.len;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(BTRFS_I(dir));

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = inode->root->root_key.objectid;
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->location.objectid;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation.  In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it.  Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 root->root_key.objectid, dir_ino,
					 &index, name, name_len);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
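/*
 * Note on the function above: a directory entry that points at a subvolume or
 * snapshot refers to a ROOT_ITEM key in the root tree rather than an
 * INODE_ITEM in this subvolume, so unlinking it means removing a root ref
 * (or, for a snapshot's placeholder directory, just the dir index item)
 * instead of an inode ref.
 */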
/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
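/*
 * Note on the search above: root tree keys are ordered by
 * (objectid, type, offset), so searching for (root, BTRFS_ROOT_REF_KEY,
 * (u64)-1) positions the path just past the last ROOT_REF item of this root.
 * Stepping back one slot and re-checking the key therefore tells us whether
 * at least one child subvolume reference exists.
 */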
/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(entry) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from the inode
			 * cache when its usage count hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}
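/*
 * Note on the loop above: once igrab() succeeds, inode_lock must be dropped
 * before calling d_prune_aliases() and iput(), since both may sleep.  The
 * rbtree can change while the lock is dropped, so the walk restarts from the
 * last objectid seen instead of trusting a stale rb_node pointer.
 */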
int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	int ret;

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
		return -EPERM;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	down_write(&fs_info->subvol_sem);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_up_write;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_up_write;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					fs_info->tree_root,
					dest->root_key.objectid);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
		return btrfs_delete_subvolume(dir, dentry);

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, dir, dentry);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
			dentry->d_name.len);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(root->fs_info);

	return err;
}
/*
 * Return this if we need to call truncate_block for the last bit of the
 * truncate.
 */
#define NEED_TRUNCATE_BLOCK 1

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 last_size = new_size;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	u64 ino = btrfs_ino(inode);
	u64 bytes_deleted = 0;
	bool be_nice = false;
	bool should_throttle = false;
	const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
	struct extent_state *cached_state = NULL;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
	/*
	 * For non-free space inodes and non-shareable roots, we want to back
	 * off from time to time.  This means all inodes in subvolume roots,
	 * reloc roots, and data reloc roots.
	 */
	if (!btrfs_is_free_space_inode(inode) &&
	    test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		be_nice = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
				 &cached_state);

		/*
		 * We want to drop from the next block forward in case this
		 * new size is not block aligned since we will be keeping the
		 * last block of the extent just the way it is.
		 */
		btrfs_drop_extent_cache(inode, ALIGN(new_size,
						     fs_info->sectorsize),
					(u64)-1, 0);
	}

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the delayed
	 * items.
	 */
	if (min_type == 0 && root == inode->root)
		btrfs_kill_delayed_inode_items(inode);

	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;
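	/*
	 * The loop below walks items from high offsets downwards.  Contiguous
	 * leaf slots picked for deletion are batched via pending_del_slot and
	 * pending_del_nr and removed with a single btrfs_del_items() call per
	 * run, which avoids rebalancing the leaf once per deleted item.
	 */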
search_again:
	/*
	 * with a 16K leaf size and 128MB extents, you can actually queue
	 * up a huge file in a single leaf.  Most of the time that
	 * bytes_deleted is > 0, it will be huge by the time we get here
	 */
	if (be_nice && bytes_deleted > SZ_32M &&
	    btrfs_should_end_transaction(trans)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = 0;
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		u64 clear_start = 0, clear_len = 0;

		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = found_key.type;

		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);

				trace_btrfs_truncate_show_fi_regular(
					inode, leaf, fi, found_key.offset);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_ram_bytes(leaf,
									fi);

				trace_btrfs_truncate_show_fi_inline(
					inode, leaf, fi, path->slots[0],
					found_key.offset);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;

			clear_start = found_key.offset;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
						found_key.offset,
						fs_info->sectorsize);
				clear_start = ALIGN(new_size, fs_info->sectorsize);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (test_bit(BTRFS_ROOT_SHAREABLE,
					     &root->state) &&
				    extent_start != 0)
					inode_sub_bytes(&inode->vfs_inode,
							num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (test_bit(BTRFS_ROOT_SHAREABLE,
						     &root->state))
						inode_sub_bytes(&inode->vfs_inode,
								num_dec);
				}
			}
			clear_len = num_dec;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
			    btrfs_file_extent_compression(leaf, fi) == 0) {
				u32 size = (u32)(new_size - found_key.offset);

				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
				size = btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(path, size, 1);
			} else if (!del_item) {
				/*
				 * We have to bail so the last_size is set to
				 * just before this extent.
				 */
				ret = NEED_TRUNCATE_BLOCK;
				break;
			} else {
				/*
				 * Inline extents are special, we just treat
				 * them as a full sector worth in the file
				 * extent tree just for simplicity sake.
				 */
				clear_len = fs_info->sectorsize;
			}

			if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				inode_sub_bytes(&inode->vfs_inode,
						item_end + 1 - new_size);
		}
delete:
		/*
		 * We use btrfs_truncate_inode_items() to clean up log trees for
		 * multiple fsyncs, and in this case we don't want to clear the
		 * file extent range because it's just the log.
		 */
		if (root == inode->root) {
			ret = btrfs_inode_clear_file_extent_range(inode,
						  clear_start, clear_len);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
		}

		if (del_item)
			last_size = found_key.offset;
		else
			last_size = new_size;
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		should_throttle = false;

		if (found_extent &&
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			struct btrfs_ref ref = { 0 };

			bytes_deleted += extent_num_bytes;

			btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
					extent_start, extent_num_bytes, 0);
			ref.real_root = root->root_key.objectid;
			btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
					ino, extent_offset);
			ret = btrfs_free_extent(trans, &ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			if (be_nice) {
				if (btrfs_should_throttle_delayed_refs(trans))
					should_throttle = true;
			}
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot ||
		    should_throttle) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);

			/*
			 * We can generate a lot of delayed refs, so we need to
			 * throttle every once and a while and make sure we're
			 * adding enough space to keep up with the work we are
			 * generating.  Since we hold a transaction here we
			 * can't flush, and we don't want to FLUSH_LIMIT because
			 * we could have generated too many delayed refs to
			 * actually allocate, so just bail if we're short and
			 * let the normal reservation dance happen higher up.
			 */
			if (should_throttle) {
				ret = btrfs_delayed_refs_rsv_refill(fs_info,
							BTRFS_RESERVE_NO_FLUSH);
				if (ret) {
					ret = -EAGAIN;
					break;
				}
			}
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (ret >= 0 && pending_del_nr) {
		int err;

		err = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (err) {
			btrfs_abort_transaction(trans, err);
			ret = err;
		}
	}
	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ASSERT(last_size >= new_size);
		if (!ret && last_size > new_size)
			last_size = new_size;
		btrfs_inode_safe_disk_i_size_write(inode, last_size);
		unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
				     &cached_state);
	}

	btrfs_free_path(path);
	return ret;
}
/*
 * btrfs_truncate_block - read, zero a chunk and write a block
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;
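	/*
	 * Worked example (hypothetical numbers): with a 4K sectorsize and
	 * from = 5000 we get offset = 5000 & 4095 = 904, block_start =
	 * round_down(5000, 4096) = 4096 and block_end = 8191, so the block
	 * covering bytes 4096..8191 is the one read, cowed and zeroed below.
	 */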
	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			memzero_page(page, (block_start - page_offset(page)),
				     offset);
		else
			memzero_page(page, (block_start - page_offset(page)) + offset,
				     len);
		flush_dcache_page(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
					block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	unlock_page(page);
	put_page(page);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * Still need to make sure the inode looks like it's been updated so
	 * that any holes get logged if we fsync.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
		inode->last_trans = fs_info->generation;
		inode->last_sub_trans = root->log_transid;
		inode->last_log_commit = root->last_log_commit;
		return 0;
	}

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
			offset, 0, 0, len, 0, len, 0, 0, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;

			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;

			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&inode->runtime_flags);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = fs_info->generation;

			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
		} else {
			err = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (err)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
	return err;
}
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_ctime = inode->i_mtime =
				current_time(inode);
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(inode,
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(inode, newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}
static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(&init_user_ns, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(&init_user_ns, inode,
					      inode->i_mode);
	}

	return err;
}
/*
 * While truncating the inode pages during eviction, we get the VFS calling
 * btrfs_invalidatepage() against each page of the inode. This is slow because
 * the calls to btrfs_invalidatepage() result in a huge amount of calls to
 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
 * extent_state structures over and over, wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
 * those expensive operations on a per page basis and do only the ordered io
 * finishing, while we release here the extent_map and extent_state structures,
 * without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	write_lock(&map_tree->lock);
	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
		struct extent_map *em;

		node = rb_first_cached(&map_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(map_tree, em);
		free_extent_map(em);
		if (need_resched()) {
			write_unlock(&map_tree->lock);
			cond_resched();
			write_lock(&map_tree->lock);
		}
	}
	write_unlock(&map_tree->lock);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent_bits(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidatepage, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_LOCKED | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs.  However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above.  We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * If we cannot make our reservation we'll attempt to steal from the
	 * global reserve, because we really want to be able to free up space.
	 */
	ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		/*
		 * Try to steal from the global reserve if there is space for
		 * it.
		 */
		if (btrfs_check_space_for_delayed_refs(fs_info) ||
		    btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, 1);
	}
	return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		clear_inode(inode);
		return;
	}

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto no_delete;

	if (is_bad_inode(inode))
		goto no_delete;

	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto no_delete;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto no_delete;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto no_delete;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = 1;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto free_rsv;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
						 0, 0);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto free_rsv;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

free_rsv:
	btrfs_free_block_rsv(fs_info, rsv);
no_delete:
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	clear_inode(inode);
}
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
				   name, namelen, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, name, btrfs_ino(BTRFS_I(dir)),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_type(path->nodes[0], di);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &BTRFS_I(inode)->rb_node;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	if (inode_unhashed(inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(entry))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(entry))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
static void inode_tree_del(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&inode->rb_node)) {
		rb_erase(&inode->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&inode->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	inode->i_ino = args->ino;
	BTRFS_I(inode)->location.objectid = args->ino;
	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
	BTRFS_I(inode)->location.offset = 0;
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
	BUG_ON(args->root && !BTRFS_I(inode)->root);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}
*btrfs_iget_locked(struct super_block
*s
, u64 ino
,
5676 struct btrfs_root
*root
)
5678 struct inode
*inode
;
5679 struct btrfs_iget_args args
;
5680 unsigned long hashval
= btrfs_inode_hash(ino
, root
);
5685 inode
= iget5_locked(s
, hashval
, btrfs_find_actor
,
5686 btrfs_init_locked_inode
,
5692 * Get an inode object given its inode number and corresponding root.
5693 * Path can be preallocated to prevent recursing back to iget through
5694 * allocator. NULL is also valid but may require an additional allocation
5697 struct inode
*btrfs_iget_path(struct super_block
*s
, u64 ino
,
5698 struct btrfs_root
*root
, struct btrfs_path
*path
)
5700 struct inode
*inode
;
5702 inode
= btrfs_iget_locked(s
, ino
, root
);
5704 return ERR_PTR(-ENOMEM
);
5706 if (inode
->i_state
& I_NEW
) {
5709 ret
= btrfs_read_locked_inode(inode
, path
);
5711 inode_tree_add(inode
);
5712 unlock_new_inode(inode
);
5716 * ret > 0 can come from btrfs_search_slot called by
5717 * btrfs_read_locked_inode, this means the inode item
5722 inode
= ERR_PTR(ret
);
5729 struct inode
*btrfs_iget(struct super_block
*s
, u64 ino
, struct btrfs_root
*root
)
5731 return btrfs_iget_path(s
, ino
, root
, NULL
);
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	return inode;
}
static inline u8 btrfs_inode_type(struct inode *inode)
{
	/*
	 * Compile-time asserts that generic FT_* types still match
	 * BTRFS_FT_* types
	 */
	BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
	BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
	BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
	BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV);
	BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV);
	BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO);
	BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK);
	BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);

	return fs_umode_to_ftype(inode->i_mode);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				  inode->i_mode, btrfs_inode_type(inode),
				  di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
	}
	if (root != sub_root)
		btrfs_put_root(sub_root);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}
/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir.  For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer.  This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
					 get_unaligned(&entry->ino),
					 get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}
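/*
 * Layout of filldir_buf consumed above (each name immediately follows its
 * header, with no alignment padding):
 *
 *   [struct dir_entry][name bytes][struct dir_entry][name bytes]...
 *
 * which is why the cursor advances by sizeof(struct dir_entry) + name_len
 * and the fields are accessed with get_unaligned()/put_unaligned().
 */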
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	INIT_LIST_HEAD(&ins_list);
	INIT_LIST_HEAD(&del_list);
	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		struct dir_entry *entry;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			goto next;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			goto next;
		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		entry = addr;
		put_unaligned(name_len, &entry->name_len);
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
				   name_len);
		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
				&entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
next:
		path->slots[0]++;
	}
	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	}
	btrfs_end_transaction(trans);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}
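/*
 * Note on the retry above: btrfs_join_transaction() does not reserve any
 * metadata space, so the inode update can fail with ENOSPC/EDQUOT; retrying
 * with btrfs_start_transaction(root, 1) reserves space for one item and lets
 * the flushing machinery make room.
 */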
/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty = flags & ~S_VERSION;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		dirty |= inode_maybe_inc_iversion(inode, dirty);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return dirty ? btrfs_dirty_inode(inode) : 0;
}
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		inode->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = 2;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = BTRFS_I(inode)->location.objectid;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
}
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	int nitems = name ? 2 : 1;
	unsigned long ptr;
	unsigned int nofs_flag;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	nofs_flag = memalloc_nofs_save();
	inode = new_inode(fs_info->sb);
	memalloc_nofs_restore(nofs_flag);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * O_TMPFILE, set link count to 0, so that after this point,
	 * we fill in an inode item with the correct link count.
	 */
	if (!name)
		set_nlink(inode, 0);

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir && name) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	} else if (dir) {
		*index = 0;
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->dir_index = *index;
	BTRFS_I(inode)->root = btrfs_grab_root(root);
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (name) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		key[1].offset = ref_objectid;

		sizes[1] = name_len + sizeof(*ref);
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		iput(inode);
		goto fail;
	}

	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
	if (ret != 0)
		goto fail_unlock;

	inode_init_owner(&init_user_ns, inode, dir, mode);
	inode_set_bytes(inode, 0);

	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			     sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (name) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
		ptr = (unsigned long)(ref + 1);
		write_extent_buffer(path->nodes[0], name, ptr, name_len);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	ret = btrfs_inode_inherit_props(trans, inode, dir);
	if (ret)
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);

	return inode;

fail_unlock:
	discard_new_inode(inode);
fail:
	if (dir && name)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	return ERR_PTR(ret);
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a give name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name_len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
		struct timespec64 now = current_time(&parent_inode->vfs_inode);

		parent_inode->vfs_inode.i_mtime = now;
		parent_inode->vfs_inode.i_ctime = now;
	}
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name, name_len);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *dir, struct dentry *dentry,
			    struct btrfs_inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (err > 0)
		err = -EEXIST;
	return err;
}
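
/*
 * Standard VFS ->mknod callback: create a special file (device node, FIFO
 * or socket) backed by a new btrfs inode, all inside a single transaction.
 */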
static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		inode = NULL;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			0, index);
	if (err)
		goto out_unlock;

	btrfs_update_inode(trans, root, BTRFS_I(inode));
	d_instantiate_new(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err && inode) {
		inode_dec_link_count(inode);
		discard_new_inode(inode);
	}
	return err;
}
static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		inode = NULL;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (err)
		goto out_unlock;

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			0, index);
	if (err)
		goto out_unlock;

	d_instantiate_new(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans);
	if (err && inode) {
		inode_dec_link_count(inode);
		discard_new_inode(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
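
/*
 * Standard VFS ->link callback. Hard links are confined to one subvolume,
 * and linking an O_TMPFILE inode (nlink going from 0 to 1) also removes the
 * orphan item that was protecting it.
 */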
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
	}

fail:
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		inode = NULL;
		goto out_fail;
	}

	/* these must be set before we unlock the inode */
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	btrfs_i_size_write(BTRFS_I(inode), 0);
	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			dentry->d_name.name,
			dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate_new(dentry, inode);

out_fail:
	btrfs_end_transaction(trans);
	if (err && inode) {
		inode_dec_link_count(inode);
		discard_new_inode(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
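
/*
 * Read a compressed inline extent out of the leaf and decompress it into the
 * given page, zeroing whatever tail of the page the extent data does not
 * cover.
 */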
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */
	if (max_size + pg_offset < PAGE_SIZE)
		memzero_page(page, pg_offset + max_size,
			     PAGE_SIZE - max_size - pg_offset);
	kfree(tmp);
	return ret;
}
/**
 * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
 * @inode:	file to search in
 * @page:	page to read extent data into if the extent is inline
 * @pg_offset:	offset into @page to copy to
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * This returns the first &struct extent_map which overlaps with the given
 * range, reading it from the B-tree and caching it if necessary. Note that
 * there may be more extents which overlap the given range after the returned
 * extent_map.
 *
 * If @page is not NULL and the extent is inline, this also reads the extent
 * data directly into the page and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_io_tree *io_tree = &inode->io_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		em->block_start = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		if (!page)
			goto out;

		size = btrfs_file_extent_ram_bytes(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
				  size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, fs_info->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;

		if (!PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, page, pg_offset,
							extent_offset, item);
				if (ret)
					goto out;
			} else {
				map = kmap_local_page(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap_local(map);
			}
			flush_dcache_page(page);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
	em->block_start = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
					   u64 start, u64 len)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 delalloc_start = start;
	u64 end;
	u64 delalloc_len;
	u64 delalloc_end;
	int err = 0;

	em = btrfs_get_extent(inode, NULL, 0, start, len);
	if (IS_ERR(em))
		return em;
	/*
	 * If our em maps to:
	 * - a hole or
	 * - a pre-alloc extent,
	 * there might actually be delalloc bytes behind it.
	 */
	if (em->block_start != EXTENT_MAP_HOLE &&
	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		return em;
	else
		hole_em = em;

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
				 end, len, EXTENT_DELALLOC, 1);
	delalloc_end = delalloc_start + delalloc_len;
	if (delalloc_end < delalloc_start)
		delalloc_end = (u64)-1;

	/*
	 * We didn't find anything useful, return the original results from
	 * get_extent()
	 */
	if (delalloc_start > end || delalloc_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/*
	 * Adjust the delalloc_start to make sure it doesn't go backwards from
	 * the start they passed in
	 */
	delalloc_start = max(start, delalloc_start);
	delalloc_len = delalloc_end - delalloc_start;

	if (delalloc_len > 0) {
		u64 hole_start;
		u64 hole_len;
		const u64 hole_end = extent_map_end(hole_em);

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * When btrfs_get_extent can't find anything it returns one
		 * huge hole
		 *
		 * Make sure what it found really fits our range, and adjust to
		 * make sure it is based on the start from the caller
		 */
		if (hole_end <= start || hole_em->start > end) {
			free_extent_map(hole_em);
			hole_em = NULL;
		} else {
			hole_start = max(hole_em->start, start);
			hole_len = hole_end - hole_start;
		}

		if (hole_em && delalloc_start > hole_start) {
			/*
			 * Our hole starts before our delalloc, so we have to
			 * return just the parts of the hole that go until the
			 * delalloc starts
			 */
			em->len = min(hole_len,
				      delalloc_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * Don't adjust block start at all, it is fixed at
			 * EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			/*
			 * Hole is out of passed range or it starts after
			 * delalloc range
			 */
			em->start = delalloc_start;
			em->len = delalloc_len;
			em->orig_start = delalloc_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = delalloc_len;
		}
	} else {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	int ret;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start, block_start,
				  block_len, orig_block_len, ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len,
					   block_len, type);
	if (ret) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_cache(inode, start,
						start + len - 1, 0);
		}
		em = ERR_PTR(ret);
	}
out:

	return em;
}
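
/*
 * Allocate a brand new data extent for a direct I/O write (the COW case):
 * reserve space near the allocation hint, then wire it up with an extent map
 * and an ordered extent via btrfs_create_dio_extent().
 */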
static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
					   1);

	return em;
}
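
/*
 * A read-only block group (for example one currently being relocated or
 * scrubbed) must not receive nocow writes; can_nocow_extent() uses this to
 * force COW in that case.
 */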
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @orig_start:	(optional) Return the original file offset of the file extent
 * @orig_len:	(optional) Return the original on-disk length of the file extent
 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool strict)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;
	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}

	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end <= offset)
		goto out;

	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	if (disk_bytenr == 0)
		goto out;

	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	/*
	 * Do the same check as in btrfs_cross_ref_exist but without the
	 * unnecessary search.
	 */
	if (!strict &&
	    (btrfs_file_extent_generation(leaf, fi) <=
	     btrfs_root_last_snapshot(&root->root_item)))
		goto out;

	backref_offset = btrfs_file_extent_offset(leaf, fi);

	if (orig_start) {
		*orig_start = key.offset - backref_offset;
		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	}

	if (btrfs_extent_readonly(fs_info, disk_bytenr))
		goto out;

	num_bytes = min(offset + *len, extent_end) - offset;
	if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	btrfs_release_path(path);

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */

	ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
				    key.offset - backref_offset, disk_bytenr,
				    strict);
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	*len = num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state, bool writing)
{
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing || !filemap_range_has_page(inode->i_mapping,
							 lockstart, lockend)))
			break;

		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     cached_state);

		if (ordered) {
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks. This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(ordered, 1);
			else
				ret = -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readahead (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readahead wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em_tree = &inode->extent_tree;
	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	do {
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		/*
		 * The caller has taken lock_extent(), who could race with us
		 * to add em?
		 */
	} while (ret == -EEXIST);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers needs to do free_extent_map once. */
	return em;
}
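
/*
 * Map one contiguous chunk of a direct I/O write: reuse the existing extent
 * for NODATACOW/PREALLOC ranges when can_nocow_extent() allows it, otherwise
 * fall back to allocating a new extent (COW), and extend i_size under the
 * extent lock so racing buffered readers see a consistent size.
 */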
static int btrfs_get_blocks_direct_write(struct extent_map **map,
					 struct inode *inode,
					 struct btrfs_dio_data *dio_data,
					 u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em = *map;
	int ret = 0;

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		u64 block_start, orig_start, orig_block_len, ram_bytes;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes, false) == 1 &&
		    btrfs_inc_nocow_writers(fs_info, block_start)) {
			struct extent_map *em2;

			em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
						      orig_start, block_start,
						      len, orig_block_len,
						      ram_bytes, type);
			btrfs_dec_nocow_writers(fs_info, block_start);
			if (type == BTRFS_ORDERED_PREALLOC) {
				free_extent_map(em);
				*map = em = em2;
			}

			if (em2 && IS_ERR(em2)) {
				ret = PTR_ERR(em2);
				goto out;
			}
			/*
			 * For inode marked NODATACOW or extent marked PREALLOC,
			 * use the existing or preallocated extent, so does not
			 * need to adjust btrfs_space_info's bytes_may_use.
			 */
			btrfs_free_reserved_data_space_noquota(fs_info, len);
			goto skip_cow;
		}
	}

	/* this will cow the extent */
	free_extent_map(em);
	*map = em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	len = min(len, em->len - (start - em->start));

skip_cow:
	/*
	 * Need to update the i_size under the extent lock so buffered
	 * readers will get the updated i_size when we unlock.
	 */
	if (start + len > i_size_read(inode))
		i_size_write(inode, start + len);

	dio_data->reserve -= len;
out:
	return ret;
}
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
		loff_t length, unsigned int flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = NULL;
	u64 lockstart, lockend;
	const bool write = !!(flags & IOMAP_WRITE);
	int ret = 0;
	u64 len = length;
	bool unlock_extents = false;

	if (!write)
		len = min_t(u64, len, fs_info->sectorsize);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * The generic stuff only does filemap_write_and_wait_range, which
	 * isn't enough if we've written compressed pages to this area, so we
	 * need to flush the dirty pages again to make absolutely sure that any
	 * outstanding dirty pages are on disk.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       start + length - 1);
		if (ret)
			return ret;
	}

	dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS);
	if (!dio_data)
		return -ENOMEM;

	dio_data->length = length;
	if (write) {
		dio_data->reserve = round_up(length, fs_info->sectorsize);
		ret = btrfs_delalloc_reserve_space(BTRFS_I(inode),
				&dio_data->data_reserved,
				start, dio_data->reserve);
		if (ret) {
			extent_changeset_free(dio_data->data_reserved);
			kfree(dio_data);
			return ret;
		}
	}
	iomap->private = dio_data;

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered.
	 */
	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) {
		ret = -ENOTBLK;
		goto err;
	}

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		ret = -ENOTBLK;
		goto unlock_err;
	}

	len = min(len, em->len - (start - em->start));
	if (write) {
		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
						    start, len);
		if (ret < 0)
			goto unlock_err;
		unlock_extents = true;
		/* Recalc len in case the new em is smaller than requested */
		len = min(len, em->len - (start - em->start));
	} else {
		/*
		 * We need to unlock only the end area that we aren't using.
		 * The rest is going to be unlocked by the endio routine.
		 */
		lockstart = start + len;
		if (lockstart < lockend)
			unlock_extents = true;
	}

	if (unlock_extents)
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     lockstart, lockend, &cached_state);
	else
		free_extent_state(cached_state);

	/*
	 * Translate extent map information to iomap.
	 * We trim the extents (and move the addr) even though iomap code does
	 * that, since we have locked only the parts we are performing I/O in.
	 */
	if ((em->block_start == EXTENT_MAP_HOLE) ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->bdev = fs_info->fs_devices->latest_bdev;
	iomap->length = len;

	if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
		iomap->flags |= IOMAP_F_ZONE_APPEND;

	free_extent_map(em);

	return 0;

unlock_err:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state);
err:
	if (dio_data) {
		btrfs_delalloc_release_space(BTRFS_I(inode),
				dio_data->data_reserved, start,
				dio_data->reserve, true);
		btrfs_delalloc_release_extents(BTRFS_I(inode),
				dio_data->reserve);
		extent_changeset_free(dio_data->data_reserved);
		kfree(dio_data);
	}
	return ret;
}
static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	int ret = 0;
	struct btrfs_dio_data *dio_data = iomap->private;
	size_t submitted = dio_data->submitted;
	const bool write = !!(flags & IOMAP_WRITE);

	if (!write && (iomap->type == IOMAP_HOLE)) {
		/* If reading from a hole, unlock and return */
		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
		goto out;
	}

	if (submitted < length) {
		pos += submitted;
		length -= submitted;
		if (write)
			__endio_write_update_ordered(BTRFS_I(inode), pos,
						     length, false);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
				      pos + length - 1);
		ret = -ENOTBLK;
	}

	if (write) {
		if (dio_data->reserve)
			btrfs_delalloc_release_space(BTRFS_I(inode),
					dio_data->data_reserved, pos,
					dio_data->reserve, true);
		btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length);
		extent_changeset_free(dio_data->data_reserved);
	}
out:
	kfree(dio_data);
	iomap->private = NULL;

	return ret;
}
static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
{
	/*
	 * This implies a barrier so that stores to dio_bio->bi_status before
	 * this and loads of dio_bio->bi_status after this are fully ordered.
	 */
	if (!refcount_dec_and_test(&dip->refs))
		return;

	if (btrfs_op(dip->dio_bio) == BTRFS_MAP_WRITE) {
		__endio_write_update_ordered(BTRFS_I(dip->inode),
					     dip->logical_offset,
					     dip->bytes,
					     !dip->dio_bio->bi_status);
	} else {
		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
			      dip->logical_offset,
			      dip->logical_offset + dip->bytes - 1);
	}

	bio_endio(dip->dio_bio);
	kfree(dip);
}
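
/*
 * Resubmit a bio for read repair against a specific mirror. A reference on
 * the btrfs_dio_private is held while the repair bio is in flight and dropped
 * again if mapping the bio fails.
 */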
static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio,
					  int mirror_num,
					  unsigned long bio_flags)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	blk_status_t ret;

	BUG_ON(bio_op(bio) == REQ_OP_WRITE);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	if (ret)
		return ret;

	refcount_inc(&dip->refs);
	ret = btrfs_map_bio(fs_info, bio, mirror_num);
	if (ret)
		refcount_dec(&dip->refs);
	return ret;
}
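
/*
 * Walk every sector of a completed direct I/O read, verify data checksums
 * where they are enabled, clean up recorded failures on success and kick off
 * read repair from another mirror for sectors that do not verify.
 */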
static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
					     struct btrfs_io_bio *io_bio,
					     const bool uptodate)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	const u32 sectorsize = fs_info->sectorsize;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
	struct bio_vec bvec;
	struct bvec_iter iter;
	u64 start = io_bio->logical;
	u32 bio_offset = 0;
	blk_status_t err = BLK_STS_OK;

	__bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) {
		unsigned int i, nr_sectors, pgoff;

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
		pgoff = bvec.bv_offset;
		for (i = 0; i < nr_sectors; i++) {
			ASSERT(pgoff < PAGE_SIZE);
			if (uptodate &&
			    (!csum || !check_data_csum(inode, io_bio,
						       bio_offset, bvec.bv_page,
						       pgoff, start))) {
				clean_io_failure(fs_info, failure_tree, io_tree,
						 start, bvec.bv_page,
						 btrfs_ino(BTRFS_I(inode)),
						 pgoff);
			} else {
				blk_status_t status;

				ASSERT((start - io_bio->logical) < UINT_MAX);
				status = btrfs_submit_read_repair(inode,
							&io_bio->bio,
							start - io_bio->logical,
							bvec.bv_page, pgoff,
							start,
							start + sectorsize - 1,
							io_bio->mirror_num,
							submit_dio_repair_bio);
				if (status)
					err = status;
			}
			start += sectorsize;
			ASSERT(bio_offset + sectorsize > bio_offset);
			bio_offset += sectorsize;
			pgoff += sectorsize;
		}
	}
	return err;
}
static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_extent *ordered = NULL;
	struct btrfs_workqueue *wq;
	u64 ordered_offset = offset;
	u64 ordered_bytes = bytes;
	u64 last_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	while (ordered_offset < offset + bytes) {
		last_offset = ordered_offset;
		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
							 &ordered_offset,
							 ordered_bytes,
							 uptodate)) {
			btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
					NULL);
			btrfs_queue_work(wq, &ordered->work);
		}

		/* No ordered extent found in the range, exit */
		if (ordered_offset == last_offset)
			return;
		/*
		 * Our bio might span multiple ordered extents. In this case
		 * we keep going until we have accounted the whole dio.
		 */
		if (ordered_offset < offset + bytes) {
			ordered_bytes = offset + bytes - ordered_offset;
			ordered = NULL;
		}
	}
}
static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
						     struct bio *bio,
						     u64 dio_file_offset)
{
	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, 1);
}
static void btrfs_end_dio_bio(struct bio *bio)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	blk_status_t err = bio->bi_status;

	if (err)
		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
			   bio->bi_opf, bio->bi_iter.bi_sector,
			   bio->bi_iter.bi_size, err);

	if (bio_op(bio) == REQ_OP_READ)
		err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio),
					       !err);

	if (err)
		dip->dio_bio->bi_status = err;

	btrfs_record_physical_zoned(dip->inode, dip->logical_offset, bio);

	bio_put(bio);
	btrfs_dio_private_put(dip);
}
static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
		struct inode *inode, u64 file_offset, int async_submit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_dio_private *dip = bio->bi_private;
	bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
	blk_status_t ret;

	/* Check btrfs_submit_bio_hook() for rules about async submit. */
	if (async_submit)
		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);

	if (!write) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
		if (ret)
			goto err;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(inode, bio, 0, 0, file_offset,
					  btrfs_submit_bio_start_direct_io);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, 1);
		if (ret)
			goto err;
	} else {
		u64 csum_offset;

		csum_offset = file_offset - dip->logical_offset;
		csum_offset >>= fs_info->sectorsize_bits;
		csum_offset *= fs_info->csum_size;
		btrfs_io_bio(bio)->csum = dip->csums + csum_offset;
	}
map:
	ret = btrfs_map_bio(fs_info, bio, 0);
err:
	return ret;
}
/*
 * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked
 * or ordered extents whether or not we submit any bios.
 */
static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
							  struct inode *inode,
							  loff_t file_offset)
{
	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
	size_t dip_size;
	struct btrfs_dio_private *dip;

	dip_size = sizeof(*dip);
	if (!write && csum) {
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		size_t nblocks;

		nblocks = dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits;
		dip_size += fs_info->csum_size * nblocks;
	}

	dip = kzalloc(dip_size, GFP_NOFS);
	if (!dip)
		return NULL;

	dip->inode = inode;
	dip->logical_offset = file_offset;
	dip->bytes = dio_bio->bi_iter.bi_size;
	dip->disk_bytenr = dio_bio->bi_iter.bi_sector << 9;
	dip->dio_bio = dio_bio;
	refcount_set(&dip->refs, 1);
	return dip;
}
static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
		struct bio *dio_bio, loff_t file_offset)
{
	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
			     BTRFS_BLOCK_GROUP_RAID56_MASK);
	struct btrfs_dio_private *dip;
	struct bio *bio;
	u64 start_sector;
	int async_submit = 0;
	u64 submit_len;
	int clone_offset = 0;
	int clone_len;
	u64 logical;
	int ret;
	blk_status_t status;
	struct btrfs_io_geometry geom;
	struct btrfs_dio_data *dio_data = iomap->private;
	struct extent_map *em = NULL;

	dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
	if (!dip) {
		if (!write) {
			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
				file_offset + dio_bio->bi_iter.bi_size - 1);
		}
		dio_bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(dio_bio);
		return BLK_QC_T_NONE;
	}

	if (!write) {
		/*
		 * Load the csums up front to reduce csum tree searches and
		 * contention when submitting bios.
		 *
		 * If we have csums disabled this will do nothing.
		 */
		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
		if (status != BLK_STS_OK)
			goto out_err;
	}

	start_sector = dio_bio->bi_iter.bi_sector;
	submit_len = dio_bio->bi_iter.bi_size;

	do {
		logical = start_sector << 9;
		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
		if (IS_ERR(em)) {
			status = errno_to_blk_status(PTR_ERR(em));
			em = NULL;
			goto out_err_em;
		}
		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
					    logical, submit_len, &geom);
		if (ret) {
			status = errno_to_blk_status(ret);
			goto out_err_em;
		}
		ASSERT(geom.len <= INT_MAX);

		clone_len = min_t(int, submit_len, geom.len);

		/*
		 * This will never fail as it's passing GPF_NOFS and
		 * the allocation is backed by btrfs_bioset.
		 */
		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
		bio->bi_private = dip;
		bio->bi_end_io = btrfs_end_dio_bio;
		btrfs_io_bio(bio)->logical = file_offset;

		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			status = extract_ordered_extent(BTRFS_I(inode), bio,
							file_offset);
			if (status) {
				bio_put(bio);
				goto out_err_em;
			}
		}

		ASSERT(submit_len >= clone_len);
		submit_len -= clone_len;

		/*
		 * Increase the count before we submit the bio so we know
		 * the end IO handler won't happen before we increase the
		 * count. Otherwise, the dip might get freed before we're
		 * done setting it up.
		 *
		 * We transfer the initial reference to the last bio, so we
		 * don't need to increment the reference count for the last one.
		 */
		if (submit_len > 0) {
			refcount_inc(&dip->refs);
			/*
			 * If we are submitting more than one bio, submit them
			 * all asynchronously. The exception is RAID 5 or 6, as
			 * asynchronous checksums make it difficult to collect
			 * full stripe writes.
			 */
			if (!raid56)
				async_submit = 1;
		}

		status = btrfs_submit_dio_bio(bio, inode, file_offset,
					      async_submit);
		if (status) {
			bio_put(bio);
			if (submit_len > 0)
				refcount_dec(&dip->refs);
			goto out_err_em;
		}

		dio_data->submitted += clone_len;
		clone_offset += clone_len;
		start_sector += clone_len >> 9;
		file_offset += clone_len;

		free_extent_map(em);
	} while (submit_len > 0);
	return BLK_QC_T_NONE;

out_err_em:
	free_extent_map(em);
out_err:
	dip->dio_bio->bi_status = status;
	btrfs_dio_private_put(dip);

	return BLK_QC_T_NONE;
}
const struct iomap_ops btrfs_dio_iomap_ops = {
	.iomap_begin            = btrfs_dio_iomap_begin,
	.iomap_end              = btrfs_dio_iomap_end,
};

const struct iomap_dio_ops btrfs_dio_ops = {
	.submit_io		= btrfs_submit_direct,
};
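
/*
 * VFS ->fiemap callback: validate the request with fiemap_prep() and hand
 * the actual extent walk to extent_fiemap().
 */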
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len)
{
	int	ret;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
}
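
/*
 * VFS ->readpage callback. Any ordered extent covering the page is flushed
 * and waited on first so the read cannot race with pending writeback of the
 * same range.
 */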
int btrfs_readpage(struct file *file, struct page *page)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	unsigned long bio_flags = 0;
	struct bio *bio = NULL;
	int ret;

	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL);
	if (bio)
		ret = submit_one_bio(bio, 0, bio_flags);
	return ret;
}
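
/*
 * VFS ->writepage callback. This can be called directly under memory
 * pressure, hence the PF_MEMALLOC check and the igrab() to keep the inode
 * alive while ordered extent processing may still need it.
 */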
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int ret;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	/*
	 * If we are under memory pressure we will call this directly from the
	 * VM, we need to make sure we have the inode referenced for the ordered
	 * extent. If not just return like we didn't do anything.
	 */
	if (!igrab(inode)) {
		redirty_page_for_writepage(wbc, page);
		return AOP_WRITEPAGE_ACTIVATE;
	}
	ret = extent_write_full_page(page, wbc);
	btrfs_add_delayed_iput(inode);
	return ret;
}
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	return extent_writepages(mapping, wbc);
}
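
/* VFS ->readahead callback, a thin wrapper around extent_readahead(). */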
static void btrfs_readahead(struct readahead_control *rac)
{
	extent_readahead(rac);
}
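
/*
 * Try to drop the extent mappings attached to a page so it can be released;
 * only on success is the page's extent-mapped state cleared.
 */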
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	int ret = try_release_extent_mapping(page, gfp_flags);

	if (ret)
		clear_page_extent_mapped(page);

	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags);
}
#ifdef CONFIG_MIGRATION
static int btrfs_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page,
			     enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (PagePrivate2(page)) {
		ClearPagePrivate2(page);
		SetPagePrivate2(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
static void btrfs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
	struct extent_io_tree *tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_SIZE - 1;
	u64 start;
	u64 end;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
	bool found_ordered = false;
	bool completed_ordered = false;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent_bits(tree, page_start, page_end, &cached_state);

	start = page_start;
again:
	ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
	if (ordered) {
		found_ordered = true;
		end = min(page_end,
			  ordered->file_offset + ordered->num_bytes - 1);
		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, start, end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, 1, 0, &cached_state);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			spin_lock_irq(&inode->ordered_tree.lock);
			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
			ordered->truncated_len = min(ordered->truncated_len,
						     start - ordered->file_offset);
			spin_unlock_irq(&inode->ordered_tree.lock);

			if (btrfs_dec_test_ordered_pending(inode, &ordered,
							   start,
							   end - start + 1, 1)) {
				btrfs_finish_ordered_io(ordered);
				completed_ordered = true;
			}
		}
		btrfs_put_ordered_extent(ordered);
		if (!inode_evicting) {
			cached_state = NULL;
			lock_extent_bits(tree, start, end,
					 &cached_state);
		}

		start = end + 1;
		if (start < page_end)
			goto again;
	}

	/*
	 * Qgroup reserved space handler
	 * Page here will be either
	 * 1) Already written to disk or ordered extent already submitted
	 *    Then its QGROUP_RESERVED bit in io_tree is already cleaned.
	 *    Qgroup will be handled by its qgroup_record then.
	 *    btrfs_qgroup_free_data() call will do nothing here.
	 *
	 * 2) Not written to disk yet
	 *    Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
	 *    bit of its io_tree, and free the qgroup reserved data space.
	 *    Since the IO will never happen for this page.
	 */
	btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
	if (!inode_evicting) {
		bool delete = true;

		/*
		 * If there's an ordered extent for this range and we have not
		 * finished it ourselves, we must leave EXTENT_DELALLOC_NEW set
		 * in the range for the ordered extent completion. We must also
		 * not delete the range, otherwise we would lose that bit (and
		 * any other bits set in the range). Make sure EXTENT_UPTODATE
		 * is cleared if we don't delete, otherwise it can lead to
		 * corruptions if the i_size is extented later.
		 */
		if (found_ordered && !completed_ordered)
			delete = false;
		clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
				 EXTENT_DELALLOC | EXTENT_UPTODATE |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
				 delete, &cached_state);

		__btrfs_releasepage(page, GFP_NOFS);
	}

	ClearPageChecked(page);
	clear_page_extent_mapped(page);
}
8476 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8477 * called from a page fault handler when a page is first dirtied. Hence we must
8478 * be careful to check for EOF conditions here. We set the page up correctly
8479 * for a written page which means we get ENOSPC checking when writing into
8480 * holes and correct delalloc and unwritten extent mapping on filesystems that
8481 * support these features.
8483 * We are not allowed to take the i_mutex here so we have to play games to
8484 * protect against truncate races as the page could now be beyond EOF. Because
8485 * truncate_setsize() writes the inode size before removing pages, once we have
8486 * the page lock we can determine safely if the page is beyond EOF. If it is not
8487 * beyond EOF, then the page is guaranteed safe against truncation until we
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	unsigned long zero_start;
	loff_t size;
	vm_fault_t ret;
	int ret2;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepage() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
					    page_start, reserved_space);
	if (!ret2) {
		ret2 = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret2) {
		ret = vmf_error(ret2);
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	down_read(&BTRFS_I(inode)->i_mmap_lock);
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
	ret2 = set_page_extent_mapped(page);
	if (ret2 < 0) {
		ret = vmf_error(ret2);
		unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
		goto out_unlock;
	}

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish.
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
			PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state);
		unlock_page(page);
		up_read(&BTRFS_I(inode)->i_mmap_lock);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved, page_start,
					PAGE_SIZE - reserved_space, true);
		}
	}

	/*
	 * page_mkwrite gets called when the page is first dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space accounting reasons, we still need
	 * to clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			  EXTENT_DEFRAG, 0, 0, &cached_state);

	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
					&cached_state);
	if (ret2) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = offset_in_page(size);
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE) {
		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
		flush_dcache_page(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
	up_read(&BTRFS_I(inode)->i_mmap_lock);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return VM_FAULT_LOCKED;

out_unlock:
	unlock_page(page);
	up_read(&BTRFS_I(inode)->i_mmap_lock);
out:
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
				     reserved_space, (ret != 0));
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	extent_changeset_free(data_reserved);
	return ret;
}
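/*
 * Note: a sketch of how this fault handler is reached, assuming the usual
 * wiring in fs/btrfs/file.c (not shown in this file): mmap() installs
 * btrfs_file_vm_ops, whose .page_mkwrite points here, so the first store to
 * a clean mapped page traps into btrfs_page_mkwrite() before the page is
 * allowed to go dirty:
 *
 *	static const struct vm_operations_struct btrfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= btrfs_page_mkwrite,
 *	};
 */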
static int btrfs_truncate(struct inode *inode, bool skip_writeback)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}
	/*
	 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be freed up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode. We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no way of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with:
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
	 * updating the inode.
	 */
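	/*
	 * In concrete terms (a sketch of the scheme, not extra behavior):
	 * min_size is the metadata reservation for a single item update. rsv
	 * holds one such unit for the truncate itself and is refilled from
	 * the transaction reservation each time the loop below restarts the
	 * transaction, while fs_info->trans_block_rsv keeps an independent
	 * unit for each btrfs_update_inode() call.
	 */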
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = 1;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	BUG_ON(ret);

	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	trans->block_rsv = rsv;

	while (1) {
		ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know
	 * we've truncated everything except the last little bit, and can do
	 * btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);

	return ret;
}
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     struct btrfs_root *parent_root)
{
	struct inode *inode;
	int err;
	u64 index = 0;
	u64 ino;

	err = btrfs_get_free_objectid(new_root, &ino);
	if (err < 0)
		return err;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, ino, ino,
				S_IFDIR | (~current_umask() & S_IRWXUGO),
				&index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	set_nlink(inode, 1);
	btrfs_i_size_write(BTRFS_I(inode), 0);
	unlock_new_inode(inode);

	err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
	if (err)
		btrfs_err(new_root->fs_info,
			  "error inheriting subvolume %llu properties: %d",
			  new_root->root_key.objectid, err);

	err = btrfs_update_inode(trans, new_root, BTRFS_I(inode));

	iput(inode);
	return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
	extent_io_tree_init(fs_info, &ei->io_failure_tree,
			    IO_TREE_INODE_IO_FAILURE, inode);
	extent_io_tree_init(fs_info, &ei->file_extent_tree,
			    IO_TREE_INODE_FILE_EXTENT, inode);
	ei->io_tree.track_uptodate = true;
	ei->io_failure_tree.track_uptodate = true;
	atomic_set(&ei->sync_writers, 0);
	mutex_init(&ei->log_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	WARN_ON(inode->delalloc_bytes);
	WARN_ON(inode->new_delalloc_bytes);
	WARN_ON(inode->csum_bytes);
	WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		btrfs_err(root->fs_info,
			  "found ordered extent %llu %llu on inode cleanup",
			  ordered->file_offset, ordered->num_bytes);
		btrfs_remove_ordered_extent(inode, ordered);
		/* Drop the lookup reference and the base reference. */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(btrfs_inode_cachep);
	kmem_cache_destroy(btrfs_trans_handle_cachep);
	kmem_cache_destroy(btrfs_path_cachep);
	kmem_cache_destroy(btrfs_free_space_cachep);
	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
}

int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path",
			sizeof(struct btrfs_path), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
			sizeof(struct btrfs_free_space), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
							PAGE_SIZE, PAGE_SIZE,
							SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_bitmap_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
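/*
 * Note on the constructor: init_once() passed to kmem_cache_create() above
 * runs only when a slab page is first populated, not on every allocation
 * from the cache. That is why it merely calls inode_init_once() for the
 * VFS state that must survive free/alloc cycles, while everything that has
 * to be reset per inode is (re)initialized in btrfs_alloc_inode().
 */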
static int btrfs_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;
	u32 bi_flags = BTRFS_I(inode)->flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(&init_user_ns, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> 9;
	return 0;
}
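/*
 * Userspace view (a sketch assuming the statx(2) wrapper from glibc): the
 * birth time and attribute bits filled in above surface like this:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "file", 0, STATX_BTIME | STATX_BLOCKS, &stx) == 0) {
 *		// stx.stx_btime is BTRFS_I(inode)->i_otime
 *		// stx.stx_blocks also counts not-yet-flushed delalloc ranges
 *	}
 */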
static int btrfs_rename_exchange(struct inode *old_dir,
			      struct dentry *old_dentry,
			      struct inode *new_dir,
			      struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec64 ctime = current_time(old_inode);
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool root_log_pinned = false;
	bool dest_log_pinned = false;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * We want to reserve the absolute worst case amount of items. So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they are normal
	 * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 12);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		btrfs_pin_log_trans(root);
		root_log_pinned = true;
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		btrfs_pin_log_trans(dest);
		dest_log_pinned = true;
		ret = btrfs_insert_inode_ref(trans, root,
					     old_dentry->d_name.name,
					     old_dentry->d_name.len,
					     new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret)
			goto out_fail;
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;
	new_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
				BTRFS_I(old_inode), 1);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
				BTRFS_I(new_inode), 1);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_dentry->d_name.name,
					   old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_dentry->d_name.name,
					   new_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_dentry->d_name.name,
			     old_dentry->d_name.len, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	if (root_log_pinned) {
		btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
				   new_dentry->d_parent);
		btrfs_end_log_trans(root);
		root_log_pinned = false;
	}
	if (dest_log_pinned) {
		btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
				   old_dentry->d_parent);
		btrfs_end_log_trans(dest);
		dest_log_pinned = false;
	}
out_fail:
	/*
	 * If we have pinned a log and an error happened, we unpin tasks
	 * trying to sync the log and force them to fallback to a transaction
	 * commit if the log currently contains any of the inodes involved in
	 * this rename operation (to ensure we do not persist a log with an
	 * inconsistent state for any of these inodes or leading to any
	 * inconsistencies when replayed). If the transaction was aborted, the
	 * abortion reason is propagated to userspace when attempting to commit
	 * the transaction. If the log does not contain any of these inodes, we
	 * allow the tasks to sync it.
	 */
	if (ret && (root_log_pinned || dest_log_pinned)) {
		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
		    (new_inode &&
		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
			btrfs_set_log_full_commit(trans);

		if (root_log_pinned) {
			btrfs_end_log_trans(root);
			root_log_pinned = false;
		}
		if (dest_log_pinned) {
			btrfs_end_log_trans(dest);
			dest_log_pinned = false;
		}
	}
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	return ret;
}
static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     struct dentry *dentry)
{
	int ret;
	struct inode *inode;
	u64 objectid;
	u64 index;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		return ret;

	inode = btrfs_new_inode(trans, root, dir,
				dentry->d_name.name,
				dentry->d_name.len,
				btrfs_ino(BTRFS_I(dir)),
				objectid,
				S_IFCHR | WHITEOUT_MODE,
				&index);

	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		return ret;
	}

	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode,
		WHITEOUT_DEV);

	ret = btrfs_init_inode_security(trans, inode, dir,
				&dentry->d_name);
	if (ret)
		goto out;

	ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
				BTRFS_I(inode), 0, index);
	if (ret)
		goto out;

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	unlock_new_inode(inode);
	if (ret)
		inode_dec_link_count(inode);
	iput(inode);

	return ret;
}
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	bool log_pinned = false;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len);

	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				return ret;
			}
		} else {
			/* maybe -EOVERFLOW */
			return ret;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items. So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they are normal
	 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 * If our rename has the whiteout flag, we need 5 more units for the
	 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
	 * when selinux is enabled).
	 */
	trans_num_items = 11;
	if (flags & RENAME_WHITEOUT)
		trans_num_items += 5;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		btrfs_pin_log_trans(root);
		log_pinned = true;
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(BTRFS_I(new_dir)), index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	old_inode->i_ctime = current_time(old_dir);

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
				BTRFS_I(old_inode), 1);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
					BTRFS_I(d_inode(old_dentry)),
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		new_inode->i_ctime = current_time(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (log_pinned) {
		btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
				   new_dentry->d_parent);
		btrfs_end_log_trans(root);
		log_pinned = false;
	}

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_whiteout_for_rename(trans, root, old_dir,
						old_dentry);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}
out_fail:
	/*
	 * If we have pinned the log and an error happened, we unpin tasks
	 * trying to sync the log and force them to fallback to a transaction
	 * commit if the log currently contains any of the inodes involved in
	 * this rename operation (to ensure we do not persist a log with an
	 * inconsistent state for any of these inodes or leading to any
	 * inconsistencies when replayed). If the transaction was aborted, the
	 * abortion reason is propagated to userspace when attempting to commit
	 * the transaction. If the log does not contain any of these inodes, we
	 * allow the tasks to sync it.
	 */
	if (ret && log_pinned) {
		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
		    (new_inode &&
		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
			btrfs_set_log_full_commit(trans);

		btrfs_end_log_trans(root);
		log_pinned = false;
	}
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	return ret;
}
static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					  new_dentry);

	return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
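/*
 * This is the ->rename handler, so all three renameat2(2) flag combinations
 * funnel through here. A userspace sketch of the exchange path handled by
 * btrfs_rename_exchange() above:
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * RENAME_WHITEOUT (used by overlayfs) instead takes btrfs_rename(), which
 * drops a whiteout char device in place of the old name.
 */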
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				&BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}

static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = sync_inode(inode, wbc);
			if (!ret &&
			    test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				ret = sync_inode(inode, wbc);
			btrfs_add_delayed_iput(inode);
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
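/*
 * A note on the splice pattern above: list_splice_init() moves the whole
 * delalloc list to a private list so the walker can drop delalloc_lock
 * while flushing each inode without losing its place; each visited inode
 * is moved back to the root's list, and whatever is still on the private
 * list on an early exit is re-spliced at the tail so no inode is lost.
 */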
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for updating parent inode item
	 * 1 item for the inline extent item
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 7);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
				objectid, S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		inode = NULL;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
	/*
	 * Last step, add directory indexes for our symlink inode. This is the
	 * last step to avoid extra cleanup of these indexes if an error
	 * happens elsewhere above.
	 */
	if (!err)
		err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
				BTRFS_I(inode), 0, index);
	if (err)
		goto out_unlock;

	d_instantiate_new(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans);
	if (err && inode) {
		inode_dec_link_count(inode);
		discard_new_inode(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
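/*
 * The symlink target written above lives entirely in an inline file extent
 * in the fs tree, which is why the length is capped at
 * BTRFS_MAX_INLINE_DATA_SIZE: page_get_link (see
 * btrfs_symlink_inode_operations below) reads it back through the page
 * cache. A userspace sketch:
 *
 *	symlink("target", "link");
 *	char buf[PATH_MAX];
 *	ssize_t n = readlink("link", buf, sizeof(buf));	// n == strlen("target")
 */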
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				       struct btrfs_trans_handle *trans_in,
				       struct btrfs_inode *inode,
				       struct btrfs_key *ins,
				       u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	int qgroup_released;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
	if (qgroup_released < 0)
		return ERR_PTR(qgroup_released);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
				     file_offset + len - 1, &extent_info,
				     &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * the transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
			inode->root->root_key.objectid, qgroup_released,
			BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks let's make its job easier by only searching for
		 * those sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
				min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved. Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
					cur_offset + ins.offset - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = current_time(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
			end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
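/*
 * A usage sketch (based on how these wrappers are typically reached, not on
 * anything in this file): the fallocate(2) path calls
 * btrfs_prealloc_file_range() via btrfs_fallocate() in file.c, so
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 * ends up allocating PREALLOC extents here without moving i_size, while the
 * _trans variant exists for callers already inside a transaction, such as
 * the free space cache setup.
 */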
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct user_namespace *mnt_userns,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(&init_user_ns, inode, mask);
}
static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	u64 objectid;
	u64 index;
	int ret = 0;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
			btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	inode->i_mapping->a_ops = &btrfs_aops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out;

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret)
		goto out;
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	d_tmpfile(dentry, inode);
	unlock_new_inode(inode);
	mark_inode_dirty(inode);
out:
	btrfs_end_transaction(trans);
	if (ret && inode)
		discard_new_inode(inode);
	btrfs_btree_balance_dirty(fs_info);
	return ret;
}
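/*
 * Userspace sketch of the ->tmpfile path: an unnamed inode is created and
 * immediately recorded as an orphan, so it is reclaimed after a crash
 * unless it is later given a name with linkat(2):
 *
 *	int fd = open("/mnt", O_TMPFILE | O_RDWR, 0600);
 *	// write(fd, ...); optionally linkat() it into the namespace later
 */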
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		ASSERT(page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		put_page(page);
		index++;
	}
}
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};

static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
				PAGE_SIZE) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
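/*
 * Worked example of the rounding above (illustrative numbers): with 4K
 * pages, block_start = 6144 and block_len = 12288 give
 * first_ppage = ALIGN(6144, 4096) >> 12 = 2 and
 * next_ppage = ALIGN_DOWN(18432, 4096) >> 12 = 4, so only the two fully
 * covered pages are handed to add_swap_extent(); the unaligned head and
 * tail of the physical range are simply not used for swap.
 */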
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 */
	atomic_inc(&root->nr_swapfiles);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
			   bg->start,
			   atomic_read(&fs_info->scrubs_running) ?
				       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif
/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
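/*
 * Note: this pairs with btrfs_getattr() above, which samples
 * new_delalloc_bytes and inode_get_bytes() under the same inode->lock, so a
 * concurrent stat(2) sees either the pre-replace or post-replace byte
 * count, never a state where the old extent was subtracted but the new one
 * not yet added.
 */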
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
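/*
 * Swapfiles are still supported, just not via bmap: the ->swap_activate
 * callback below maps the file's extents once at swapon time (validating
 * that they are NOCOW, uncompressed and on a single device) and pins the
 * underlying block groups and device for the lifetime of the swapfile.
 */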
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btrfs_migratepage,
#endif
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};